Dataset columns (one row per fill-in-the-middle sample):

file_name: large_string, lengths 4 to 140
prefix:    large_string, lengths 0 to 39k
suffix:    large_string, lengths 0 to 36.1k
middle:    large_string, lengths 0 to 29.4k
fim_type:  large_string, 4 classes
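Each row is a fill-in-the-middle (FIM) split of one source file: concatenating prefix + middle + suffix reproduces the original file, and fim_type records how the middle span was chosen (identifier_name, random_line_split, and conditional_block appear in the rows below). A minimal sketch of consuming a row, assuming the rows are loadable with the Hugging Face datasets library under a hypothetical dataset id:

# Minimal sketch; "user/fim-code" is a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("user/fim-code", split="train")
row = ds[0]
# prefix + middle + suffix reconstructs the original source file
full_source = row["prefix"] + row["middle"] + row["suffix"]
print(row["file_name"], row["fim_type"], len(full_source))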
file_name: images.py
prefix:

from tgen.images import Images, ttypes as o
from lib.blobby import Blobby, o as bo
from lib.discovery import connect
from lib.revent import ReventClient  # assumed import path; ReventClient is used below but never imported in the original
from redis import Redis
import time
from lib.imgcompare.avg import average_hash
from cStringIO import StringIO
from PIL import Image

# events we will fire:
#   image_added   : source_page_url, source_url, shahash
#   image_deleted : source_page_url, source_url, shahash

class
suffix:

(object):
    def __init__(self, redis_host='127.0.0.1'):
        self.redis_host = redis_host
        self.rc = Redis(redis_host)
        self.revent = ReventClient(redis_host=self.redis_host)

        # redis keys
        # incr this for the next image id
        #   images:next_id = next_id
        # all the images for the given sha
        #   images:datainstances:<shahash> = (ids)
        # timestamp of when image was added
        #   images:ids:timestamps = sorted (ids, timestamp)
        # all the image ids for the page
        #   images:page_ids:<page_url> = (ids)
        # last time an image was added from page
        #   images:pages:timestamps = sorted (url, timestamp)
        # images meta data
        #   images:<id> = {}

    def _image_to_dict(self, image):
        data = {}
        ignored_attrs = ['data']
        for attrs in image.thrift_spec[1:]:
            attr = attrs[2]
            if attr in ignored_attrs:
                continue
            v = getattr(image, attr)
            if v is not None:
                data[attr] = v
        return data

    def _dict_to_image(self, data):
        image = o.Image()
        for attrs in image.thrift_spec[1:]:
            attr = attrs[2]
            v = data.get(attr)
            if v is not None:
                # we might need to update the value type, since all
                # values come back from redis as strings
                attr_type = attrs[1]
                # float
                if attr_type == 4:
                    setattr(image, attr, float(v))
                # int
                elif attr_type == 8:
                    setattr(image, attr, int(v))
                else:
                    setattr(image, attr, v)
        return image

    def _delete_from_redis(self, image):
        # make these a transaction
        pipe = self.rc.pipeline()
        # remove it from the id set
        pipe.zrem('images:ids:timestamps', image.id)
        # remove its hash
        pipe.delete('images:%s' % image.id)
        # decrement the count for its image data
        pipe.srem('images:datainstances:%s' % image.shahash, image.id)
        # remove image from the page's id set
        if image.source_page_url:
            pipe.zrem('images:page_ids:%s' % image.source_page_url, image.id)
        # make it happen
        pipe.execute()
        return True

    def _save_to_redis(self, image):
        # make these a transaction
        pipe = self.rc.pipeline()
        # if our image doesn't have an id, set it up w/ one
        if not image.id:
            print 'got new image: %s' % image.shahash
            image.id = self.rc.incr('images:next_id')
            pipe.sadd('images:datainstances:%s' % image.shahash, image.id)
        # check and see if we used to have a different shahash
        old_shahash = self.rc.hget('images:%s' % image.id, 'shahash')
        if old_shahash != image.shahash:
            # remove our id from the old shahash tracker
            pipe.srem('images:datainstances:%s' % old_shahash, image.id)
            # add it to the new tracker
            pipe.sadd('images:datainstances:%s' % image.shahash, image.id)
        # update / set our timestamp
        da = 0.0
        if image.downloaded_at:
            da = image.downloaded_at
        else:
            da = time.time()
        pipe.zadd('images:ids:timestamps', image.id, da)
        # add this image to the page's id set
        if image.source_page_url:
            pipe.zadd('images:page_ids:%s' % image.source_page_url, image.id, da)
            # update our last scrape time for the page, scored by timestamp
            pipe.zadd('images:pages:timestamps', image.source_page_url, da)
        # take our image and make a dict
        image_data = self._image_to_dict(image)
        # set our data to redis
        key = 'images:%s' % image.id
        pipe.hmset(key, image_data)
        # execute our pipe
        pipe.execute()
        return image

    def _get_from_redis(self, image_id):
        # if the image id is in the id set then pull its details
        if self.rc.zrank('images:ids:timestamps', image_id) is not None:
            # get the image data from redis
            key = 'images:%s' % image_id
            image_data = self.rc.hgetall(key)
            if not image_data:
                print 'redis had no image data'
                return None
            image = self._dict_to_image(image_data)
            return image
        return None

    def _populate_image_data(self, image):
        if not image.shahash:
            return None
        with connect(Blobby) as c:
            image.data = c.get_data(image.shahash)
        return image

    def _set_image_data(self, image):
        if image.data is not None:
            with connect(Blobby) as c:
                image.shahash = c.set_data(image.data)
        return image

    def get_image(self, image_id):
        """ returns Image for given id or blank Image """
        # see if we have an image
        image = self._get_from_redis(image_id)
        if not image:
            raise o.ImageNotFound('Could not get image', image_id)
        # pull the actual image data
        self._populate_image_data(image)
        return image

    def add_image(self, image):
        """
        like set, but if we already have this image from this page
        we're not going to add it again. will also fill out image
        stats (size, dimensions)
        """
        # we're only for new images, no ids allowed;
        # if u want to set an id by hand use set_image
        if image.id:
            raise o.Exception('Can not add image with id')
        if not image.data:
            raise o.Exception('Image must have data')
        if not image.source_page_url:
            raise o.Exception('Image must have source page url')
        # update its stats
        image = self.populate_image_stats(image)
        # only add the image if we haven't seen it before:
        # if we've seen it before there will be an id which the set of
        # images w/ this data and from this page share
        ids = self.rc.sinter('images:datainstances:%s' % image.shahash,
                             'images:page_ids:%s' % image.source_page_url)
        # if we got ids back we don't need to continue;
        # we'll return back their original msg, w/o the id set
        if ids:
            print 'image already exists [%s], not setting' % ids
            return image
        # so the image appears to be new, good for it
        return self.set_image(image)

    def set_image(self, image):
        """ sets image data, returns image """
        # would be better if we only saved if it didn't exist
        if image.data:
            # save the image's data
            self._set_image_data(image)
        # could be an update, could be new
        image = self._save_to_redis(image)
        # let the world know we have added a new image
        self.revent.fire('image_added', {
            'source_page_url': image.source_page_url,
            'source_url': image.source_url,
            'shahash': image.shahash,
            'vhash': image.vhash,
            'xdim': image.xdim,
            'ydim': image.ydim,
        })
        return image

    def delete_image(self, image_id):
        """ removes an image """
        # get its image obj
        try:
            image = self.get_image(image_id)
        except o.ImageNotFound, ex:
            return False
        # delete the redis data
        self._delete_from_redis(image)
        # see if we need to remove the image data
        if self.rc.scard('images:datainstances:%s' % image.shahash) == 0:
            # no more images w/ the same data, remove image data
            with connect(Blobby) as c:
                c.delete_data(image.shahash)
        # it's gone, let'm know
        self.revent.fire('image_deleted', {
            'source_page_url': image.source_page_url,
            'source_url': image.source_url,
            'shahash': image.shahash,
            'vhash': image.vhash,
            'xdim': image.xdim,
            'ydim': image.ydim,
        })
        # and we're done!
        return True

    def get_images_since(self, image_id=None, timestamp=None,
                         offset=10, limit=0):
        """
        returns list of tumblr images, or a blank list, which were
        added after the given image id or timestamp
        """
        print '%s %s %s %s' % (image_id, timestamp, limit, offset)
        if image_id is not None:
            print 'got image id'
            # figure out what the current id is and then grab our
            # sorted set by index, assuming that all ids contain an image
            next_id = int(self.rc.get('images:next_id') or 0)
            # how far from the end is the id given
            d = next_id - image_id
            start = next_id - d
            end = next_id - d + limit - 1
            print 'getting between %s %s' % (start, end)
            # starting back where we think this image is to + limit
            ids = self.rc.zrange('images:ids:timestamps', start, end)
            print 'got ids: %s' % ids
        elif timestamp:
            print 'from timestamp: %s' % timestamp
            # get ids from our sorted set by their weight (aka timestamp)
            # TODO: not use inf
            ids = self.rc.zrangebyscore('images:ids:timestamps',
                                        timestamp, '+inf')
        else:
            print 'could not find images'
            ids = []
        # page ids
        if offset < len(ids):
            ids = ids[offset:max(len(ids), limit)]
        else:
            ids = []
        print 'found ids: %s' % ids
        # return images for each id
        images = map(self._get_from_redis, ids)
        # populate image data
        map(self._populate_image_data, images)
        return images

    def search(self, source_blog_url=None, since_timestamp=None,
               before_timestamp=None, ids=[], source_url=None):
        """ returns list of images, searched based on the passed params """
        pass

    def populate_image_stats(self, image):
        """ returns an Image w/ image data + stats filled out """
        ti = image
        image_data = ti.data
        if not ti.data:
            return ti
        ti.size = len(image_data)
        try:
            with connect(Blobby) as c:
                ti.shahash = c.get_data_bhash(image_data)
        except o.Exception, ex:
            raise o.Exception('oException getting shahash: %s' % ex.msg)
        except Exception, ex:
            raise o.Exception('Exception getting shahash: %s' % ex)
        try:
            b = StringIO(image_data)
            img = Image.open(b)
        except Exception, ex:
            raise o.Exception('Exception getting PIL img: %s' % ex)
        try:
            ti.xdim, ti.ydim = img.size
        except Exception, ex:
            raise o.Exception('Exception getting dimensions: %s' % ex)
        try:
            ti.vhash = str(average_hash(img))
        except Exception, ex:
            raise o.Exception('Exception getting vhash: %s' % ex)
        return ti


def run():
    from run_services import serve_service
    serve_service(Images, ImagesHandler())
middle: ImagesHandler
fim_type: identifier_name
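The handler above keeps all image state in Redis under a small family of keys: an id counter (images:next_id), a set of ids per content hash (images:datainstances:<shahash>), ids sorted by add time (images:ids:timestamps), per-page id sets (images:page_ids:<page_url>), and a metadata hash per image (images:<id>), with related writes batched in a pipeline. A minimal sketch of the same write pattern against a local Redis, using the modern redis-py API (zadd takes a mapping and hset replaces the deprecated hmset, unlike the older client the original code targets):

import time
from redis import Redis

rc = Redis()                  # assumes a Redis server on localhost
pipe = rc.pipeline()          # queue writes; they run together on execute()

image_id = rc.incr('images:next_id')        # allocate the next image id
shahash = 'deadbeef'                        # hypothetical content hash
page = 'http://example.com/page'            # hypothetical source page url
now = time.time()

pipe.sadd('images:datainstances:%s' % shahash, image_id)   # ids sharing this data
pipe.zadd('images:ids:timestamps', {image_id: now})        # all ids, scored by add time
pipe.zadd('images:page_ids:%s' % page, {image_id: now})    # ids seen on this page
pipe.hset('images:%s' % image_id,
          mapping={'shahash': shahash, 'source_page_url': page})
pipe.execute()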
file_name: bert_tagger_trainer.py
prefix:

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# file: bert_tagger_trainer.py

import os
import re
import argparse
import logging
from typing import Dict
from collections import namedtuple
from utils.random_seed import set_random_seed
set_random_seed(0)

import torch
import pytorch_lightning as pl
from torch import Tensor
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.nn.modules import CrossEntropyLoss
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from transformers import AutoTokenizer
from transformers import AdamW, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup

from utils.get_parser import get_parser
from datasets.tagger_ner_dataset import get_labels, TaggerNERDataset
from datasets.truncate_dataset import TruncateDataset
from datasets.collate_functions import tagger_collate_to_max_length
from metrics.tagger_span_f1 import TaggerSpanF1
from metrics.functional.tagger_span_f1 import transform_predictions_to_labels
from models.bert_tagger import BertTagger
from models.model_config import BertTaggerConfig


class BertSequenceLabeling(pl.LightningModule):
    def __init__(self, args: argparse.Namespace):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        format = '%(asctime)s - %(name)s - %(message)s'
        if isinstance(args, argparse.Namespace):
            self.save_hyperparameters(args)
            self.args = args
            logging.basicConfig(format=format,
                                filename=os.path.join(self.args.output_dir, "eval_result_log.txt"),
                                level=logging.INFO)
        else:
            # eval mode
            TmpArgs = namedtuple("tmp_args", field_names=list(args.keys()))
            self.args = args = TmpArgs(**args)
            logging.basicConfig(format=format,
                                filename=os.path.join(self.args.output_dir, "eval_test.txt"),
                                level=logging.INFO)

        self.bert_dir = args.bert_config_dir
        self.data_dir = self.args.data_dir
        self.task_labels = get_labels(self.args.data_sign)
        self.num_labels = len(self.task_labels)
        self.task_idx2label = {label_idx: label_item
                               for label_idx, label_item in enumerate(get_labels(self.args.data_sign))}
        bert_config = BertTaggerConfig.from_pretrained(args.bert_config_dir,
                                                       hidden_dropout_prob=args.bert_dropout,
                                                       attention_probs_dropout_prob=args.bert_dropout,
                                                       num_labels=self.num_labels,
                                                       classifier_dropout=args.classifier_dropout,
                                                       classifier_sign=args.classifier_sign,
                                                       classifier_act_func=args.classifier_act_func,
                                                       classifier_intermediate_hidden_size=args.classifier_intermediate_hidden_size)
        self.tokenizer = AutoTokenizer.from_pretrained(args.bert_config_dir,
                                                       use_fast=False,
                                                       do_lower_case=args.do_lowercase)
        self.model = BertTagger.from_pretrained(args.bert_config_dir, config=bert_config)
        logging.info(str(args.__dict__ if isinstance(args, argparse.ArgumentParser) else args))
        self.result_logger = logging.getLogger(__name__)
        self.result_logger.setLevel(logging.INFO)
        self.loss_func = CrossEntropyLoss()
        self.span_f1 = TaggerSpanF1()
        self.chinese = args.chinese
        self.optimizer = args.optimizer

    @staticmethod
    def add_model_specific_args(parent_parser):
        parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument("--train_batch_size", type=int, default=8, help="batch size")
        parser.add_argument("--eval_batch_size", type=int, default=8, help="batch size")
        parser.add_argument("--bert_dropout", type=float, default=0.1, help="bert dropout rate")
        parser.add_argument("--classifier_sign", type=str, default="multi_nonlinear")
        parser.add_argument("--classifier_dropout", type=float, default=0.1)
        parser.add_argument("--classifier_act_func", type=str, default="gelu")
        parser.add_argument("--classifier_intermediate_hidden_size", type=int, default=1024)
        parser.add_argument("--chinese", action="store_true", help="is chinese dataset")
        parser.add_argument("--optimizer", choices=["adamw", "torch.adam"], default="adamw",
                            help="optimizer type")
        parser.add_argument("--final_div_factor", type=float, default=1e4,
                            help="final div factor of linear decay scheduler")
        parser.add_argument("--output_dir", type=str, default="",
                            help="the path for saving intermediate model checkpoints.")
        parser.add_argument("--lr_scheduler", type=str, default="linear_decay", help="lr scheduler")
        parser.add_argument("--data_sign", type=str, default="en_conll03",
                            help="data signature for the dataset.")
        parser.add_argument("--polydecay_ratio", type=float, default=4,
                            help="ratio for polydecay learning rate scheduler.")
        parser.add_argument("--do_lowercase", action="store_true")
        parser.add_argument("--data_file_suffix", type=str, default=".char.bmes")
        # (sic) a separate flag from --lr_scheduler above; configure_optimizers checks this one for polydecay
        parser.add_argument("--lr_scheulder", type=str, default="polydecay")
        parser.add_argument("--lr_mini", type=float, default=-1)
        parser.add_argument("--warmup_proportion", default=0.1, type=float,
                            help="Proportion of training to perform linear learning rate warmup for.")
        return parser

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in self.model.named_parameters()
                           if not any(nd in n for nd in no_decay)],
                "weight_decay": self.args.weight_decay,
            },
            {
                "params": [p for n, p in self.model.named_parameters()
                           if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.optimizer == "adamw":
            optimizer = AdamW(optimizer_grouped_parameters,
                              betas=(0.9, 0.98),  # according to RoBERTa paper
                              lr=self.args.lr,
                              eps=self.args.adam_epsilon)
        elif self.optimizer == "torch.adam":
            optimizer = torch.optim.AdamW(optimizer_grouped_parameters,
                                          lr=self.args.lr,
                                          eps=self.args.adam_epsilon,
                                          weight_decay=self.args.weight_decay)
        else:
            raise ValueError("Optimizer type does not exist.")
        num_gpus = len([x for x in str(self.args.gpus).split(",") if x.strip()])
        t_total = (len(self.train_dataloader()) // (self.args.accumulate_grad_batches * num_gpus) + 1) * self.args.max_epochs
        warmup_steps = int(self.args.warmup_proportion * t_total)
        if self.args.lr_scheduler == "onecycle":
            scheduler = torch.optim.lr_scheduler.OneCycleLR(
                optimizer, max_lr=self.args.lr, pct_start=float(warmup_steps / t_total),
                final_div_factor=self.args.final_div_factor,
                total_steps=t_total, anneal_strategy='linear')
        elif self.args.lr_scheduler == "linear":
            scheduler = get_linear_schedule_with_warmup(optimizer,
                                                        num_warmup_steps=warmup_steps,
                                                        num_training_steps=t_total)
        elif self.args.lr_scheulder == "polydecay":
            if self.args.lr_mini == -1:
                lr_mini = self.args.lr / self.args.polydecay_ratio
            else:
                lr_mini = self.args.lr_mini
            scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, warmup_steps, t_total,
                                                                  lr_end=lr_mini)
        else:
            raise ValueError
        return [optimizer], [{"scheduler": scheduler, "interval": "step"}]

    def forward(self, input_ids, token_type_ids, attention_mask):
        return self.model(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)

    def compute_loss(self, sequence_logits, sequence_labels, input_mask=None):
        if input_mask is not None:
            active_loss = input_mask.view(-1) == 1
            active_logits = sequence_logits.view(-1, self.num_labels)
            active_labels = torch.where(
                active_loss,
                sequence_labels.view(-1),
                torch.tensor(self.loss_func.ignore_index).type_as(sequence_labels)
            )
            loss = self.loss_func(active_logits, active_labels)
        else:
            loss = self.loss_func(sequence_logits.view(-1, self.num_labels),
                                  sequence_labels.view(-1))
        return loss

    def training_step(self, batch, batch_idx):
        tf_board_logs = {"lr": self.trainer.optimizers[0].param_groups[0]['lr']}
        token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
        logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
        loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
        tf_board_logs["train_loss"] = loss
        return {'loss': loss, 'log': tf_board_logs}

    def validation_step(self, batch, batch_idx):
        output = {}
        token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
        batch_size = token_input_ids.shape[0]
        logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
        loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
        output["val_loss"] = loss
        sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)),
                                                            is_wordpiece_mask, self.task_idx2label,
                                                            input_type="logit")
        sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask,
                                                            self.task_idx2label, input_type="label")
        span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst)
        output["span_f1_stats"] = span_f1_stats
        return output

    def validation_epoch_end(self, outputs):
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        tensorboard_logs = {'val_loss': avg_loss}
        all_counts = torch.stack([x['span_f1_stats'] for x in outputs]).view(-1, 3).sum(0)
        span_tp, span_fp, span_fn = all_counts
        span_recall = span_tp / (span_tp + span_fn + 1e-10)
        span_precision = span_tp / (span_tp + span_fp + 1e-10)
        span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10)
        tensorboard_logs["span_precision"] = span_precision
        tensorboard_logs["span_recall"] = span_recall
        tensorboard_logs["span_f1"] = span_f1
        self.result_logger.info(f"EVAL INFO -> current_epoch is: {self.trainer.current_epoch}, current_global_step is: {self.trainer.global_step}")
        self.result_logger.info(f"EVAL INFO -> valid_f1 is: {span_f1}")
        return {'val_loss': avg_loss, 'log': tensorboard_logs}

    def test_step(self, batch, batch_idx):
        output = {}
        token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
        batch_size = token_input_ids.shape[0]
        logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
        loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
        output["test_loss"] = loss
        sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)),
                                                            is_wordpiece_mask, self.task_idx2label,
                                                            input_type="logit")
        sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask,
                                                            self.task_idx2label, input_type="label")
        span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst)
        output["span_f1_stats"] = span_f1_stats
        return output
suffix:

        all_counts = torch.stack([x['span_f1_stats'] for x in outputs]).view(-1, 3).sum(0)
        span_tp, span_fp, span_fn = all_counts
        span_recall = span_tp / (span_tp + span_fn + 1e-10)
        span_precision = span_tp / (span_tp + span_fp + 1e-10)
        span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10)
        tensorboard_logs["span_precision"] = span_precision
        tensorboard_logs["span_recall"] = span_recall
        tensorboard_logs["span_f1"] = span_f1
        print(f"TEST INFO -> test_f1 is: {span_f1} precision: {span_precision}, recall: {span_recall}")
        self.result_logger.info(f"EVAL INFO -> test_f1 is: {span_f1}, test_precision is: {span_precision}, test_recall is: {span_recall}")
        return {'test_loss': avg_loss, 'log': tensorboard_logs}

    def train_dataloader(self) -> DataLoader:
        return self.get_dataloader("train")

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("dev")

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test")

    def get_dataloader(self, prefix="train", limit: int = None) -> DataLoader:
        """get train/dev/test dataloader"""
        data_path = os.path.join(self.data_dir, f"{prefix}{self.args.data_file_suffix}")
        dataset = TaggerNERDataset(data_path, self.tokenizer, self.args.data_sign,
                                   max_length=self.args.max_length,
                                   is_chinese=self.args.chinese,
                                   pad_to_maxlen=False)
        if limit is not None:
            dataset = TruncateDataset(dataset, limit)
        if prefix == "train":
            batch_size = self.args.train_batch_size
            # defining a data generator helps experiment reproducibility;
            # cannot use the default random data sampler since the gradient may explode.
            data_generator = torch.Generator()
            data_generator.manual_seed(self.args.seed)
            data_sampler = RandomSampler(dataset, generator=data_generator)
        else:
            data_sampler = SequentialSampler(dataset)
            batch_size = self.args.eval_batch_size
        dataloader = DataLoader(dataset=dataset, sampler=data_sampler,
                                batch_size=batch_size, num_workers=self.args.workers,
                                collate_fn=tagger_collate_to_max_length)
        return dataloader


def find_best_checkpoint_on_dev(output_dir: str, log_file: str = "eval_result_log.txt",
                                only_keep_the_best_ckpt: bool = False):
    with open(os.path.join(output_dir, log_file)) as f:
        log_lines = f.readlines()

    F1_PATTERN = re.compile(r"span_f1 reached \d+\.\d* \(best")
    # val_f1 reached 0.00000 (best 0.00000)
    CKPT_PATTERN = re.compile(r"saving model to \S+ as top")
    checkpoint_info_lines = []
    for log_line in log_lines:
        if "saving model to" in log_line:
            checkpoint_info_lines.append(log_line)
    # example of a log line:
    # Epoch 00000: val_f1 reached 0.00000 (best 0.00000), saving model to /data/xiaoya/outputs/0117/debug_5_12_2e-5_0.001_0.001_275_0.1_1_0.25/checkpoint/epoch=0.ckpt as top 20
    best_f1_on_dev = 0
    best_checkpoint_on_dev = ""
    for checkpoint_info_line in checkpoint_info_lines:
        current_f1 = float(
            re.findall(F1_PATTERN, checkpoint_info_line)[0].replace("span_f1 reached ", "").replace(" (best", ""))
        current_ckpt = re.findall(CKPT_PATTERN, checkpoint_info_line)[0].replace("saving model to ", "").replace(" as top", "")
        if current_f1 >= best_f1_on_dev:
            if only_keep_the_best_ckpt and len(best_checkpoint_on_dev) != 0:
                os.remove(best_checkpoint_on_dev)
            best_f1_on_dev = current_f1
            best_checkpoint_on_dev = current_ckpt
    return best_f1_on_dev, best_checkpoint_on_dev


def main():
    """main"""
    parser = get_parser()
    # add model specific args
    parser = BertSequenceLabeling.add_model_specific_args(parser)
    # add all the available trainer options to argparse,
    # i.e. now --gpus --num_nodes ... --fast_dev_run all work in the cli
    parser = Trainer.add_argparse_args(parser)
    args = parser.parse_args()
    model = BertSequenceLabeling(args)
    if args.pretrained_checkpoint:
        model.load_state_dict(torch.load(args.pretrained_checkpoint,
                                         map_location=torch.device('cpu'))["state_dict"])
    checkpoint_callback = ModelCheckpoint(
        filepath=args.output_dir,
        save_top_k=args.max_keep_ckpt,
        verbose=True,
        monitor="span_f1",
        period=-1,
        mode="max",
    )
    trainer = Trainer.from_argparse_args(args,
                                         checkpoint_callback=checkpoint_callback,
                                         deterministic=True,
                                         default_root_dir=args.output_dir)
    trainer.fit(model)

    # after training, use the model checkpoint which achieves the best f1 score
    # on the dev set to compute the f1 on the test set
    best_f1_on_dev, path_to_best_checkpoint = find_best_checkpoint_on_dev(args.output_dir)
    model.result_logger.info("=&" * 20)
    model.result_logger.info(f"Best F1 on DEV is {best_f1_on_dev}")
    model.result_logger.info(f"Best checkpoint on DEV set is {path_to_best_checkpoint}")
    checkpoint = torch.load(path_to_best_checkpoint)
    model.load_state_dict(checkpoint['state_dict'])
    trainer.test(model)
    model.result_logger.info("=&" * 20)


if __name__ == '__main__':
    main()
middle:

    def test_epoch_end(self, outputs) -> Dict[str, Dict[str, Tensor]]:
        avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
        tensorboard_logs = {'test_loss': avg_loss}
fim_type: random_line_split
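Both epoch-end hooks in this file reduce the stacked per-batch (tp, fp, fn) counts to span precision, recall, and F1, with a 1e-10 epsilon guarding against division by zero. The same arithmetic in isolation, as a minimal sketch:

import torch

def span_prf1(stats: torch.Tensor, eps: float = 1e-10):
    """stats holds one (tp, fp, fn) triple per batch; shape (N, 3)."""
    tp, fp, fn = stats.view(-1, 3).sum(0)
    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    return precision, recall, f1

stats = torch.tensor([[8., 2., 1.], [5., 1., 2.]])   # two batches of counts
p, r, f1 = span_prf1(stats)                          # tp=13, fp=3, fn=3 -> all 0.8125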
file_name: bert_tagger_trainer.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # file: bert_tagger_trainer.py import os import re import argparse import logging from typing import Dict from collections import namedtuple from utils.random_seed import set_random_seed set_random_seed(0) import torch import pytorch_lightning as pl from torch import Tensor from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.nn.modules import CrossEntropyLoss from pytorch_lightning import Trainer from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint from transformers import AutoTokenizer from transformers import AdamW, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup from utils.get_parser import get_parser from datasets.tagger_ner_dataset import get_labels, TaggerNERDataset from datasets.truncate_dataset import TruncateDataset from datasets.collate_functions import tagger_collate_to_max_length from metrics.tagger_span_f1 import TaggerSpanF1 from metrics.functional.tagger_span_f1 import transform_predictions_to_labels from models.bert_tagger import BertTagger from models.model_config import BertTaggerConfig class BertSequenceLabeling(pl.LightningModule): def __init__( self, args: argparse.Namespace ): """Initialize a model, tokenizer and config.""" super().__init__() format = '%(asctime)s - %(name)s - %(message)s' if isinstance(args, argparse.Namespace): self.save_hyperparameters(args) self.args = args logging.basicConfig(format=format, filename=os.path.join(self.args.output_dir, "eval_result_log.txt"), level=logging.INFO) else: # eval mode TmpArgs = namedtuple("tmp_args", field_names=list(args.keys())) self.args = args = TmpArgs(**args) logging.basicConfig(format=format, filename=os.path.join(self.args.output_dir, "eval_test.txt"), level=logging.INFO) self.bert_dir = args.bert_config_dir self.data_dir = self.args.data_dir self.task_labels = get_labels(self.args.data_sign) self.num_labels = len(self.task_labels) self.task_idx2label = {label_idx : label_item for label_idx, label_item in enumerate(get_labels(self.args.data_sign))} bert_config = BertTaggerConfig.from_pretrained(args.bert_config_dir, hidden_dropout_prob=args.bert_dropout, attention_probs_dropout_prob=args.bert_dropout, num_labels=self.num_labels, classifier_dropout=args.classifier_dropout, classifier_sign=args.classifier_sign, classifier_act_func=args.classifier_act_func, classifier_intermediate_hidden_size=args.classifier_intermediate_hidden_size) self.tokenizer = AutoTokenizer.from_pretrained(args.bert_config_dir, use_fast=False, do_lower_case=args.do_lowercase) self.model = BertTagger.from_pretrained(args.bert_config_dir, config=bert_config) logging.info(str(args.__dict__ if isinstance(args, argparse.ArgumentParser) else args)) self.result_logger = logging.getLogger(__name__) self.result_logger.setLevel(logging.INFO) self.loss_func = CrossEntropyLoss() self.span_f1 = TaggerSpanF1() self.chinese = args.chinese self.optimizer = args.optimizer @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument("--train_batch_size", type=int, default=8, help="batch size") parser.add_argument("--eval_batch_size", type=int, default=8, help="batch size") parser.add_argument("--bert_dropout", type=float, default=0.1, help="bert dropout rate") parser.add_argument("--classifier_sign", type=str, default="multi_nonlinear") parser.add_argument("--classifier_dropout", type=float, default=0.1) 
parser.add_argument("--classifier_act_func", type=str, default="gelu") parser.add_argument("--classifier_intermediate_hidden_size", type=int, default=1024) parser.add_argument("--chinese", action="store_true", help="is chinese dataset") parser.add_argument("--optimizer", choices=["adamw", "torch.adam"], default="adamw", help="optimizer type") parser.add_argument("--final_div_factor", type=float, default=1e4, help="final div factor of linear decay scheduler") parser.add_argument("--output_dir", type=str, default="", help="the path for saving intermediate model checkpoints.") parser.add_argument("--lr_scheduler", type=str, default="linear_decay", help="lr scheduler") parser.add_argument("--data_sign", type=str, default="en_conll03", help="data signature for the dataset.") parser.add_argument("--polydecay_ratio", type=float, default=4, help="ratio for polydecay learing rate scheduler.") parser.add_argument("--do_lowercase", action="store_true", ) parser.add_argument("--data_file_suffix", type=str, default=".char.bmes") parser.add_argument("--lr_scheulder", type=str, default="polydecay") parser.add_argument("--lr_mini", type=float, default=-1) parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for.") return parser def configure_optimizers(self): """Prepare optimizer and schedule (linear warmup and decay)""" no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] if self.optimizer == "adamw": optimizer = AdamW(optimizer_grouped_parameters, betas=(0.9, 0.98), # according to RoBERTa paper lr=self.args.lr, eps=self.args.adam_epsilon,) elif self.optimizer == "torch.adam": optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=self.args.lr, eps=self.args.adam_epsilon, weight_decay=self.args.weight_decay) else: raise ValueError("Optimizer type does not exist.") num_gpus = len([x for x in str(self.args.gpus).split(",") if x.strip()]) t_total = (len(self.train_dataloader()) // (self.args.accumulate_grad_batches * num_gpus) + 1) * self.args.max_epochs warmup_steps = int(self.args.warmup_proportion * t_total) if self.args.lr_scheduler == "onecycle": scheduler = torch.optim.lr_scheduler.OneCycleLR( optimizer, max_lr=self.args.lr, pct_start=float(warmup_steps/t_total), final_div_factor=self.args.final_div_factor, total_steps=t_total, anneal_strategy='linear') elif self.args.lr_scheduler == "linear": scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total) elif self.args.lr_scheulder == "polydecay": if self.args.lr_mini == -1: lr_mini = self.args.lr / self.args.polydecay_ratio else: lr_mini = self.args.lr_mini scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, warmup_steps, t_total, lr_end=lr_mini) else: raise ValueError return [optimizer], [{"scheduler": scheduler, "interval": "step"}] def forward(self, input_ids, token_type_ids, attention_mask): return self.model(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) def compute_loss(self, sequence_logits, sequence_labels, input_mask=None): if input_mask is not None: active_loss = input_mask.view(-1) == 1 active_logits = sequence_logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, 
sequence_labels.view(-1), torch.tensor(self.loss_func.ignore_index).type_as(sequence_labels) ) loss = self.loss_func(active_logits, active_labels) else: loss = self.loss_func(sequence_logits.view(-1, self.num_labels), sequence_labels.view(-1)) return loss def training_step(self, batch, batch_idx): tf_board_logs = {"lr": self.trainer.optimizers[0].param_groups[0]['lr']} token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask) tf_board_logs[f"train_loss"] = loss return {'loss': loss, 'log': tf_board_logs} def validation_step(self, batch, batch_idx): output = {} token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch batch_size = token_input_ids.shape[0] logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask) output[f"val_loss"] = loss sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)), is_wordpiece_mask, self.task_idx2label, input_type="logit") sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label, input_type="label") span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst) output["span_f1_stats"] = span_f1_stats return output def validation_epoch_end(self, outputs): avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean() tensorboard_logs = {'val_loss': avg_loss} all_counts = torch.stack([x[f'span_f1_stats'] for x in outputs]).view(-1, 3).sum(0) span_tp, span_fp, span_fn = all_counts span_recall = span_tp / (span_tp + span_fn + 1e-10) span_precision = span_tp / (span_tp + span_fp + 1e-10) span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10) tensorboard_logs[f"span_precision"] = span_precision tensorboard_logs[f"span_recall"] = span_recall tensorboard_logs[f"span_f1"] = span_f1 self.result_logger.info(f"EVAL INFO -> current_epoch is: {self.trainer.current_epoch}, current_global_step is: {self.trainer.global_step} ") self.result_logger.info(f"EVAL INFO -> valid_f1 is: {span_f1}") return {'val_loss': avg_loss, 'log': tensorboard_logs} def test_step(self, batch, batch_idx): output = {} token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch batch_size = token_input_ids.shape[0] logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask) output[f"test_loss"] = loss sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)), is_wordpiece_mask, self.task_idx2label, input_type="logit") sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label, input_type="label") span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst) output["span_f1_stats"] = span_f1_stats return output def test_epoch_end(self, outputs) -> Dict[str, Dict[str, Tensor]]: avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean() tensorboard_logs = {'test_loss': avg_loss} all_counts = torch.stack([x[f'span_f1_stats'] for x in outputs]).view(-1, 3).sum(0) span_tp, span_fp, span_fn = all_counts span_recall = span_tp / (span_tp + span_fn + 1e-10) span_precision = 
span_tp / (span_tp + span_fp + 1e-10) span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10) tensorboard_logs[f"span_precision"] = span_precision tensorboard_logs[f"span_recall"] = span_recall tensorboard_logs[f"span_f1"] = span_f1 print(f"TEST INFO -> test_f1 is: {span_f1} precision: {span_precision}, recall: {span_recall}") self.result_logger.info(f"EVAL INFO -> test_f1 is: {span_f1}, test_precision is: {span_precision}, test_recall is: {span_recall}") return {'test_loss': avg_loss, 'log': tensorboard_logs} def train_dataloader(self) -> DataLoader: return self.get_dataloader("train") def val_dataloader(self) -> DataLoader: return self.get_dataloader("dev") def test_dataloader(self) -> DataLoader: return self.get_dataloader("test") def get_dataloader(self, prefix="train", limit: int = None) -> DataLoader: """get train/dev/test dataloader""" data_path = os.path.join(self.data_dir, f"{prefix}{self.args.data_file_suffix}") dataset = TaggerNERDataset(data_path, self.tokenizer, self.args.data_sign, max_length=self.args.max_length, is_chinese=self.args.chinese, pad_to_maxlen=False) if limit is not None:
if prefix == "train": batch_size = self.args.train_batch_size # define data_generator will help experiment reproducibility. # cannot use random data sampler since the gradient may explode. data_generator = torch.Generator() data_generator.manual_seed(self.args.seed) data_sampler = RandomSampler(dataset, generator=data_generator) else: data_sampler = SequentialSampler(dataset) batch_size = self.args.eval_batch_size dataloader = DataLoader( dataset=dataset, sampler=data_sampler, batch_size=batch_size, num_workers=self.args.workers, collate_fn=tagger_collate_to_max_length ) return dataloader def find_best_checkpoint_on_dev(output_dir: str, log_file: str = "eval_result_log.txt", only_keep_the_best_ckpt: bool = False): with open(os.path.join(output_dir, log_file)) as f: log_lines = f.readlines() F1_PATTERN = re.compile(r"span_f1 reached \d+\.\d* \(best") # val_f1 reached 0.00000 (best 0.00000) CKPT_PATTERN = re.compile(r"saving model to \S+ as top") checkpoint_info_lines = [] for log_line in log_lines: if "saving model to" in log_line: checkpoint_info_lines.append(log_line) # example of log line # Epoch 00000: val_f1 reached 0.00000 (best 0.00000), saving model to /data/xiaoya/outputs/0117/debug_5_12_2e-5_0.001_0.001_275_0.1_1_0.25/checkpoint/epoch=0.ckpt as top 20 best_f1_on_dev = 0 best_checkpoint_on_dev = "" for checkpoint_info_line in checkpoint_info_lines: current_f1 = float( re.findall(F1_PATTERN, checkpoint_info_line)[0].replace("span_f1 reached ", "").replace(" (best", "")) current_ckpt = re.findall(CKPT_PATTERN, checkpoint_info_line)[0].replace("saving model to ", "").replace( " as top", "") if current_f1 >= best_f1_on_dev: if only_keep_the_best_ckpt and len(best_checkpoint_on_dev) != 0: os.remove(best_checkpoint_on_dev) best_f1_on_dev = current_f1 best_checkpoint_on_dev = current_ckpt return best_f1_on_dev, best_checkpoint_on_dev def main(): """main""" parser = get_parser() # add model specific args parser = BertSequenceLabeling.add_model_specific_args(parser) # add all the available trainer options to argparse # ie: now --gpus --num_nodes ... --fast_dev_run all work in the cli parser = Trainer.add_argparse_args(parser) args = parser.parse_args() model = BertSequenceLabeling(args) if args.pretrained_checkpoint: model.load_state_dict(torch.load(args.pretrained_checkpoint, map_location=torch.device('cpu'))["state_dict"]) checkpoint_callback = ModelCheckpoint( filepath=args.output_dir, save_top_k=args.max_keep_ckpt, verbose=True, monitor="span_f1", period=-1, mode="max", ) trainer = Trainer.from_argparse_args( args, checkpoint_callback=checkpoint_callback, deterministic=True, default_root_dir=args.output_dir ) trainer.fit(model) # after training, use the model checkpoint which achieves the best f1 score on dev set to compute the f1 on test set. best_f1_on_dev, path_to_best_checkpoint = find_best_checkpoint_on_dev(args.output_dir,) model.result_logger.info("=&" * 20) model.result_logger.info(f"Best F1 on DEV is {best_f1_on_dev}") model.result_logger.info(f"Best checkpoint on DEV set is {path_to_best_checkpoint}") checkpoint = torch.load(path_to_best_checkpoint) model.load_state_dict(checkpoint['state_dict']) trainer.test(model) model.result_logger.info("=&" * 20) if __name__ == '__main__': main()
middle: dataset = TruncateDataset(dataset, limit)
fim_type: conditional_block
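The middle span of this row is the body of the `if limit is not None:` guard in get_dataloader: the dataset is wrapped so only its first `limit` examples are used. TruncateDataset itself is imported from datasets.truncate_dataset and not shown in this row; a plausible minimal implementation, offered as an assumption rather than the repo's actual code:

from torch.utils.data import Dataset

class TruncateDataset(Dataset):
    """Expose only the first max_num examples of a wrapped dataset (assumed behavior)."""

    def __init__(self, dataset, max_num: int):
        self.dataset = dataset
        self.max_num = min(max_num, len(dataset))

    def __len__(self):
        return self.max_num

    def __getitem__(self, index):
        return self.dataset[index]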
file_name: bert_tagger_trainer.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # file: bert_tagger_trainer.py import os import re import argparse import logging from typing import Dict from collections import namedtuple from utils.random_seed import set_random_seed set_random_seed(0) import torch import pytorch_lightning as pl from torch import Tensor from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.nn.modules import CrossEntropyLoss from pytorch_lightning import Trainer from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint from transformers import AutoTokenizer from transformers import AdamW, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup from utils.get_parser import get_parser from datasets.tagger_ner_dataset import get_labels, TaggerNERDataset from datasets.truncate_dataset import TruncateDataset from datasets.collate_functions import tagger_collate_to_max_length from metrics.tagger_span_f1 import TaggerSpanF1 from metrics.functional.tagger_span_f1 import transform_predictions_to_labels from models.bert_tagger import BertTagger from models.model_config import BertTaggerConfig class BertSequenceLabeling(pl.LightningModule): def __init__( self, args: argparse.Namespace ): """Initialize a model, tokenizer and config.""" super().__init__() format = '%(asctime)s - %(name)s - %(message)s' if isinstance(args, argparse.Namespace): self.save_hyperparameters(args) self.args = args logging.basicConfig(format=format, filename=os.path.join(self.args.output_dir, "eval_result_log.txt"), level=logging.INFO) else: # eval mode TmpArgs = namedtuple("tmp_args", field_names=list(args.keys())) self.args = args = TmpArgs(**args) logging.basicConfig(format=format, filename=os.path.join(self.args.output_dir, "eval_test.txt"), level=logging.INFO) self.bert_dir = args.bert_config_dir self.data_dir = self.args.data_dir self.task_labels = get_labels(self.args.data_sign) self.num_labels = len(self.task_labels) self.task_idx2label = {label_idx : label_item for label_idx, label_item in enumerate(get_labels(self.args.data_sign))} bert_config = BertTaggerConfig.from_pretrained(args.bert_config_dir, hidden_dropout_prob=args.bert_dropout, attention_probs_dropout_prob=args.bert_dropout, num_labels=self.num_labels, classifier_dropout=args.classifier_dropout, classifier_sign=args.classifier_sign, classifier_act_func=args.classifier_act_func, classifier_intermediate_hidden_size=args.classifier_intermediate_hidden_size) self.tokenizer = AutoTokenizer.from_pretrained(args.bert_config_dir, use_fast=False, do_lower_case=args.do_lowercase) self.model = BertTagger.from_pretrained(args.bert_config_dir, config=bert_config) logging.info(str(args.__dict__ if isinstance(args, argparse.ArgumentParser) else args)) self.result_logger = logging.getLogger(__name__) self.result_logger.setLevel(logging.INFO) self.loss_func = CrossEntropyLoss() self.span_f1 = TaggerSpanF1() self.chinese = args.chinese self.optimizer = args.optimizer @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument("--train_batch_size", type=int, default=8, help="batch size") parser.add_argument("--eval_batch_size", type=int, default=8, help="batch size") parser.add_argument("--bert_dropout", type=float, default=0.1, help="bert dropout rate") parser.add_argument("--classifier_sign", type=str, default="multi_nonlinear") parser.add_argument("--classifier_dropout", type=float, default=0.1) 
parser.add_argument("--classifier_act_func", type=str, default="gelu") parser.add_argument("--classifier_intermediate_hidden_size", type=int, default=1024) parser.add_argument("--chinese", action="store_true", help="is chinese dataset") parser.add_argument("--optimizer", choices=["adamw", "torch.adam"], default="adamw", help="optimizer type") parser.add_argument("--final_div_factor", type=float, default=1e4, help="final div factor of linear decay scheduler") parser.add_argument("--output_dir", type=str, default="", help="the path for saving intermediate model checkpoints.") parser.add_argument("--lr_scheduler", type=str, default="linear_decay", help="lr scheduler") parser.add_argument("--data_sign", type=str, default="en_conll03", help="data signature for the dataset.") parser.add_argument("--polydecay_ratio", type=float, default=4, help="ratio for polydecay learing rate scheduler.") parser.add_argument("--do_lowercase", action="store_true", ) parser.add_argument("--data_file_suffix", type=str, default=".char.bmes") parser.add_argument("--lr_scheulder", type=str, default="polydecay") parser.add_argument("--lr_mini", type=float, default=-1) parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for.") return parser def
(self): """Prepare optimizer and schedule (linear warmup and decay)""" no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] if self.optimizer == "adamw": optimizer = AdamW(optimizer_grouped_parameters, betas=(0.9, 0.98), # according to RoBERTa paper lr=self.args.lr, eps=self.args.adam_epsilon,) elif self.optimizer == "torch.adam": optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=self.args.lr, eps=self.args.adam_epsilon, weight_decay=self.args.weight_decay) else: raise ValueError("Optimizer type does not exist.") num_gpus = len([x for x in str(self.args.gpus).split(",") if x.strip()]) t_total = (len(self.train_dataloader()) // (self.args.accumulate_grad_batches * num_gpus) + 1) * self.args.max_epochs warmup_steps = int(self.args.warmup_proportion * t_total) if self.args.lr_scheduler == "onecycle": scheduler = torch.optim.lr_scheduler.OneCycleLR( optimizer, max_lr=self.args.lr, pct_start=float(warmup_steps/t_total), final_div_factor=self.args.final_div_factor, total_steps=t_total, anneal_strategy='linear') elif self.args.lr_scheduler == "linear": scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total) elif self.args.lr_scheulder == "polydecay": if self.args.lr_mini == -1: lr_mini = self.args.lr / self.args.polydecay_ratio else: lr_mini = self.args.lr_mini scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, warmup_steps, t_total, lr_end=lr_mini) else: raise ValueError return [optimizer], [{"scheduler": scheduler, "interval": "step"}] def forward(self, input_ids, token_type_ids, attention_mask): return self.model(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) def compute_loss(self, sequence_logits, sequence_labels, input_mask=None): if input_mask is not None: active_loss = input_mask.view(-1) == 1 active_logits = sequence_logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, sequence_labels.view(-1), torch.tensor(self.loss_func.ignore_index).type_as(sequence_labels) ) loss = self.loss_func(active_logits, active_labels) else: loss = self.loss_func(sequence_logits.view(-1, self.num_labels), sequence_labels.view(-1)) return loss def training_step(self, batch, batch_idx): tf_board_logs = {"lr": self.trainer.optimizers[0].param_groups[0]['lr']} token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask) tf_board_logs[f"train_loss"] = loss return {'loss': loss, 'log': tf_board_logs} def validation_step(self, batch, batch_idx): output = {} token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch batch_size = token_input_ids.shape[0] logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask) output[f"val_loss"] = loss sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)), is_wordpiece_mask, self.task_idx2label, input_type="logit") sequence_gold_lst = 
transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label, input_type="label") span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst) output["span_f1_stats"] = span_f1_stats return output def validation_epoch_end(self, outputs): avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean() tensorboard_logs = {'val_loss': avg_loss} all_counts = torch.stack([x[f'span_f1_stats'] for x in outputs]).view(-1, 3).sum(0) span_tp, span_fp, span_fn = all_counts span_recall = span_tp / (span_tp + span_fn + 1e-10) span_precision = span_tp / (span_tp + span_fp + 1e-10) span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10) tensorboard_logs[f"span_precision"] = span_precision tensorboard_logs[f"span_recall"] = span_recall tensorboard_logs[f"span_f1"] = span_f1 self.result_logger.info(f"EVAL INFO -> current_epoch is: {self.trainer.current_epoch}, current_global_step is: {self.trainer.global_step} ") self.result_logger.info(f"EVAL INFO -> valid_f1 is: {span_f1}") return {'val_loss': avg_loss, 'log': tensorboard_logs} def test_step(self, batch, batch_idx): output = {} token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch batch_size = token_input_ids.shape[0] logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask) output[f"test_loss"] = loss sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)), is_wordpiece_mask, self.task_idx2label, input_type="logit") sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label, input_type="label") span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst) output["span_f1_stats"] = span_f1_stats return output def test_epoch_end(self, outputs) -> Dict[str, Dict[str, Tensor]]: avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean() tensorboard_logs = {'test_loss': avg_loss} all_counts = torch.stack([x[f'span_f1_stats'] for x in outputs]).view(-1, 3).sum(0) span_tp, span_fp, span_fn = all_counts span_recall = span_tp / (span_tp + span_fn + 1e-10) span_precision = span_tp / (span_tp + span_fp + 1e-10) span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10) tensorboard_logs[f"span_precision"] = span_precision tensorboard_logs[f"span_recall"] = span_recall tensorboard_logs[f"span_f1"] = span_f1 print(f"TEST INFO -> test_f1 is: {span_f1} precision: {span_precision}, recall: {span_recall}") self.result_logger.info(f"EVAL INFO -> test_f1 is: {span_f1}, test_precision is: {span_precision}, test_recall is: {span_recall}") return {'test_loss': avg_loss, 'log': tensorboard_logs} def train_dataloader(self) -> DataLoader: return self.get_dataloader("train") def val_dataloader(self) -> DataLoader: return self.get_dataloader("dev") def test_dataloader(self) -> DataLoader: return self.get_dataloader("test") def get_dataloader(self, prefix="train", limit: int = None) -> DataLoader: """get train/dev/test dataloader""" data_path = os.path.join(self.data_dir, f"{prefix}{self.args.data_file_suffix}") dataset = TaggerNERDataset(data_path, self.tokenizer, self.args.data_sign, max_length=self.args.max_length, is_chinese=self.args.chinese, pad_to_maxlen=False) if limit is not None: dataset = TruncateDataset(dataset, limit) if prefix == "train": batch_size = self.args.train_batch_size # define data_generator will help experiment 
reproducibility. # cannot use random data sampler since the gradient may explode. data_generator = torch.Generator() data_generator.manual_seed(self.args.seed) data_sampler = RandomSampler(dataset, generator=data_generator) else: data_sampler = SequentialSampler(dataset) batch_size = self.args.eval_batch_size dataloader = DataLoader( dataset=dataset, sampler=data_sampler, batch_size=batch_size, num_workers=self.args.workers, collate_fn=tagger_collate_to_max_length ) return dataloader def find_best_checkpoint_on_dev(output_dir: str, log_file: str = "eval_result_log.txt", only_keep_the_best_ckpt: bool = False): with open(os.path.join(output_dir, log_file)) as f: log_lines = f.readlines() F1_PATTERN = re.compile(r"span_f1 reached \d+\.\d* \(best") # val_f1 reached 0.00000 (best 0.00000) CKPT_PATTERN = re.compile(r"saving model to \S+ as top") checkpoint_info_lines = [] for log_line in log_lines: if "saving model to" in log_line: checkpoint_info_lines.append(log_line) # example of log line # Epoch 00000: val_f1 reached 0.00000 (best 0.00000), saving model to /data/xiaoya/outputs/0117/debug_5_12_2e-5_0.001_0.001_275_0.1_1_0.25/checkpoint/epoch=0.ckpt as top 20 best_f1_on_dev = 0 best_checkpoint_on_dev = "" for checkpoint_info_line in checkpoint_info_lines: current_f1 = float( re.findall(F1_PATTERN, checkpoint_info_line)[0].replace("span_f1 reached ", "").replace(" (best", "")) current_ckpt = re.findall(CKPT_PATTERN, checkpoint_info_line)[0].replace("saving model to ", "").replace( " as top", "") if current_f1 >= best_f1_on_dev: if only_keep_the_best_ckpt and len(best_checkpoint_on_dev) != 0: os.remove(best_checkpoint_on_dev) best_f1_on_dev = current_f1 best_checkpoint_on_dev = current_ckpt return best_f1_on_dev, best_checkpoint_on_dev def main(): """main""" parser = get_parser() # add model specific args parser = BertSequenceLabeling.add_model_specific_args(parser) # add all the available trainer options to argparse # ie: now --gpus --num_nodes ... --fast_dev_run all work in the cli parser = Trainer.add_argparse_args(parser) args = parser.parse_args() model = BertSequenceLabeling(args) if args.pretrained_checkpoint: model.load_state_dict(torch.load(args.pretrained_checkpoint, map_location=torch.device('cpu'))["state_dict"]) checkpoint_callback = ModelCheckpoint( filepath=args.output_dir, save_top_k=args.max_keep_ckpt, verbose=True, monitor="span_f1", period=-1, mode="max", ) trainer = Trainer.from_argparse_args( args, checkpoint_callback=checkpoint_callback, deterministic=True, default_root_dir=args.output_dir ) trainer.fit(model) # after training, use the model checkpoint which achieves the best f1 score on dev set to compute the f1 on test set. best_f1_on_dev, path_to_best_checkpoint = find_best_checkpoint_on_dev(args.output_dir,) model.result_logger.info("=&" * 20) model.result_logger.info(f"Best F1 on DEV is {best_f1_on_dev}") model.result_logger.info(f"Best checkpoint on DEV set is {path_to_best_checkpoint}") checkpoint = torch.load(path_to_best_checkpoint) model.load_state_dict(checkpoint['state_dict']) trainer.test(model) model.result_logger.info("=&" * 20) if __name__ == '__main__': main()
middle: configure_optimizers
fim_type: identifier_name
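This row's middle is just the identifier configure_optimizers. The core pattern inside that method, excluding biases and LayerNorm weights from weight decay before handing two parameter groups to AdamW, works for any torch.nn.Module; a minimal standalone sketch:

import torch
from torch import nn

def grouped_parameters(model: nn.Module, weight_decay: float = 0.01):
    """Split parameters so bias and LayerNorm terms get no weight decay."""
    no_decay = ("bias", "LayerNorm.weight")
    return [
        {"params": [p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)],
         "weight_decay": weight_decay},
        {"params": [p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)],
         "weight_decay": 0.0},
    ]

class Tiny(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)   # attribute name matches the no-decay filter

optimizer = torch.optim.AdamW(grouped_parameters(Tiny()), lr=2e-5)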
file_name: bert_tagger_trainer.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # file: bert_tagger_trainer.py import os import re import argparse import logging from typing import Dict from collections import namedtuple from utils.random_seed import set_random_seed set_random_seed(0) import torch import pytorch_lightning as pl from torch import Tensor from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.nn.modules import CrossEntropyLoss from pytorch_lightning import Trainer from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint from transformers import AutoTokenizer from transformers import AdamW, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup from utils.get_parser import get_parser from datasets.tagger_ner_dataset import get_labels, TaggerNERDataset from datasets.truncate_dataset import TruncateDataset from datasets.collate_functions import tagger_collate_to_max_length from metrics.tagger_span_f1 import TaggerSpanF1 from metrics.functional.tagger_span_f1 import transform_predictions_to_labels from models.bert_tagger import BertTagger from models.model_config import BertTaggerConfig class BertSequenceLabeling(pl.LightningModule): def __init__( self, args: argparse.Namespace ): """Initialize a model, tokenizer and config.""" super().__init__() format = '%(asctime)s - %(name)s - %(message)s' if isinstance(args, argparse.Namespace): self.save_hyperparameters(args) self.args = args logging.basicConfig(format=format, filename=os.path.join(self.args.output_dir, "eval_result_log.txt"), level=logging.INFO) else: # eval mode TmpArgs = namedtuple("tmp_args", field_names=list(args.keys())) self.args = args = TmpArgs(**args) logging.basicConfig(format=format, filename=os.path.join(self.args.output_dir, "eval_test.txt"), level=logging.INFO) self.bert_dir = args.bert_config_dir self.data_dir = self.args.data_dir self.task_labels = get_labels(self.args.data_sign) self.num_labels = len(self.task_labels) self.task_idx2label = {label_idx : label_item for label_idx, label_item in enumerate(get_labels(self.args.data_sign))} bert_config = BertTaggerConfig.from_pretrained(args.bert_config_dir, hidden_dropout_prob=args.bert_dropout, attention_probs_dropout_prob=args.bert_dropout, num_labels=self.num_labels, classifier_dropout=args.classifier_dropout, classifier_sign=args.classifier_sign, classifier_act_func=args.classifier_act_func, classifier_intermediate_hidden_size=args.classifier_intermediate_hidden_size) self.tokenizer = AutoTokenizer.from_pretrained(args.bert_config_dir, use_fast=False, do_lower_case=args.do_lowercase) self.model = BertTagger.from_pretrained(args.bert_config_dir, config=bert_config) logging.info(str(args.__dict__ if isinstance(args, argparse.ArgumentParser) else args)) self.result_logger = logging.getLogger(__name__) self.result_logger.setLevel(logging.INFO) self.loss_func = CrossEntropyLoss() self.span_f1 = TaggerSpanF1() self.chinese = args.chinese self.optimizer = args.optimizer @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument("--train_batch_size", type=int, default=8, help="batch size") parser.add_argument("--eval_batch_size", type=int, default=8, help="batch size") parser.add_argument("--bert_dropout", type=float, default=0.1, help="bert dropout rate") parser.add_argument("--classifier_sign", type=str, default="multi_nonlinear") parser.add_argument("--classifier_dropout", type=float, default=0.1) 
parser.add_argument("--classifier_act_func", type=str, default="gelu") parser.add_argument("--classifier_intermediate_hidden_size", type=int, default=1024) parser.add_argument("--chinese", action="store_true", help="is chinese dataset") parser.add_argument("--optimizer", choices=["adamw", "torch.adam"], default="adamw", help="optimizer type") parser.add_argument("--final_div_factor", type=float, default=1e4, help="final div factor of linear decay scheduler") parser.add_argument("--output_dir", type=str, default="", help="the path for saving intermediate model checkpoints.") parser.add_argument("--lr_scheduler", type=str, default="linear_decay", help="lr scheduler") parser.add_argument("--data_sign", type=str, default="en_conll03", help="data signature for the dataset.") parser.add_argument("--polydecay_ratio", type=float, default=4, help="ratio for polydecay learing rate scheduler.") parser.add_argument("--do_lowercase", action="store_true", ) parser.add_argument("--data_file_suffix", type=str, default=".char.bmes") parser.add_argument("--lr_scheulder", type=str, default="polydecay") parser.add_argument("--lr_mini", type=float, default=-1) parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for.") return parser def configure_optimizers(self): """Prepare optimizer and schedule (linear warmup and decay)""" no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] if self.optimizer == "adamw": optimizer = AdamW(optimizer_grouped_parameters, betas=(0.9, 0.98), # according to RoBERTa paper lr=self.args.lr, eps=self.args.adam_epsilon,) elif self.optimizer == "torch.adam": optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=self.args.lr, eps=self.args.adam_epsilon, weight_decay=self.args.weight_decay) else: raise ValueError("Optimizer type does not exist.") num_gpus = len([x for x in str(self.args.gpus).split(",") if x.strip()]) t_total = (len(self.train_dataloader()) // (self.args.accumulate_grad_batches * num_gpus) + 1) * self.args.max_epochs warmup_steps = int(self.args.warmup_proportion * t_total) if self.args.lr_scheduler == "onecycle": scheduler = torch.optim.lr_scheduler.OneCycleLR( optimizer, max_lr=self.args.lr, pct_start=float(warmup_steps/t_total), final_div_factor=self.args.final_div_factor, total_steps=t_total, anneal_strategy='linear') elif self.args.lr_scheduler == "linear": scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total) elif self.args.lr_scheulder == "polydecay": if self.args.lr_mini == -1: lr_mini = self.args.lr / self.args.polydecay_ratio else: lr_mini = self.args.lr_mini scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, warmup_steps, t_total, lr_end=lr_mini) else: raise ValueError return [optimizer], [{"scheduler": scheduler, "interval": "step"}] def forward(self, input_ids, token_type_ids, attention_mask): return self.model(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) def compute_loss(self, sequence_logits, sequence_labels, input_mask=None): if input_mask is not None: active_loss = input_mask.view(-1) == 1 active_logits = sequence_logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, 
sequence_labels.view(-1), torch.tensor(self.loss_func.ignore_index).type_as(sequence_labels) ) loss = self.loss_func(active_logits, active_labels) else: loss = self.loss_func(sequence_logits.view(-1, self.num_labels), sequence_labels.view(-1)) return loss def training_step(self, batch, batch_idx):
def validation_step(self, batch, batch_idx): output = {} token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch batch_size = token_input_ids.shape[0] logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask) output[f"val_loss"] = loss sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)), is_wordpiece_mask, self.task_idx2label, input_type="logit") sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label, input_type="label") span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst) output["span_f1_stats"] = span_f1_stats return output def validation_epoch_end(self, outputs): avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean() tensorboard_logs = {'val_loss': avg_loss} all_counts = torch.stack([x[f'span_f1_stats'] for x in outputs]).view(-1, 3).sum(0) span_tp, span_fp, span_fn = all_counts span_recall = span_tp / (span_tp + span_fn + 1e-10) span_precision = span_tp / (span_tp + span_fp + 1e-10) span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10) tensorboard_logs[f"span_precision"] = span_precision tensorboard_logs[f"span_recall"] = span_recall tensorboard_logs[f"span_f1"] = span_f1 self.result_logger.info(f"EVAL INFO -> current_epoch is: {self.trainer.current_epoch}, current_global_step is: {self.trainer.global_step} ") self.result_logger.info(f"EVAL INFO -> valid_f1 is: {span_f1}") return {'val_loss': avg_loss, 'log': tensorboard_logs} def test_step(self, batch, batch_idx): output = {} token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch batch_size = token_input_ids.shape[0] logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask) output[f"test_loss"] = loss sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)), is_wordpiece_mask, self.task_idx2label, input_type="logit") sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label, input_type="label") span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst) output["span_f1_stats"] = span_f1_stats return output def test_epoch_end(self, outputs) -> Dict[str, Dict[str, Tensor]]: avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean() tensorboard_logs = {'test_loss': avg_loss} all_counts = torch.stack([x[f'span_f1_stats'] for x in outputs]).view(-1, 3).sum(0) span_tp, span_fp, span_fn = all_counts span_recall = span_tp / (span_tp + span_fn + 1e-10) span_precision = span_tp / (span_tp + span_fp + 1e-10) span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10) tensorboard_logs[f"span_precision"] = span_precision tensorboard_logs[f"span_recall"] = span_recall tensorboard_logs[f"span_f1"] = span_f1 print(f"TEST INFO -> test_f1 is: {span_f1} precision: {span_precision}, recall: {span_recall}") self.result_logger.info(f"EVAL INFO -> test_f1 is: {span_f1}, test_precision is: {span_precision}, test_recall is: {span_recall}") return {'test_loss': avg_loss, 'log': tensorboard_logs} def train_dataloader(self) -> DataLoader: return self.get_dataloader("train") def val_dataloader(self) -> DataLoader: return self.get_dataloader("dev") def test_dataloader(self) -> 
DataLoader: return self.get_dataloader("test") def get_dataloader(self, prefix="train", limit: int = None) -> DataLoader: """get train/dev/test dataloader""" data_path = os.path.join(self.data_dir, f"{prefix}{self.args.data_file_suffix}") dataset = TaggerNERDataset(data_path, self.tokenizer, self.args.data_sign, max_length=self.args.max_length, is_chinese=self.args.chinese, pad_to_maxlen=False) if limit is not None: dataset = TruncateDataset(dataset, limit) if prefix == "train": batch_size = self.args.train_batch_size # define data_generator will help experiment reproducibility. # cannot use random data sampler since the gradient may explode. data_generator = torch.Generator() data_generator.manual_seed(self.args.seed) data_sampler = RandomSampler(dataset, generator=data_generator) else: data_sampler = SequentialSampler(dataset) batch_size = self.args.eval_batch_size dataloader = DataLoader( dataset=dataset, sampler=data_sampler, batch_size=batch_size, num_workers=self.args.workers, collate_fn=tagger_collate_to_max_length ) return dataloader def find_best_checkpoint_on_dev(output_dir: str, log_file: str = "eval_result_log.txt", only_keep_the_best_ckpt: bool = False): with open(os.path.join(output_dir, log_file)) as f: log_lines = f.readlines() F1_PATTERN = re.compile(r"span_f1 reached \d+\.\d* \(best") # val_f1 reached 0.00000 (best 0.00000) CKPT_PATTERN = re.compile(r"saving model to \S+ as top") checkpoint_info_lines = [] for log_line in log_lines: if "saving model to" in log_line: checkpoint_info_lines.append(log_line) # example of log line # Epoch 00000: val_f1 reached 0.00000 (best 0.00000), saving model to /data/xiaoya/outputs/0117/debug_5_12_2e-5_0.001_0.001_275_0.1_1_0.25/checkpoint/epoch=0.ckpt as top 20 best_f1_on_dev = 0 best_checkpoint_on_dev = "" for checkpoint_info_line in checkpoint_info_lines: current_f1 = float( re.findall(F1_PATTERN, checkpoint_info_line)[0].replace("span_f1 reached ", "").replace(" (best", "")) current_ckpt = re.findall(CKPT_PATTERN, checkpoint_info_line)[0].replace("saving model to ", "").replace( " as top", "") if current_f1 >= best_f1_on_dev: if only_keep_the_best_ckpt and len(best_checkpoint_on_dev) != 0: os.remove(best_checkpoint_on_dev) best_f1_on_dev = current_f1 best_checkpoint_on_dev = current_ckpt return best_f1_on_dev, best_checkpoint_on_dev def main(): """main""" parser = get_parser() # add model specific args parser = BertSequenceLabeling.add_model_specific_args(parser) # add all the available trainer options to argparse # ie: now --gpus --num_nodes ... --fast_dev_run all work in the cli parser = Trainer.add_argparse_args(parser) args = parser.parse_args() model = BertSequenceLabeling(args) if args.pretrained_checkpoint: model.load_state_dict(torch.load(args.pretrained_checkpoint, map_location=torch.device('cpu'))["state_dict"]) checkpoint_callback = ModelCheckpoint( filepath=args.output_dir, save_top_k=args.max_keep_ckpt, verbose=True, monitor="span_f1", period=-1, mode="max", ) trainer = Trainer.from_argparse_args( args, checkpoint_callback=checkpoint_callback, deterministic=True, default_root_dir=args.output_dir ) trainer.fit(model) # after training, use the model checkpoint which achieves the best f1 score on dev set to compute the f1 on test set. 
best_f1_on_dev, path_to_best_checkpoint = find_best_checkpoint_on_dev(args.output_dir,) model.result_logger.info("=&" * 20) model.result_logger.info(f"Best F1 on DEV is {best_f1_on_dev}") model.result_logger.info(f"Best checkpoint on DEV set is {path_to_best_checkpoint}") checkpoint = torch.load(path_to_best_checkpoint) model.load_state_dict(checkpoint['state_dict']) trainer.test(model) model.result_logger.info("=&" * 20) if __name__ == '__main__': main()
tf_board_logs = {"lr": self.trainer.optimizers[0].param_groups[0]['lr']} token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask) tf_board_logs[f"train_loss"] = loss return {'loss': loss, 'log': tf_board_logs}
identifier_body
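The step-budget arithmetic in configure_optimizers above is easy to sanity-check by hand. A minimal Python sketch of that calculation, with made-up numbers standing in for the dataloader length and the --accumulate_grad_batches, --gpus, and --max_epochs flags (none of these values come from the file):

# Sketch of the step budget computed in configure_optimizers (all values hypothetical).
steps_per_epoch = 1000        # len(self.train_dataloader()), assumed
accumulate_grad_batches = 4   # self.args.accumulate_grad_batches, assumed
num_gpus = 2                  # parsed from self.args.gpus, assumed
max_epochs = 10               # self.args.max_epochs, assumed
warmup_proportion = 0.1       # default of --warmup_proportion

t_total = (steps_per_epoch // (accumulate_grad_batches * num_gpus) + 1) * max_epochs
warmup_steps = int(warmup_proportion * t_total)
print(t_total, warmup_steps)  # 1260 126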
user-order.component.ts
import 'rxjs/add/operator/debounceTime'; import 'rxjs/add/operator/distinctUntilChanged'; import 'rxjs/add/operator/switchMap'; import 'rxjs/add/operator/switchMap'; import { Location } from '@angular/common'; import { HttpClient } from '@angular/common/http'; import { Component, OnInit } from '@angular/core'; import { ActivatedRoute, Router } from '@angular/router'; import { NzMessageService, NzModalService, NzModalSubject } from 'ng-zorro-antd'; import { Subject } from 'rxjs/Subject'; import { API_ORDER_ITEM_DELETE, API_ORDER_ITEM_INSERT, API_ORDER_ITEM_UPDATE, API_USER_ORDER_DETAIL, API_USER_ORDER_INSERT, API_USER_ORDER_UPDATE, API_ORDER_ITEM_BATCH_UPDATE_CAR, } from '../../api/egg.api'; import { ApiRes, ApiResObj } from '../../model/api.model'; import { CarOrder, clearNewOrderItem, clearOrderField, DbStatus, OrderItem, OrderStatus, UserOrder, } from '../../model/egg.model'; import { CarSelectorComponent } from '../car-selector/car-selector.component'; @Component({ templateUrl: './user-order.component.html', styleUrls: ['./user-order.component.css'] }) export class UserOrderComponent implements OnInit { allChecked = false indeterminate = false checkedNumber = 0 checkedItems: OrderItem[] = [] orderSubject = new Subject() order: UserOrder = {} values: OrderItem[] = [] count = 0 weightCache: { [key: string]: string } = {} readonly = false tablePageIndex = 1 tablePageSize = 5 pageSizeSelectorValues = [5, 10, 20, 30, 40, 50, 100, 200] defaultCar: CarOrder popVisible: { [key: string]: boolean } = {} constructor( private route: ActivatedRoute, private location: Location, private router: Router, private subject: NzModalSubject, private http: HttpClient, private message: NzMessageService, private modal: NzModalService, ) { } addToCar() { this.modal.open({ title: '选择车次', content: CarSelectorComponent, onOk() { }, onCancel() { }, footer: false, width: 640, componentParams: { onSelect: (selectedCar: CarOrder) => { const req = { car: selectedCar.id, ids: this.checkedItems.map(item => item.id), isByUser: false, } this.http.post<ApiResObj>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => { this.message.success('操作成功') this.checkedItems.forEach(item => item.car = req.car) }) } } }) } removeFromCar() { this.modal.confirm({ title: '移除', content: `确认移除吗?`, onOk: () => { const req = { ids: this.checkedItems.map(item => item.id), isByUser: false, } this.http.post<ApiRes<UserOrder>>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => { this.message.success('操作成功') this.checkedItems.forEach(item => item.car = undefined) }) } }) } refreshStatus() { const allChecked = this.values.every(value => value.checked === true) const allUnChecked = this.values.every(value => !value.checked) this.allChecked = allChecked this.indeterminate = (!allChecked) && (!allUnChecked) this.checkedItems = this.values.filter(value => value.checked) this.checkedNumber = this.checkedItems.length } checkAll(value) { if (value) { this.values.forEach(item => { item.checked = true }) } else { this.values.forEach(item => { item.checked = false }) } this.refreshStatus() } descCar(car: CarOrder) { if (car) { return `单号: ${car.id}, 姓名: ${car.driver}, 日期: ${car.createdAt}` } else { return '未选择' } } selectCar() { this.modal.open({ title: '选择默认车次(本单中的单位默认加入该车次)', content: CarSelectorComponent, onOk() { }, onCancel() { }, footer: false, componentParams: { data: this.defaultCar, onSelect: (selectedCar: CarOrder) => { this.defaultCar = selectedCar this.order.car = this.defaultCar.id this.orderChange() } } }) } itemSelectCar(item: OrderItem, 
index: number) { let carOrder if (item.car) { carOrder = { id: item.car } } else { if (!item.id) { carOrder = this.defaultCar } } this.popVisible[index] = false this.modal.open({ title: '选择车次(只是该单位)', content: CarSelectorComponent, onOk() { }, onCancel() { }, footer: false, componentParams: { data: carOrder, onSelect: (selectedCar: CarOrder) => { const isNewCar = item.car !== selectedCar.id item.car = selectedCar.id if (item.id && isNewCar) { item.status = '待更新' item.subject.next(item) } } } }) } itemDeleteCar(item: OrderItem, index: number) { this.popVisible[index] = false item.car = null item.status = '待更新' item.subject.next(item) } refreshTableData() { this.values = [...this.values] } itemIndex(index: number) { return (this.tablePageIndex - 1) * this.tablePageSize + index } isFinished() { return OrderStatus.FINISHED === this.order.status } orderChange() { this.orderSubject.next() } onEnter(weight: string) { this.doAddNewItem(weight) } itemChange(item: OrderItem, index: number) { if (index === this.values.length - 1) { this.doAddEmptyItem() } if (item.id) { // format error corrected by deleting characters: the value is back to the cached weight if (item.weight
em.id]) { if (item.error) { item.error = false item.status = '上传完成' } return } item.status = '待更新' } else { item.status = '待上传' } item.subject.next(item) } itemBlur(item: OrderItem, index: number) { if (item.weight && !this.readonly) { this.itemChange(item, index) } } remove(item: UserOrder, index: number) { if (item.id) { this.modal.confirm({ title: '删除', content: '确认删除?', onOk: () => { this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_DELETE, { id: item.id }).subscribe(res => { this.values.splice(index, 1) this.refreshTableData() this.calcCount() delete this.weightCache[item.id] }) } }) } else { this.values.splice(index, 1) this.refreshTableData() } } doUpload(item: OrderItem) { const r = /^[1-9]\d{0,3}(\.\d{1}){0,1}$/ if (item.weight && r.test(item.weight.toString())) { item.error = false if (item.id) { // update this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_UPDATE, clearNewOrderItem(item)).subscribe(res => { item.status = '上传完成' item.id = res.data.id this.weightCache[item.id] = item.weight.toString() this.calcCount() }) } else { // insert if (!item.dbStatus) { const user = this.order.id if (user) { item.user = user item.dbStatus = DbStatus.CREATING item.status = '数据创建中...' if (this.defaultCar && this.defaultCar.id) { item.car = this.defaultCar.id } this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_INSERT, clearNewOrderItem(item)).subscribe(res => { item.status = '上传完成' item.id = res.data.id this.weightCache[item.id] = item.weight.toString() item.dbStatus = DbStatus.CREATED this.calcCount() }) } } } } else { item.status = '格式错误' item.error = true } } calcCount() { let c = 0 for (const o of this.values) { if (o.status === '上传完成') { c += 1 } } this.count = c } isItemSaved(item: OrderItem) { return item.status === '上传完成' } itemStyle(item: OrderItem) { if (this.isItemSaved(item)) { return { 'color': 'green' } } else { return { 'color': 'red' } } } doAddNewItem(weight: string) { const orderItemUploadSubject = new Subject<OrderItem>() orderItemUploadSubject.debounceTime(500).subscribe(item => { this.doUpload(item) }) const newItem = { status: '待上传', subject: orderItemUploadSubject, weight: weight } this.values.push(newItem) this.refreshTableData() this.tablePageIndex = Math.ceil(this.values.length / this.tablePageSize) orderItemUploadSubject.next(newItem) } doAddEmptyItem() { // one subject per item to reduce conflicts const orderItemUploadSubject = new Subject<OrderItem>() orderItemUploadSubject.debounceTime(500).subscribe(item => { this.doUpload(item) }) this.values.push({ status: '待上传', subject: orderItemUploadSubject }) this.refreshTableData() } doCommit() { this.http.get<ApiRes<{ order: UserOrder, items: OrderItem[] }>>(`${API_USER_ORDER_DETAIL}/${this.order.id}`).subscribe(res => { const order = res.data.order const items = res.data.items const feItems = this.values.filter(item => { if (item.id) { return true } else { return false } }) const warnings = [] if (feItems.length !== items.length) { warnings.push('数量不一致') } for (let i = 0; i < feItems.length; ++i) { const feItem = feItems[i] const dbItem = items[i] if (!(dbItem && feItem.weight.toString() === dbItem.weight.toString() && this.isSameCar(feItem, dbItem))) { // tslint:disable-next-line:max-line-length warnings.push(`(序号 ${i + 1})=>前端: w-${feItem.weight},car-${feItem.car}, 数据库: w-${dbItem ? dbItem.weight : 'null'},car-${dbItem ? dbItem.car : 'null'}`) } } this.modal.confirm({ title: `${warnings.length > 0 ? '数据异常, 请检查后' : ''}确认提交`, content: `编号: ${order.id}, 姓名: ${order.seller}, 手机: ${order.phone}, 数量: ${items.length}. 
${warnings.join(',')}`, onOk: () => { this.order.status = OrderStatus.COMMITED this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_UPDATE, clearOrderField(this.order)).subscribe(updateRes => { this.router.navigate(['/user-order-list']) }) } }) }) } isSameCar(a: OrderItem, b: OrderItem) { if (a.car) { if (b.car) { return a.car === b.car } else { return false } } else { if (b.car) { return false } else { return true } } } goBack() { this.router.navigate(['/user-order-list']) } ngOnInit(): void { this.route.queryParams.subscribe(query => { if (query.hasOwnProperty('readonly')) { this.readonly = true } }) this.route.params.subscribe(params => { const id = params['id'] if (id) { // edit or view this.http.get<ApiRes<{ order: UserOrder, items: OrderItem[], car: CarOrder }>>(`${API_USER_ORDER_DETAIL}/${id}`).subscribe(res => { if (res.data.car) { this.defaultCar = res.data.car } this.order = res.data.order this.orderSubject.debounceTime(1000).subscribe(() => { this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_UPDATE, clearOrderField(this.order)).subscribe(updateRes => { }) }) this.count = res.data.items.length for (const item of res.data.items) { const orderItemUploadSubject = new Subject<OrderItem>() orderItemUploadSubject.debounceTime(500).subscribe(uploadItem => { this.doUpload(uploadItem) }) this.weightCache[item.id] = item.weight.toString() item.status = '上传完成' item.subject = orderItemUploadSubject this.values.push(item) } if (!this.readonly) { // this.doAddEmptyItem() } this.refreshTableData() }) } else { // new // this.doAddEmptyItem() this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_INSERT, {}).subscribe(res => { this.order = res.data this.orderSubject.debounceTime(800).subscribe(() => { this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_UPDATE, this.order).subscribe(updateRes => { }) }) }) } }) } }
.toString() === this.weightCache[it
conditional_block
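The doUpload method in the record above validates weights against the regex ^[1-9]\d{0,3}(\.\d{1}){0,1}$, i.e. one to four digits with no leading zero and at most one decimal digit. A Python re equivalent, for illustration only:

import re

# Same rule as the TypeScript regex in doUpload: 1-9999 with an optional single decimal digit.
WEIGHT_RE = re.compile(r"^[1-9]\d{0,3}(\.\d)?$")

for value in ["5", "12.3", "9999.9", "0.5", "12.34", "10000"]:
    print(value, bool(WEIGHT_RE.fullmatch(value)))
# 5 True, 12.3 True, 9999.9 True, 0.5 False, 12.34 False, 10000 False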
user-order.component.ts
import 'rxjs/add/operator/debounceTime'; import 'rxjs/add/operator/distinctUntilChanged'; import 'rxjs/add/operator/switchMap'; import 'rxjs/add/operator/switchMap'; import { Location } from '@angular/common'; import { HttpClient } from '@angular/common/http'; import { Component, OnInit } from '@angular/core'; import { ActivatedRoute, Router } from '@angular/router'; import { NzMessageService, NzModalService, NzModalSubject } from 'ng-zorro-antd'; import { Subject } from 'rxjs/Subject'; import { API_ORDER_ITEM_DELETE, API_ORDER_ITEM_INSERT, API_ORDER_ITEM_UPDATE, API_USER_ORDER_DETAIL, API_USER_ORDER_INSERT, API_USER_ORDER_UPDATE, API_ORDER_ITEM_BATCH_UPDATE_CAR, } from '../../api/egg.api'; import { ApiRes, ApiResObj } from '../../model/api.model'; import { CarOrder, clearNewOrderItem, clearOrderField, DbStatus, OrderItem, OrderStatus, UserOrder, } from '../../model/egg.model'; import { CarSelectorComponent } from '../car-selector/car-selector.component'; @Component({ templateUrl: './user-order.component.html', styleUrls: ['./user-order.component.css'] }) export class UserOrderComponent implements OnInit { allChecked = false indeterminate = false checkedNumber = 0 checkedItems: OrderItem[] = [] orderSubject = new Subject() order: UserOrder = {} values: OrderItem[] = [] count = 0 weightCache: { [key: string]: string } = {} readonly = false tablePageIndex = 1 tablePageSize = 5 pageSizeSelectorValues = [5, 10, 20, 30, 40, 50, 100, 200] defaultCar: CarOrder popVisible: { [key: string]: boolean } = {} constructor( private route: ActivatedRoute, private location: Location, private router: Router, private subject: NzModalSubject, private http: HttpClient, private message: NzMessageService, private modal: NzModalService, ) { } addToCar() { this.modal.open({ title: '选择车次', content: CarSelectorComponent, onOk() { }, onCancel() { }, footer: false, width: 640, componentParams: { onSelect: (selectedCar: CarOrder) => { const req = { car: selectedCar.id, ids: this.checkedItems.map(item => item.id), isByUser: false, } this.http.post<ApiResObj>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => { this.message.success('操作成功') this.checkedItems.forEach(item => item.car = req.car) }) } } }) } removeFromCar() { this.modal.confirm({ title: '移除', content: `确认移除吗?`, onOk: () => { const req = { ids: this.checkedItems.map(item => item.id), isByUser: false, } this.http.post<ApiRes<UserOrder>>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => { this.message.success('操作成功') this.checkedItems.forEach(item => item.car = undefined) }) } }) } refreshStatus() { const allChecked = this.values.every(value => value.checked === true) const allUnChecked = this.values.every(value => !value.checked) this.allChecked = allChecked this.indeterminate = (!allChecked) && (!allUnChecked) this.checkedItems = this.values.filter(value => value.checked) this.checkedNumber = this.checkedItems.length } checkAll(value) { if (value) { this.values.forEach(item => { item.checked = true }) } else { this.values.forEach(item => { item.checked = false }) } this.refreshStatus() } descCar(car: CarOrder) { if (car) { return `单号: ${car.id}, 姓名: ${car.driver}, 日期: ${car.createdAt}` } else { return '未选择' } } selectCar() { this.modal.open({ title: '选择默认车次(本单中的单位默认加入该车次)', content: CarSelectorComponent, onOk() { }, onCancel() { }, footer: false, componentParams: { data: this.defaultCar,
nSelect: (selectedCar: CarOrder) => { this.defaultCar = selectedCar this.order.car = this.defaultCar.id this.orderChange() } } }) } itemSelectCar(item: OrderItem, index: number) { let carOrder if (item.car) { carOrder = { id: item.car } } else { if (!item.id) { carOrder = this.defaultCar } } this.popVisible[index] = false this.modal.open({ title: '选择车次(只是该单位)', content: CarSelectorComponent, onOk() { }, onCancel() { }, footer: false, componentParams: { data: carOrder, onSelect: (selectedCar: CarOrder) => { const isNewCar = item.car !== selectedCar.id item.car = selectedCar.id if (item.id && isNewCar) { item.status = '待更新' item.subject.next(item) } } } }) } itemDeleteCar(item: OrderItem, index: number) { this.popVisible[index] = false item.car = null item.status = '待更新' item.subject.next(item) } refreshTableData() { this.values = [...this.values] } itemIndex(index: number) { return (this.tablePageIndex - 1) * this.tablePageSize + index } isFinished() { return OrderStatus.FINISHED === this.order.status } orderChange() { this.orderSubject.next() } onEnter(weight: string) { this.doAddNewItem(weight) } itemChange(item: OrderItem, index: number) { if (index === this.values.length - 1) { this.doAddEmptyItem() } if (item.id) { // format error corrected by deleting characters: the value is back to the cached weight if (item.weight.toString() === this.weightCache[item.id]) { if (item.error) { item.error = false item.status = '上传完成' } return } item.status = '待更新' } else { item.status = '待上传' } item.subject.next(item) } itemBlur(item: OrderItem, index: number) { if (item.weight && !this.readonly) { this.itemChange(item, index) } } remove(item: UserOrder, index: number) { if (item.id) { this.modal.confirm({ title: '删除', content: '确认删除?', onOk: () => { this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_DELETE, { id: item.id }).subscribe(res => { this.values.splice(index, 1) this.refreshTableData() this.calcCount() delete this.weightCache[item.id] }) } }) } else { this.values.splice(index, 1) this.refreshTableData() } } doUpload(item: OrderItem) { const r = /^[1-9]\d{0,3}(\.\d{1}){0,1}$/ if (item.weight && r.test(item.weight.toString())) { item.error = false if (item.id) { // update this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_UPDATE, clearNewOrderItem(item)).subscribe(res => { item.status = '上传完成' item.id = res.data.id this.weightCache[item.id] = item.weight.toString() this.calcCount() }) } else { // insert if (!item.dbStatus) { const user = this.order.id if (user) { item.user = user item.dbStatus = DbStatus.CREATING item.status = '数据创建中...' 
if (this.defaultCar && this.defaultCar.id) { item.car = this.defaultCar.id } this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_INSERT, clearNewOrderItem(item)).subscribe(res => { item.status = '上传完成' item.id = res.data.id this.weightCache[item.id] = item.weight.toString() item.dbStatus = DbStatus.CREATED this.calcCount() }) } } } } else { item.status = '格式错误' item.error = true } } calcCount() { let c = 0 for (const o of this.values) { if (o.status === '上传完成') { c += 1 } } this.count = c } isItemSaved(item: OrderItem) { return item.status === '上传完成' } itemStyle(item: OrderItem) { if (this.isItemSaved(item)) { return { 'color': 'green' } } else { return { 'color': 'red' } } } doAddNewItem(weight: string) { const orderItemUploadSubject = new Subject<OrderItem>() orderItemUploadSubject.debounceTime(500).subscribe(item => { this.doUpload(item) }) const newItem = { status: '待上传', subject: orderItemUploadSubject, weight: weight } this.values.push(newItem) this.refreshTableData() this.tablePageIndex = Math.ceil(this.values.length / this.tablePageSize) orderItemUploadSubject.next(newItem) } doAddEmptyItem() { // one item to one suject to reduce conflict const orderItemUploadSubject = new Subject<OrderItem>() orderItemUploadSubject.debounceTime(500).subscribe(item => { this.doUpload(item) }) this.values.push({ status: '待上传', subject: orderItemUploadSubject }) this.refreshTableData() } doCommit() { this.http.get<ApiRes<{ order: UserOrder, items: OrderItem[] }>>(`${API_USER_ORDER_DETAIL}/${this.order.id}`).subscribe(res => { const order = res.data.order const items = res.data.items const feItems = this.values.filter(item => { if (item.id) { return true } else { return false } }) const warnings = [] if (feItems.length !== items.length) { warnings.push('数量不一致') } for (let i = 0; i < feItems.length; ++i) { const feItem = feItems[i] const dbItem = items[i] if (!(dbItem && feItem.weight.toString() === dbItem.weight.toString() && this.isSameCar(feItem, dbItem))) { // tslint:disable-next-line:max-line-length warnings.push(`(序号 ${i + 1})=>前端: w-${feItem.weight},car-${feItem.car}, 数据库: w-${dbItem ? dbItem.weight : 'null'},car-${dbItem.car}`) } } this.modal.confirm({ title: `${warnings.length > 0 ? '数据异常, 请检查后' : ''}确认提交`, content: `编号: ${order.id}, 姓名: ${order.seller}, 手机: ${order.phone}, 数量: ${items.length}. 
${warnings.join(',')}`, onOk: () => { this.order.status = OrderStatus.COMMITED this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_UPDATE, clearOrderField(this.order)).subscribe(updateRes => { this.router.navigate(['/user-order-list']) }) } }) }) } isSameCar(a: OrderItem, b: OrderItem) { if (a.car) { if (b.car) { return a.car === b.car } else { return false } } else { if (b.car) { return false } else { return true } } } goBack() { this.router.navigate(['/user-order-list']) } ngOnInit(): void { this.route.queryParams.subscribe(query => { if (query.hasOwnProperty('readonly')) { this.readonly = true } }) this.route.params.subscribe(params => { const id = params['id'] if (id) { // edit or view this.http.get<ApiRes<{ order: UserOrder, items: OrderItem[], car: CarOrder }>>(`${API_USER_ORDER_DETAIL}/${id}`).subscribe(res => { if (res.data.car) { this.defaultCar = res.data.car } this.order = res.data.order this.orderSubject.debounceTime(1000).subscribe(() => { this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_UPDATE, clearOrderField(this.order)).subscribe(updateRes => { }) }) this.count = res.data.items.length for (const item of res.data.items) { const orderItemUploadSubject = new Subject<OrderItem>() orderItemUploadSubject.debounceTime(500).subscribe(uploadItem => { this.doUpload(uploadItem) }) this.weightCache[item.id] = item.weight.toString() item.status = '上传完成' item.subject = orderItemUploadSubject this.values.push(item) } if (!this.readonly) { // this.doAddEmptyItem() } this.refreshTableData() }) } else { // new // this.doAddEmptyItem() this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_INSERT, {}).subscribe(res => { this.order = res.data this.orderSubject.debounceTime(800).subscribe(() => { this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_UPDATE, this.order).subscribe(updateRes => { }) }) }) } }) } }
o
identifier_name
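A detail worth noting in these user-order records: every row gets its own Subject piped through debounceTime(500) into doUpload, so rapid edits to one weight collapse into a single request without delaying other rows. A rough Python analogue of that per-item debounce using threading.Timer (the Debouncer class and its names are invented for this sketch, not part of the component):

import threading

class Debouncer:
    """Collapse bursts of calls into one, like Subject + debounceTime(500)."""
    def __init__(self, delay_s, fn):
        self.delay_s = delay_s
        self.fn = fn
        self._timer = None

    def next(self, value):
        # Restart the countdown on every new value; only the last one fires.
        if self._timer is not None:
            self._timer.cancel()
        self._timer = threading.Timer(self.delay_s, self.fn, args=(value,))
        self._timer.start()

# One debouncer per order item, mirroring "one subject per item".
upload = Debouncer(0.5, lambda item: print("uploading", item))
for weight in ("1", "12", "12.5"):
    upload.next({"weight": weight})   # only the final edit is uploaded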
user-order.component.ts
import 'rxjs/add/operator/debounceTime'; import 'rxjs/add/operator/distinctUntilChanged'; import 'rxjs/add/operator/switchMap'; import 'rxjs/add/operator/switchMap'; import { Location } from '@angular/common'; import { HttpClient } from '@angular/common/http'; import { Component, OnInit } from '@angular/core'; import { ActivatedRoute, Router } from '@angular/router'; import { NzMessageService, NzModalService, NzModalSubject } from 'ng-zorro-antd'; import { Subject } from 'rxjs/Subject'; import { API_ORDER_ITEM_DELETE, API_ORDER_ITEM_INSERT, API_ORDER_ITEM_UPDATE, API_USER_ORDER_DETAIL, API_USER_ORDER_INSERT, API_USER_ORDER_UPDATE, API_ORDER_ITEM_BATCH_UPDATE_CAR, } from '../../api/egg.api'; import { ApiRes, ApiResObj } from '../../model/api.model'; import { CarOrder, clearNewOrderItem, clearOrderField, DbStatus, OrderItem, OrderStatus, UserOrder, } from '../../model/egg.model'; import { CarSelectorComponent } from '../car-selector/car-selector.component'; @Component({ templateUrl: './user-order.component.html', styleUrls: ['./user-order.component.css'] }) export class UserOrderComponent implements OnInit { allChecked = false indeterminate = false checkedNumber = 0 checkedItems: OrderItem[] = [] orderSubject = new Subject() order: UserOrder = {} values: OrderItem[] = [] count = 0 weightCache: { [key: string]: string } = {} readonly = false tablePageIndex = 1 tablePageSize = 5 pageSizeSelectorValues = [5, 10, 20, 30, 40, 50, 100, 200] defaultCar: CarOrder popVisible: { [key: string]: boolean } = {} constructor( private route: ActivatedRoute, private location: Location, private router: Router, private subject: NzModalSubject, private http: HttpClient, private message: NzMessageService, private modal: NzModalService, ) { } addToCar() { this.modal.open({ title: '选择车次', content: CarSelectorComponent, onOk() { }, onCancel() { }, footer: false, width: 640, componentParams: { onSelect: (selectedCar: CarOrder) => { const req = { car: selectedCar.id, ids: this.checkedItems.map(item => item.id), isByUser: false, } this.http.post<ApiResObj>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => { this.message.success('操作成功') this.checkedItems.forEach(item => item.car = req.car) }) } } }) } removeFromCar() { this.modal.confirm({ title: '移除', content: `确认移除吗?`, onOk: () => { const req = { ids: this.checkedItems.map(item => item.id), isByUser: false, } this.http.post<ApiRes<UserOrder>>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => { this.message.success('操作成功') this.checkedItems.forEach(item => item.car = undefined) }) } }) } refreshStatus() { const allChecked = this.values.every(value => value.checked === true) const allUnChecked = this.values.every(value => !value.checked) this.allChecked = allChecked this.indeterminate = (!allChecked) && (!allUnChecked) this.checkedItems = this.values.filter(value => value.checked) this.checkedNumber = this.checkedItems.length } checkAll(value) { if (value) { this.values.forEach(item => { item.checked = true }) } else { this.values.forEach(item => { item.checked = false }) } this.refreshStatus() } descCar(car: CarOrder) { if (car) { return `单号: ${car.id}, 姓名: ${car.driver}, 日期: ${car.createdAt}` } else { return '未选择' } } selectCar() { this.modal.open({ title: '选择默认车次(本单中的单位默认加入该车次)', content: CarSelectorComponent, onOk() { }, onCancel() { }, footer: false, componentParams: { data: this.de
ltCar, onSelect: (selectedCar: CarOrder) => { this.defaultCar = selectedCar this.order.car = this.defaultCar.id this.orderChange() } } }) } itemSelectCar(item: OrderItem, index: number) { let carOrder if (item.car) { carOrder = { id: item.car } } else { if (!item.id) { carOrder = this.defaultCar } } this.popVisible[index] = false this.modal.open({ title: '选择车次(只是该单位)', content: CarSelectorComponent, onOk() { }, onCancel() { }, footer: false, componentParams: { data: carOrder, onSelect: (selectedCar: CarOrder) => { const isNewCar = item.car !== selectedCar.id item.car = selectedCar.id if (item.id && isNewCar) { item.status = '待更新' item.subject.next(item) } } } }) } itemDeleteCar(item: OrderItem, index: number) { this.popVisible[index] = false item.car = null item.status = '待更新' item.subject.next(item) } refreshTableData() { this.values = [...this.values] } itemIndex(index: number) { return (this.tablePageIndex - 1) * this.tablePageSize + index } isFinished() { return OrderStatus.FINISHED === this.order.status } orderChange() { this.orderSubject.next() } onEnter(weight: string) { this.doAddNewItem(weight) } itemChange(item: OrderItem, index: number) { if (index === this.values.length - 1) { this.doAddEmptyItem() } if (item.id) { // format error corrected by deleting characters: the value is back to the cached weight if (item.weight.toString() === this.weightCache[item.id]) { if (item.error) { item.error = false item.status = '上传完成' } return } item.status = '待更新' } else { item.status = '待上传' } item.subject.next(item) } itemBlur(item: OrderItem, index: number) { if (item.weight && !this.readonly) { this.itemChange(item, index) } } remove(item: UserOrder, index: number) { if (item.id) { this.modal.confirm({ title: '删除', content: '确认删除?', onOk: () => { this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_DELETE, { id: item.id }).subscribe(res => { this.values.splice(index, 1) this.refreshTableData() this.calcCount() delete this.weightCache[item.id] }) } }) } else { this.values.splice(index, 1) this.refreshTableData() } } doUpload(item: OrderItem) { const r = /^[1-9]\d{0,3}(\.\d{1}){0,1}$/ if (item.weight && r.test(item.weight.toString())) { item.error = false if (item.id) { // update this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_UPDATE, clearNewOrderItem(item)).subscribe(res => { item.status = '上传完成' item.id = res.data.id this.weightCache[item.id] = item.weight.toString() this.calcCount() }) } else { // insert if (!item.dbStatus) { const user = this.order.id if (user) { item.user = user item.dbStatus = DbStatus.CREATING item.status = '数据创建中...' 
if (this.defaultCar && this.defaultCar.id) { item.car = this.defaultCar.id } this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_INSERT, clearNewOrderItem(item)).subscribe(res => { item.status = '上传完成' item.id = res.data.id this.weightCache[item.id] = item.weight.toString() item.dbStatus = DbStatus.CREATED this.calcCount() }) } } } } else { item.status = '格式错误' item.error = true } } calcCount() { let c = 0 for (const o of this.values) { if (o.status === '上传完成') { c += 1 } } this.count = c } isItemSaved(item: OrderItem) { return item.status === '上传完成' } itemStyle(item: OrderItem) { if (this.isItemSaved(item)) { return { 'color': 'green' } } else { return { 'color': 'red' } } } doAddNewItem(weight: string) { const orderItemUploadSubject = new Subject<OrderItem>() orderItemUploadSubject.debounceTime(500).subscribe(item => { this.doUpload(item) }) const newItem = { status: '待上传', subject: orderItemUploadSubject, weight: weight } this.values.push(newItem) this.refreshTableData() this.tablePageIndex = Math.ceil(this.values.length / this.tablePageSize) orderItemUploadSubject.next(newItem) } doAddEmptyItem() { // one subject per item to reduce conflicts const orderItemUploadSubject = new Subject<OrderItem>() orderItemUploadSubject.debounceTime(500).subscribe(item => { this.doUpload(item) }) this.values.push({ status: '待上传', subject: orderItemUploadSubject }) this.refreshTableData() } doCommit() { this.http.get<ApiRes<{ order: UserOrder, items: OrderItem[] }>>(`${API_USER_ORDER_DETAIL}/${this.order.id}`).subscribe(res => { const order = res.data.order const items = res.data.items const feItems = this.values.filter(item => { if (item.id) { return true } else { return false } }) const warnings = [] if (feItems.length !== items.length) { warnings.push('数量不一致') } for (let i = 0; i < feItems.length; ++i) { const feItem = feItems[i] const dbItem = items[i] if (!(dbItem && feItem.weight.toString() === dbItem.weight.toString() && this.isSameCar(feItem, dbItem))) { // tslint:disable-next-line:max-line-length warnings.push(`(序号 ${i + 1})=>前端: w-${feItem.weight},car-${feItem.car}, 数据库: w-${dbItem ? dbItem.weight : 'null'},car-${dbItem ? dbItem.car : 'null'}`) } } this.modal.confirm({ title: `${warnings.length > 0 ? '数据异常, 请检查后' : ''}确认提交`, content: `编号: ${order.id}, 姓名: ${order.seller}, 手机: ${order.phone}, 数量: ${items.length}. 
${warnings.join(',')}`, onOk: () => { this.order.status = OrderStatus.COMMITED this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_UPDATE, clearOrderField(this.order)).subscribe(updateRes => { this.router.navigate(['/user-order-list']) }) } }) }) } isSameCar(a: OrderItem, b: OrderItem) { if (a.car) { if (b.car) { return a.car === b.car } else { return false } } else { if (b.car) { return false } else { return true } } } goBack() { this.router.navigate(['/user-order-list']) } ngOnInit(): void { this.route.queryParams.subscribe(query => { if (query.hasOwnProperty('readonly')) { this.readonly = true } }) this.route.params.subscribe(params => { const id = params['id'] if (id) { // edit or view this.http.get<ApiRes<{ order: UserOrder, items: OrderItem[], car: CarOrder }>>(`${API_USER_ORDER_DETAIL}/${id}`).subscribe(res => { if (res.data.car) { this.defaultCar = res.data.car } this.order = res.data.order this.orderSubject.debounceTime(1000).subscribe(() => { this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_UPDATE, clearOrderField(this.order)).subscribe(updateRes => { }) }) this.count = res.data.items.length for (const item of res.data.items) { const orderItemUploadSubject = new Subject<OrderItem>() orderItemUploadSubject.debounceTime(500).subscribe(uploadItem => { this.doUpload(uploadItem) }) this.weightCache[item.id] = item.weight.toString() item.status = '上传完成' item.subject = orderItemUploadSubject this.values.push(item) } if (!this.readonly) { // this.doAddEmptyItem() } this.refreshTableData() }) } else { // new // this.doAddEmptyItem() this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_INSERT, {}).subscribe(res => { this.order = res.data this.orderSubject.debounceTime(800).subscribe(() => { this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_UPDATE, this.order).subscribe(updateRes => { }) }) }) } }) } }
fau
identifier_body
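The isSameCar helper above returns true when both car fields are unset or when both hold the same id. The same truth table, condensed into a few Python lines (the function name mirrors the component purely for illustration):

def is_same_car(a_car, b_car):
    # Matches the component's branching: both falsy -> True, one falsy -> False,
    # both set -> compare ids.
    if not a_car and not b_car:
        return True
    return bool(a_car) and bool(b_car) and a_car == b_car

assert is_same_car(None, None)
assert is_same_car(3, 3)
assert not is_same_car(3, None)
assert not is_same_car(3, 4)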
user-order.component.ts
import 'rxjs/add/operator/debounceTime'; import 'rxjs/add/operator/distinctUntilChanged'; import 'rxjs/add/operator/switchMap'; import 'rxjs/add/operator/switchMap'; import { Location } from '@angular/common'; import { HttpClient } from '@angular/common/http'; import { Component, OnInit } from '@angular/core'; import { ActivatedRoute, Router } from '@angular/router'; import { NzMessageService, NzModalService, NzModalSubject } from 'ng-zorro-antd'; import { Subject } from 'rxjs/Subject'; import { API_ORDER_ITEM_DELETE, API_ORDER_ITEM_INSERT, API_ORDER_ITEM_UPDATE, API_USER_ORDER_DETAIL, API_USER_ORDER_INSERT, API_USER_ORDER_UPDATE, API_ORDER_ITEM_BATCH_UPDATE_CAR, } from '../../api/egg.api'; import { ApiRes, ApiResObj } from '../../model/api.model'; import { CarOrder, clearNewOrderItem, clearOrderField, DbStatus, OrderItem, OrderStatus, UserOrder, } from '../../model/egg.model'; import { CarSelectorComponent } from '../car-selector/car-selector.component'; @Component({ templateUrl: './user-order.component.html', styleUrls: ['./user-order.component.css'] }) export class UserOrderComponent implements OnInit { allChecked = false indeterminate = false checkedNumber = 0 checkedItems: OrderItem[] = [] orderSubject = new Subject() order: UserOrder = {} values: OrderItem[] = [] count = 0 weightCache: { [key: string]: string } = {} readonly = false tablePageIndex = 1 tablePageSize = 5 pageSizeSelectorValues = [5, 10, 20, 30, 40, 50, 100, 200] defaultCar: CarOrder popVisible: { [key: string]: boolean } = {} constructor( private route: ActivatedRoute, private location: Location, private router: Router, private subject: NzModalSubject, private http: HttpClient, private message: NzMessageService, private modal: NzModalService, ) { } addToCar() { this.modal.open({ title: '选择车次', content: CarSelectorComponent, onOk() { }, onCancel() { }, footer: false, width: 640, componentParams: { onSelect: (selectedCar: CarOrder) => { const req = { car: selectedCar.id, ids: this.checkedItems.map(item => item.id), isByUser: false, } this.http.post<ApiResObj>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => { this.message.success('操作成功') this.checkedItems.forEach(item => item.car = req.car) }) } } }) } removeFromCar() { this.modal.confirm({ title: '移除', content: `确认移除吗?`, onOk: () => { const req = { ids: this.checkedItems.map(item => item.id), isByUser: false, } this.http.post<ApiRes<UserOrder>>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => { this.message.success('操作成功') this.checkedItems.forEach(item => item.car = undefined) }) } }) } refreshStatus() { const allChecked = this.values.every(value => value.checked === true) const allUnChecked = this.values.every(value => !value.checked)
this.allChecked = allChecked this.indeterminate = (!allChecked) && (!allUnChecked) this.checkedItems = this.values.filter(value => value.checked) this.checkedNumber = this.checkedItems.length } checkAll(value) { if (value) { this.values.forEach(item => { item.checked = true }) } else { this.values.forEach(item => { item.checked = false }) } this.refreshStatus() } descCar(car: CarOrder) { if (car) { return `单号: ${car.id}, 姓名: ${car.driver}, 日期: ${car.createdAt}` } else { return '未选择' } } selectCar() { this.modal.open({ title: '选择默认车次(本单中的单位默认加入该车次)', content: CarSelectorComponent, onOk() { }, onCancel() { }, footer: false, componentParams: { data: this.defaultCar, onSelect: (selectedCar: CarOrder) => { this.defaultCar = selectedCar this.order.car = this.defaultCar.id this.orderChange() } } }) } itemSelectCar(item: OrderItem, index: number) { let carOrder if (item.car) { carOrder = { id: item.car } } else { if (!item.id) { carOrder = this.defaultCar } } this.popVisible[index] = false this.modal.open({ title: '选择车次(只是该单位)', content: CarSelectorComponent, onOk() { }, onCancel() { }, footer: false, componentParams: { data: carOrder, onSelect: (selectedCar: CarOrder) => { const isNewCar = item.car !== selectedCar.id item.car = selectedCar.id if (item.id && isNewCar) { item.status = '待更新' item.subject.next(item) } } } }) } itemDeleteCar(item: OrderItem, index: number) { this.popVisible[index] = false item.car = null item.status = '待更新' item.subject.next(item) } refreshTableData() { this.values = [...this.values] } itemIndex(index: number) { return (this.tablePageIndex - 1) * this.tablePageSize + index } isFinished() { return OrderStatus.FINISHED === this.order.status } orderChange() { this.orderSubject.next() } onEnter(weight: string) { this.doAddNewItem(weight) } itemChange(item: OrderItem, index: number) { if (index === this.values.length - 1) { this.doAddEmptyItem() } if (item.id) { // format error corrected by deleting characters: the value is back to the cached weight if (item.weight.toString() === this.weightCache[item.id]) { if (item.error) { item.error = false item.status = '上传完成' } return } item.status = '待更新' } else { item.status = '待上传' } item.subject.next(item) } itemBlur(item: OrderItem, index: number) { if (item.weight && !this.readonly) { this.itemChange(item, index) } } remove(item: UserOrder, index: number) { if (item.id) { this.modal.confirm({ title: '删除', content: '确认删除?', onOk: () => { this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_DELETE, { id: item.id }).subscribe(res => { this.values.splice(index, 1) this.refreshTableData() this.calcCount() delete this.weightCache[item.id] }) } }) } else { this.values.splice(index, 1) this.refreshTableData() } } doUpload(item: OrderItem) { const r = /^[1-9]\d{0,3}(\.\d{1}){0,1}$/ if (item.weight && r.test(item.weight.toString())) { item.error = false if (item.id) { // update this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_UPDATE, clearNewOrderItem(item)).subscribe(res => { item.status = '上传完成' item.id = res.data.id this.weightCache[item.id] = item.weight.toString() this.calcCount() }) } else { // insert if (!item.dbStatus) { const user = this.order.id if (user) { item.user = user item.dbStatus = DbStatus.CREATING item.status = '数据创建中...' 
if (this.defaultCar && this.defaultCar.id) { item.car = this.defaultCar.id } this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_INSERT, clearNewOrderItem(item)).subscribe(res => { item.status = '上传完成' item.id = res.data.id this.weightCache[item.id] = item.weight.toString() item.dbStatus = DbStatus.CREATED this.calcCount() }) } } } } else { item.status = '格式错误' item.error = true } } calcCount() { let c = 0 for (const o of this.values) { if (o.status === '上传完成') { c += 1 } } this.count = c } isItemSaved(item: OrderItem) { return item.status === '上传完成' } itemStyle(item: OrderItem) { if (this.isItemSaved(item)) { return { 'color': 'green' } } else { return { 'color': 'red' } } } doAddNewItem(weight: string) { const orderItemUploadSubject = new Subject<OrderItem>() orderItemUploadSubject.debounceTime(500).subscribe(item => { this.doUpload(item) }) const newItem = { status: '待上传', subject: orderItemUploadSubject, weight: weight } this.values.push(newItem) this.refreshTableData() this.tablePageIndex = Math.ceil(this.values.length / this.tablePageSize) orderItemUploadSubject.next(newItem) } doAddEmptyItem() { // one subject per item to reduce conflicts const orderItemUploadSubject = new Subject<OrderItem>() orderItemUploadSubject.debounceTime(500).subscribe(item => { this.doUpload(item) }) this.values.push({ status: '待上传', subject: orderItemUploadSubject }) this.refreshTableData() } doCommit() { this.http.get<ApiRes<{ order: UserOrder, items: OrderItem[] }>>(`${API_USER_ORDER_DETAIL}/${this.order.id}`).subscribe(res => { const order = res.data.order const items = res.data.items const feItems = this.values.filter(item => { if (item.id) { return true } else { return false } }) const warnings = [] if (feItems.length !== items.length) { warnings.push('数量不一致') } for (let i = 0; i < feItems.length; ++i) { const feItem = feItems[i] const dbItem = items[i] if (!(dbItem && feItem.weight.toString() === dbItem.weight.toString() && this.isSameCar(feItem, dbItem))) { // tslint:disable-next-line:max-line-length warnings.push(`(序号 ${i + 1})=>前端: w-${feItem.weight},car-${feItem.car}, 数据库: w-${dbItem ? dbItem.weight : 'null'},car-${dbItem ? dbItem.car : 'null'}`) } } this.modal.confirm({ title: `${warnings.length > 0 ? '数据异常, 请检查后' : ''}确认提交`, content: `编号: ${order.id}, 姓名: ${order.seller}, 手机: ${order.phone}, 数量: ${items.length}. 
${warnings.join(',')}`, onOk: () => { this.order.status = OrderStatus.COMMITED this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_UPDATE, clearOrderField(this.order)).subscribe(updateRes => { this.router.navigate(['/user-order-list']) }) } }) }) } isSameCar(a: OrderItem, b: OrderItem) { if (a.car) { if (b.car) { return a.car === b.car } else { return false } } else { if (b.car) { return false } else { return true } } } goBack() { this.router.navigate(['/user-order-list']) } ngOnInit(): void { this.route.queryParams.subscribe(query => { if (query.hasOwnProperty('readonly')) { this.readonly = true } }) this.route.params.subscribe(params => { const id = params['id'] if (id) { // edit or view this.http.get<ApiRes<{ order: UserOrder, items: OrderItem[], car: CarOrder }>>(`${API_USER_ORDER_DETAIL}/${id}`).subscribe(res => { if (res.data.car) { this.defaultCar = res.data.car } this.order = res.data.order this.orderSubject.debounceTime(1000).subscribe(() => { this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_UPDATE, clearOrderField(this.order)).subscribe(updateRes => { }) }) this.count = res.data.items.length for (const item of res.data.items) { const orderItemUploadSubject = new Subject<OrderItem>() orderItemUploadSubject.debounceTime(500).subscribe(uploadItem => { this.doUpload(uploadItem) }) this.weightCache[item.id] = item.weight.toString() item.status = '上传完成' item.subject = orderItemUploadSubject this.values.push(item) } if (!this.readonly) { // this.doAddEmptyItem() } this.refreshTableData() }) } else { // new // this.doAddEmptyItem() this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_INSERT, {}).subscribe(res => { this.order = res.data this.orderSubject.debounceTime(800).subscribe(() => { this.http.post<ApiRes<UserOrder>>(API_USER_ORDER_UPDATE, this.order).subscribe(updateRes => { }) }) }) } }) } }
random_line_split
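itemIndex in these records maps a row's position on the current page back to its absolute index via (tablePageIndex - 1) * tablePageSize + index. A quick check with the component's default page size of 5 (the sample page and row numbers are arbitrary):

def item_index(page_index, page_size, index):
    # Absolute position of a row given a 1-based page_index, as in itemIndex().
    return (page_index - 1) * page_size + index

print(item_index(1, 5, 0))  # 0  -> first row of page 1
print(item_index(3, 5, 2))  # 12 -> third row of page 3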
app.js
var fs = require('fs'); var archiver = require('archiver'); var express = require('express'); var multer = require('multer'); var path = require('path'); var cheerio = require('cheerio'); var es = require('event-stream'); var parse = require('csv-parse'); var Datauri = require('datauri'); var lunr = require('lunr'); var yauzl = require("yauzl"); var app = express(); app.use(multer({ dest: './uploads/', onFileUploadComplete: function (file, request, response) { // NOTE: request and response were null (?) } })); app.set('port', (process.env.PORT || 5010)); app.use(express.static('public')); app.post('/upload', function(request, response) { console.log("request received:"); console.log(request.files); if (request.files && request.files.file) { console.log("Upload received " + request.files.file.originalname); var posterFile; if (request.files.posterFile) { posterFile = request.files.posterFile.path; } var transcriptFile; if (request.files.transcript_zip) { transcriptFile = request.files.transcript_zip.path; } //console.log(request.body); // form fields //console.log(request.files); // form files var converted = doConversion({ name: request.files.file.originalname, path: request.files.file.path, transcript_path: transcriptFile, posterFile: posterFile, response: response, id: request.body.requestid, title: request.body.title, mediaPath: request.body.path == undefined ? "" : request.body.path, zipfiles: request.body.zipfiles, bannerDownloadLabel: request.body.bannerDownloadLabel, bannerDownloadLink: request.body.bannerDownloadLink, /*courseZipfile: request.body.courseZipfile,*/ sampleMode: request.body.sampleMode, sampleModeLink: request.body.sampleModeLink, isbn: request.body.isbn, skin: request.body.skin }); } }); var http = require('http'); var server = http.Server(app); var io = require('socket.io')(server); var connections = []; io.on('connection', function (socket) { socket.on("id", function (id) { connections.push( { socket: socket, id: id }); }); }); server.listen((process.env.PORT || 5010), function () { console.log("video-toc-converter is running on port:" + app.get('port')); }); function sendProgress (id, progress) { for (var i = 0; i < connections.length; i++) { var c = connections[i]; if (c.id == id) { var obj = { progress: progress, id: id }; c.socket.emit("progress", obj); } } } function deleteFolderRecursive (path) { if (fs.existsSync(path)) { fs.readdirSync(path).forEach(function (file, index) { var curPath = path + "/" + file; var d = fs.statSync(curPath); if (d.isDirectory()) { deleteFolderRecursive(curPath); } else { // delete file fs.unlinkSync(curPath); } }); fs.rmdirSync(path); } } function makeAllPaths (dir) { var paths = dir.split(path.sep); var curPath = ""; for (var i = 0; i < paths.length; i++) { curPath += paths[i] + path.sep; try { fs.accessSync(curPath, fs.W_OK); } catch (err) { fs.mkdirSync(curPath); } } } function doConversion (options) { options.timestamp = Date.now(); options.idx = lunr(function () { this.field('title'); this.field('body'); }); var input = fs.readFileSync(options.path, "utf8"); var parseOptions = { delimiter: "\t", quote: "" }; parse(input, parseOptions, function(err, output) { if (!err) { processData(options, output); if (options.transcript_path) processTranscript(options); else doneWithTranscript(options); } else { console.log("error"); console.log(err); } }); } function processPosterImage (options) { if (options.posterFile) { var imageURI = new Datauri(options.posterFile); options.posterImageData = imageURI.content; } } function processData 
(options, data) { var toc = []; var lastPart = -1, lastLesson = -1, lastSublesson = -1, lastSubsublesson = -1, lastInfoLesson, lastDepth = undefined; var last = [undefined, undefined, undefined, undefined]; var counters = [-1, -1, -1, -1]; for (var i = 0; i < data.length; i++) { var row = data[i]; var obj = {}; var parsed = { part: row[0], lesson: row[1], short: row[2], sublesson: row[3], subsublesson: row[4], filename: row[5], duration: row[6], isDisabled: row[7] }; var description = parsed.part; if (description == "") description = parsed.lesson; if (description == "") description = parsed.sublesson; if (description == "") description = parsed.subsublesson; parsed.description = description; obj.video = parsed.filename; var duration = parsed.duration; if (duration) { obj.isVideo = true; obj.duration = duration; } else { obj.isVideo = false; } var info = parseInfoFromText(parsed); parseDepthsFromFields(obj, info, last, counters); obj.short = info.short; obj.desc = info.desc; obj.disabled = parsed.isDisabled; if (obj.desc == "Learning Objectives") { obj.short = obj.lesson + ".0"; } var curDepth = []; curDepth.push(obj.part); curDepth.push(obj.lesson); curDepth.push(obj.sublesson); curDepth.push(obj.subsublesson); obj.depth = ""; for (var j = 0; j < curDepth.length; j++) { if (curDepth[j] != -1 && curDepth[j] != undefined) { if (obj.depth != "") obj.depth += ","; obj.depth += curDepth[j]; } } lastPart = obj.part; lastLesson = obj.lesson; lastSublesson = obj.sublesson; lastSubsublesson = obj.subsublesson; lastInfoLesson = info.lesson; lastDepth = curDepth; toc.push(obj); } options.toc = toc; processPosterImage(options); options.lastPart = lastPart; options.lastLesson = lastLesson; options.lastSublesson = lastSublesson; options.lastDepth = lastDepth; } function parseDepthsFromFields (obj, info, last, counters) { if (info.part != "" && info.part != last.part) { counters[0] = counters[0] + 1; counters[1] = counters[2] = counters[3] = -1; obj.part = counters[0]; last.part = info.part; } else if (info.lesson != "" && info.lesson != last.lesson) { counters[1] = counters[1] + 1; counters[2] = counters[3] = -1; obj.lesson = counters[1]; last.lesson = info.lesson; } else if (info.sublesson != "" && info.sublesson != last.sublesson) { counters[2] = counters[2] + 1; counters[3] = -1; obj.sublesson = counters[2]; last.sublesson = info.sublesson; } else if (info.subsublesson != "" && info.subsublesson != last.subsublesson) { counters[3] = counters[3] + 1; obj.subsublesson = counters[3]; last.subsublesson = info.subsublesson; } if (counters[0] != -1) obj.part = counters[0]; if (counters[1] != -1) obj.lesson = counters[1]; if (counters[2] != -1) obj.sublesson = counters[2]; if (counters[3] != -1) obj.subsublesson = counters[3]; } function generateJavascriptTOC (options) { var s = "define([], function () {\n"; var returnObj = { toc: [], markers: [] }; var lastTopLevel = undefined; if (options.lastDepth) { for (var i = 0; i < options.lastDepth.length; i++) { if (options.lastDepth[i] != undefined) { lastTopLevel = parseInt(options.lastDepth[i]); break; } } } for (var i = 0; i < options.toc.length; i++) { var entry = options.toc[i]; var obj = {depth: entry.depth, short: entry.short, desc: entry.desc, duration: entry.duration}; if (entry.captions) obj.captions = entry.captions; if (entry.transcript) obj.transcript = entry.transcript; // add this TOC entry to the search index var doc = { "title": entry.desc, "id": i }; options.idx.add(doc); if (options.zipfiles) { /* // THEORY: lessons between 1 and n-1 get zipfile links var 
lessonNumber = parseInt(entry.lesson); if (lessonNumber > 0 && lessonNumber < options.lastLesson && (entry.sublesson === "" || entry.sublesson === undefined)) { var lessondigits = parseInt(entry.lesson); if (lessondigits < 10) lessondigits = "0" + lessondigits; obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + lessondigits + ".zip"); } */ // NEW THEORY: top-level depths get zipfile links var depths = entry.depth.split(","); var count = 0; var first_level = undefined; for (var j = 0; j < depths.length; j++) { if (depths[j] != undefined) { count++; if (first_level == undefined) first_level = depths[j]; } } if (count == 1) { var d = parseInt(first_level); if (d > 0 && d < lastTopLevel) { if (d < 10) d = "0" + d; obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + d + ".zip"); } } } if (entry.isVideo) { obj.video = path.join(options.mediaPath, entry.video.toLowerCase()); } else if (entry.video) { if (entry.video.toLowerCase().indexOf(".html") != -1) { obj.src = entry.video; } else { obj.video = entry.video; } } if (entry.disabled) { obj.disabled = true; } returnObj.toc.push(obj); } returnObj.projectTitle = options.title; returnObj.bannerDownloadLabel = options.bannerDownloadLabel; returnObj.bannerDownloadLink = options.bannerDownloadLink ? path.join(options.mediaPath, options.bannerDownloadLink) : undefined; returnObj.posterImageData = options.posterImageData; options.tocJS = s + "return " + JSON.stringify(returnObj) + ";\n});"; } function parseInfoFromText (params) { var obj = { part: params.part, lesson: params.lesson, sublesson: params.sublesson, subsublesson: params.subsublesson, short: params.short, desc: params.description }; var found = false; if (!found) { // look for: "Lesson _: Title" in filename reg = /^lesson (.*):\s(.*)/i; res = reg.exec(params.filename); if (res) { obj.short = res[1]; found = true; } } if (!found) { // X.Y Title in description reg = /^(\d{1,2})\.(\d{1,2})\s(.*)/; res = reg.exec(params.description); if (res) { obj.short = res[1] + "." 
+ res[2]; obj.desc = res[3]; found = true; } } return obj; } function streamToString (stream, cb) { var chunks = []; stream.on('data', function (chunk) { chunks.push(chunk); }); stream.on('end', function () { cb(chunks.join('')); }); } function processTranscript (options) { var returnDir = options.name + options.timestamp; var targetDir = "temp/" + returnDir + "/"; // unzip transcript zip // convert srt to vtt // associate videos with transcript files // add transcript (vtt or dbxf) to lunr search index // zip up vtt, dbxf, and search index yauzl.open(options.transcript_path, function (err, zipfile) { if (err) throw err; zipfile.on("close", function () { doneWithTranscript(options); }); zipfile.on("entry", function (entry) { if (/\/$/.test(entry.fileName)) { // directory file names end with '/' return; } zipfile.openReadStream(entry, function (err, readStream) { if (err) throw err; readStream.setEncoding('utf8'); // process the srt files if (entry.fileName.indexOf(".srt") != -1) { // THEORY: find the toc video file that most closely matches this srt file var tocReference = findTOCReference(options.toc, entry.fileName); var newFilename = entry.fileName.replace(".srt", ".vtt"); streamToString(readStream, function (s) { var writePath = path.join(targetDir + "/media/vtt/", newFilename); var filePath = path.dirname(writePath); makeAllPaths(filePath); s = s.replace(/\r/g, ""); var searchableText = ""; var output = "WEBVTT\n\n"; var lines = s.split("\n"); var count = 0; for (var i = 0; i < lines.length; i++) { var line = lines[i]; if (line == "") count = 0; else count++; if (count == 2) { // replace commas in timing lines with periods line = line.replace(/,/g, "."); // add line position to move the cue up a little (CSS was ineffective) line += " line:80%"; } else if (count > 2) { searchableText += line; } output += line + "\n"; } output = output.trim(); fs.writeFileSync(writePath, output, {encoding: "UTF-8", flag: "w"}); if (tocReference) { var doc = { "title": tocReference.title, "body": searchableText, "id": tocReference.index }; options.toc[tocReference.index].captions = "media/vtt/" + newFilename; var transcriptFilename = path.basename(newFilename, path.extname(newFilename)) + ".dfxp"; options.toc[tocReference.index].transcript = "media/transcript/" + transcriptFilename; options.idx.add(doc); } }); } else if (entry.fileName.indexOf(".dfxp") != -1) { var writePath = path.join(targetDir + "/media/transcript/", entry.fileName); // ensure parent directory exists var filePath = path.dirname(writePath); makeAllPaths(filePath); // write file readStream.pipe(fs.createWriteStream(writePath)); } }); }); }); } function findTOCReference (toc, filename) { var file = path.basename(filename, path.extname(filename)); // assuming the transcript file is in this format: 9780789756350-02_04_01.vtt var dash = file.indexOf("-"); if (dash != -1) { file = file.substr(dash + 1); } if (file)
return undefined; } function includeSearch (archive, options) { var search_index = JSON.stringify(options.idx.toJSON()); var search_module = "define([], function () { return " + search_index + "; });"; archive.append(search_module, { name: "/search_index.js" }); } function includeTranscriptFolders (archive, options) { var returnDir = options.name + options.timestamp; var targetDir = "temp/" + returnDir + "/"; makeAllPaths(targetDir + "media/vtt/"); archive.directory(targetDir + "media/vtt/", "/media/vtt/"); makeAllPaths(targetDir + "media/transcript/"); archive.directory(targetDir + "media/transcript/", "/media/transcript/"); } function doneWithTranscript (options) { generateJavascriptTOC(options); writeZip(options); } function completelyDone (options) { sendProgress(options.id, 100); options.response.json({"link": options.outputFile}); } function writeZip (options) { var returnDir = options.name + options.timestamp; var targetDir = path.join("temp", returnDir, "output"); var outputFile = "conversions/" + returnDir + ".zip"; var outputPath = "public/" + outputFile; var dir = path.dirname(outputPath); makeAllPaths(dir); var folder = options.title; var outputStream = fs.createWriteStream(outputPath); var archive = archiver('zip'); outputStream.on("close", function () { doneWithZip(options, outputStream); }); archive.pipe(outputStream); archive.append(options.tocJS, { name: "/toc.js"}); includeViewer(archive, options); includeSearch(archive, options); includeTranscriptFolders(archive, options); archive.finalize(); options.outputFile = outputFile; } function doneWithZip (options, outputStream) { if (outputStream) { doCleanup(options, outputStream); } completelyDone(options); } function doCleanup (options, outputStream) { console.log("cleaning up"); var returnDir = options.name + options.timestamp; var targetDir = "temp/" + returnDir + "/"; outputStream.close(); fs.unlinkSync(options.path); deleteFolderRecursive(targetDir); console.log("done"); } function includeViewer (archive, options) { archive.file("public/runcourse.html", { name: "/runcourse.html" }); archive.file("public/viewer.js", { name: "/viewer.js" }); var settings = { title: options.title, type: "metadata", infinite_scrolling: false, skin: options.skin }; if (options.sampleMode) { settings.buyButton = options.sampleModeLink; var pw = fs.readFileSync("public/paywall.html").toString(); pw = pw.replace("path-to-buy", options.sampleModeLink); archive.append(pw, { name: "/paywall.html" }); } var settings_string = JSON.stringify(settings); var manifest = "define([], function () { return " + settings_string + "; });"; archive.append(manifest, { name: "/manifest.js" }); }
{ for (var i = 0; i < toc.length; i++) { var entry = toc[i]; if (entry.video && entry.video.indexOf(file) != -1) { return { title: entry.desc, index: i } } } }
conditional_block
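For illustration, here is the matching convention behind the findTOCReference block above as a self-contained sketch: the transcript name is stripped of its extension and of the ISBN prefix up to the first dash, then substring-matched against each TOC entry's video path. The sample toc and filenames below are hypothetical.

var path = require('path');

function findTOCReference (toc, filename) {
    // "9780789756350-02_04_01.srt" -> "9780789756350-02_04_01" -> "02_04_01"
    var file = path.basename(filename, path.extname(filename));
    var dash = file.indexOf('-');
    if (dash != -1) {
        file = file.substr(dash + 1);
    }
    if (!file) return undefined;
    // substring match against each entry's video path
    for (var i = 0; i < toc.length; i++) {
        var entry = toc[i];
        if (entry.video && entry.video.indexOf(file) != -1) {
            return { title: entry.desc, index: i };
        }
    }
    return undefined;
}

var sampleToc = [{ video: 'media/02_04_01.mp4', desc: 'Sample sublesson' }];
console.log(findTOCReference(sampleToc, '9780789756350-02_04_01.srt'));
// -> { title: 'Sample sublesson', index: 0 }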
app.js
var fs = require('fs'); var archiver = require('archiver'); var express = require('express'); var multer = require('multer'); var path = require('path'); var cheerio = require('cheerio'); var es = require('event-stream'); var parse = require('csv-parse'); var Datauri = require('datauri'); var lunr = require('lunr'); var yauzl = require("yauzl"); var app = express(); app.use(multer({ dest: './uploads/', onFileUploadComplete: function (file, request, response) { // NOTE: request and response were null (?) } })); app.set('port', (process.env.PORT || 5010)); app.use(express.static('public')); app.post('/upload', function(request, response) { console.log("request received:"); console.log(request.files); if (request.files && request.files.file) { console.log("Upload received " + request.files.file.originalname); var posterFile; if (request.files.posterFile) { posterFile = request.files.posterFile.path; } var transcriptFile; if (request.files.transcript_zip) { transcriptFile = request.files.transcript_zip.path; } //console.log(request.body); // form fields //console.log(request.files); // form files var converted = doConversion({ name: request.files.file.originalname, path: request.files.file.path, transcript_path: transcriptFile, posterFile: posterFile, response: response, id: request.body.requestid, title: request.body.title, mediaPath: request.body.path == undefined ? "" : request.body.path, zipfiles: request.body.zipfiles, bannerDownloadLabel: request.body.bannerDownloadLabel, bannerDownloadLink: request.body.bannerDownloadLink, /*courseZipfile: request.body.courseZipfile,*/ sampleMode: request.body.sampleMode, sampleModeLink: request.body.sampleModeLink, isbn: request.body.isbn, skin: request.body.skin }); } }); var http = require('http'); var server = http.Server(app); var io = require('socket.io')(server); var connections = []; io.on('connection', function (socket) { socket.on("id", function (id) { connections.push( { socket: socket, id: id }); }); }); server.listen((process.env.PORT || 5010), function () { console.log("video-toc-converter is running on port:" + app.get('port')); }); function sendProgress (id, progress) { for (var i = 0; i < connections.length; i++) { var c = connections[i]; if (c.id == id) { var obj = { progress: progress, id: id }; c.socket.emit("progress", obj); } } } function deleteFolderRecursive (path) { if (fs.existsSync(path)) { fs.readdirSync(path).forEach(function (file, index) { var curPath = path + "/" + file; var d = fs.statSync(curPath); if (d.isDirectory()) { deleteFolderRecursive(curPath); } else { // delete file fs.unlinkSync(curPath); } }); fs.rmdirSync(path); } } function makeAllPaths (dir)
function doConversion (options) { options.timestamp = Date.now(); options.idx = lunr(function () { this.field('title'); this.field('body'); }); var input = fs.readFileSync(options.path, "utf8"); var parseOptions = { delimiter: "\t", quote: "" }; parse(input, parseOptions, function(err, output) { if (!err) { processData(options, output); if (options.transcript_path) processTranscript(options); else doneWithTranscript(options); } else { console.log("error"); console.log(err); } }); } function processPosterImage (options) { if (options.posterFile) { var imageURI = new Datauri(options.posterFile); options.posterImageData = imageURI.content; } } function processData (options, data) { var toc = []; var lastPart = -1, lastLesson = -1, lastSublesson = -1, lastSubsublesson = -1, lastDepth = undefined; var last = [undefined, undefined, undefined, undefined]; var counters = [-1, -1, -1, -1]; for (var i = 0; i < data.length; i++) { var row = data[i]; var obj = {}; var parsed = { part: row[0], lesson: row[1], short: row[2], sublesson: row[3], subsublesson: row[4], filename: row[5], duration: row[6], isDisabled: row[7] }; var description = parsed.part; if (description == "") description = parsed.lesson; if (description == "") description = parsed.sublesson; if (description == "") description = parsed.subsublesson; parsed.description = description; obj.video = parsed.filename; var duration = parsed.duration; if (duration) { obj.isVideo = true; obj.duration = duration; } else { obj.isVideo = false; } var info = parseInfoFromText(parsed); parseDepthsFromFields(obj, info, last, counters); obj.short = info.short; obj.desc = info.desc; obj.disabled = parsed.isDisabled; if (obj.desc == "Learning Objectives") { obj.short = obj.lesson + ".0"; } var curDepth = []; curDepth.push(obj.part); curDepth.push(obj.lesson); curDepth.push(obj.sublesson); curDepth.push(obj.subsublesson); obj.depth = ""; for (var j = 0; j < curDepth.length; j++) { if (curDepth[j] != -1 && curDepth[j] != undefined) { if (obj.depth != "") obj.depth += ","; obj.depth += curDepth[j]; } } lastPart = obj.part; lastLesson = obj.lesson; lastSublesson = obj.sublesson; lastSubsublesson = obj.subsublesson; lastInfoLesson = info.lesson; lastDepth = curDepth; toc.push(obj); } options.toc = toc; processPosterImage(options); options.lastPart = lastPart; options.lastLesson = lastLesson; options.lastSublesson = lastSublesson; options.lastDepth = lastDepth; } function parseDepthsFromFields (obj, info, last, counters) { if (info.part != "" && info.part != last.part) { counters[0] = counters[0] + 1; counters[1] = counters[2] = counters[3] = -1; obj.part = counters[0]; last.part = info.part; } else if (info.lesson != "" && info.lesson != last.lesson) { counters[1] = counters[1] + 1; counters[2] = counters[3] = -1; obj.lesson = counters[1]; last.lesson = info.lesson; } else if (info.sublesson != "" && info.sublesson != last.sublesson) { counters[2] = counters[2] + 1; counters[3] = -1; obj.sublesson = counters[2]; last.sublesson = info.sublesson; } else if (info.subsublesson != "" && info.subsublesson != last.subsublesson) { counters[3] = counters[3] + 1; obj.subsublesson = counters[3]; last.subsublesson = info.subsublesson; } if (counters[0] != -1) obj.part = counters[0]; if (counters[1] != -1) obj.lesson = counters[1]; if (counters[2] != -1) obj.sublesson = counters[2]; if (counters[3] != -1) obj.subsublesson = counters[3]; } function generateJavascriptTOC (options) { var s = "define([], function () {\n"; var returnObj = { toc: [], markers: [] }; var lastTopLevel =
undefined; if (options.lastDepth) { for (var i = 0; i < options.lastDepth.length; i++) { if (options.lastDepth[i] != undefined) { lastTopLevel = parseInt(options.lastDepth[i]); break; } } } for (var i = 0; i < options.toc.length; i++) { var entry = options.toc[i]; var obj = {depth: entry.depth, short: entry.short, desc: entry.desc, duration: entry.duration}; if (entry.captions) obj.captions = entry.captions; if (entry.transcript) obj.transcript = entry.transcript; // add this TOC entry to the search index var doc = { "title": entry.desc, "id": i }; options.idx.add(doc); if (options.zipfiles) { /* // THEORY: lessons between 1 and n-1 get zipfile links var lessonNumber = parseInt(entry.lesson); if (lessonNumber > 0 && lessonNumber < options.lastLesson && (entry.sublesson === "" || entry.sublesson === undefined)) { var lessondigits = parseInt(entry.lesson); if (lessondigits < 10) lessondigits = "0" + lessondigits; obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + lessondigits + ".zip"); } */ // NEW THEORY: top-level depths get zipfile links var depths = entry.depth.split(","); var count = 0; var first_level = undefined; for (var j = 0; j < depths.length; j++) { if (depths[j] != undefined) { count++; if (first_level == undefined) first_level = depths[j]; } } if (count == 1) { var d = parseInt(first_level); if (d > 0 && d < lastTopLevel) { if (d < 10) d = "0" + d; obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + d + ".zip"); } } } if (entry.isVideo) { obj.video = path.join(options.mediaPath, entry.video.toLowerCase()); } else if (entry.video) { if (entry.video.toLowerCase().indexOf(".html") != -1) { obj.src = entry.video; } else { obj.video = entry.video; } } if (entry.disabled) { obj.disabled = true; } returnObj.toc.push(obj); } returnObj.projectTitle = options.title; returnObj.bannerDownloadLabel = options.bannerDownloadLabel; returnObj.bannerDownloadLink = options.bannerDownloadLink ? path.join(options.mediaPath, options.bannerDownloadLink) : undefined; returnObj.posterImageData = options.posterImageData; options.tocJS = s + "return " + JSON.stringify(returnObj) + ";\n});"; } function parseInfoFromText (params) { var obj = { part: params.part, lesson: params.lesson, sublesson: params.sublesson, subsublesson: params.subsublesson, short: params.short, desc: params.description }; var found = false; if (!found) { // look for: "Lesson _: Title" in filename reg = /^lesson (.*):\s(.*)/i; res = reg.exec(params.filename); if (res) { obj.short = res[1]; found = true; } } if (!found) { // X.Y Title in description reg = /^(\d{1,2})\.(\d{1,2})\s(.*)/; res = reg.exec(params.description); if (res) { obj.short = res[1] + "." 
+ res[2]; obj.desc = res[3]; found = true; } } return obj; } function streamToString (stream, cb) { var chunks = []; stream.on('data', function (chunk) { chunks.push(chunk); }); stream.on('end', function () { cb(chunks.join('')); }); } function processTranscript (options) { var returnDir = options.name + options.timestamp; var targetDir = "temp/" + returnDir + "/"; // unzip transcript zip // convert srt to vtt // associate videos with transcript files // add transcript (vtt or dbxf) to lunr search index // zip up vtt, dbxf, and search index yauzl.open(options.transcript_path, function (err, zipfile) { if (err) throw err; zipfile.on("close", function () { doneWithTranscript(options); }); zipfile.on("entry", function (entry) { if (/\/$/.test(entry.fileName)) { // directory file names end with '/' return; } zipfile.openReadStream(entry, function (err, readStream) { if (err) throw err; readStream.setEncoding('utf8'); // process the srt files if (entry.fileName.indexOf(".srt") != -1) { // THEORY: find the toc video file that most closely matches this srt file var tocReference = findTOCReference(options.toc, entry.fileName); var newFilename = entry.fileName.replace(".srt", ".vtt"); streamToString(readStream, function (s) { var writePath = path.join(targetDir + "/media/vtt/", newFilename); var filePath = path.dirname(writePath); makeAllPaths(filePath); s = s.replace(/\r/g, ""); var searchableText = ""; var output = "WEBVTT\n\n"; var lines = s.split("\n"); var count = 0; for (var i = 0; i < lines.length; i++) { var line = lines[i]; if (line == "") count = 0; else count++; if (count == 2) { // replace commas in timing lines with periods line = line.replace(/,/g, "."); // add line position to move the cue up a little (CSS was ineffective) line += " line:80%"; } else if (count > 2) { searchableText += line; } output += line + "\n"; } output = output.trim(); fs.writeFileSync(writePath, output, {encoding: "UTF-8", flag: "w"}); if (tocReference) { var doc = { "title": tocReference.title, "body": searchableText, "id": tocReference.index }; options.toc[tocReference.index].captions = "media/vtt/" + newFilename; var transcriptFilename = path.basename(newFilename, path.extname(newFilename)) + ".dfxp"; options.toc[tocReference.index].transcript = "media/transcript/" + transcriptFilename; options.idx.add(doc); } }); } else if (entry.fileName.indexOf(".dfxp") != -1) { var writePath = path.join(targetDir + "/media/transcript/", entry.fileName); // ensure parent directory exists var filePath = path.dirname(writePath); makeAllPaths(filePath); // write file readStream.pipe(fs.createWriteStream(writePath)); } }); }); }); } function findTOCReference (toc, filename) { var file = path.basename(filename, path.extname(filename)); // assuming the transcript file is in this format: 9780789756350-02_04_01.vtt var dash = file.indexOf("-"); if (dash != -1) { file = file.substr(dash + 1); } if (file) { for (var i = 0; i < toc.length; i++) { var entry = toc[i]; if (entry.video && entry.video.indexOf(file) != -1) { return { title: entry.desc, index: i } } } } return undefined; } function includeSearch (archive, options) { var search_index = JSON.stringify(options.idx.toJSON()); var search_module = "define([], function () { return " + search_index + "; });"; archive.append(search_module, { name: "/search_index.js" }); } function includeTranscriptFolders (archive, options) { var returnDir = options.name + options.timestamp; var targetDir = "temp/" + returnDir + "/"; makeAllPaths(targetDir + "media/vtt/"); 
archive.directory(targetDir + "media/vtt/", "/media/vtt/"); makeAllPaths(targetDir + "media/transcript/"); archive.directory(targetDir + "media/transcript/", "/media/transcript/"); } function doneWithTranscript (options) { generateJavascriptTOC(options); writeZip(options); } function completelyDone (options) { sendProgress(options.id, 100); options.response.json({"link": options.outputFile}); } function writeZip (options) { var returnDir = options.name + options.timestamp; var targetDir = path.join("temp", returnDir, "output"); var outputFile = "conversions/" + returnDir + ".zip"; var outputPath = "public/" + outputFile; var dir = path.dirname(outputPath); makeAllPaths(dir); var folder = options.title; var outputStream = fs.createWriteStream(outputPath); var archive = archiver('zip'); outputStream.on("close", function () { doneWithZip(options, outputStream); }); archive.pipe(outputStream); archive.append(options.tocJS, { name: "/toc.js"}); includeViewer(archive, options); includeSearch(archive, options); includeTranscriptFolders(archive, options); archive.finalize(); options.outputFile = outputFile; } function doneWithZip (options, outputStream) { if (outputStream) { doCleanup(options, outputStream); } completelyDone(options); } function doCleanup (options, outputStream) { console.log("cleaning up"); var returnDir = options.name + options.timestamp; var targetDir = "temp/" + returnDir + "/"; outputStream.close(); fs.unlinkSync(options.path); deleteFolderRecursive(targetDir); console.log("done"); } function includeViewer (archive, options) { archive.file("public/runcourse.html", { name: "/runcourse.html" }); archive.file("public/viewer.js", { name: "/viewer.js" }); var settings = { title: options.title, type: "metadata", infinite_scrolling: false, skin: options.skin }; if (options.sampleMode) { settings.buyButton = options.sampleModeLink; var pw = fs.readFileSync("public/paywall.html").toString(); pw = pw.replace("path-to-buy", options.sampleModeLink); archive.append(pw, { name: "/paywall.html" }); } var settings_string = JSON.stringify(settings); var manifest = "define([], function () { return " + settings_string + "; });"; archive.append(manifest, { name: "/manifest.js" }); }
{ var paths = dir.split(path.sep); var curPath = ""; for (var i = 0; i < paths.length; i++) { curPath += paths[i] + path.sep; try { fs.accessSync(curPath, fs.W_OK); } catch (err) { fs.mkdirSync(curPath); } } }
identifier_body
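The identifier_body above is makeAllPaths, which walks the path one segment at a time, probing each with fs.accessSync and creating missing segments with fs.mkdirSync. A minimal sketch of the same effect on Node 10.12+ follows (an alternative shown for illustration, not what app.js does; the demo path is hypothetical):

var fs = require('fs');
var path = require('path');

// One call creates every missing segment; existing segments are untouched.
function makeAllPathsRecursive (dir) {
    fs.mkdirSync(dir, { recursive: true });
}

var demoDir = path.join('temp', 'demo', 'media', 'vtt');
makeAllPathsRecursive(demoDir);
console.log(fs.existsSync(demoDir)); // true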
app.js
var fs = require('fs'); var archiver = require('archiver'); var express = require('express'); var multer = require('multer'); var path = require('path'); var cheerio = require('cheerio'); var es = require('event-stream'); var parse = require('csv-parse'); var Datauri = require('datauri'); var lunr = require('lunr'); var yauzl = require("yauzl"); var app = express(); app.use(multer({ dest: './uploads/', onFileUploadComplete: function (file, request, response) { // NOTE: request and response were null (?) } })); app.set('port', (process.env.PORT || 5010)); app.use(express.static('public')); app.post('/upload', function(request, response) { console.log("request received:"); console.log(request.files); if (request.files && request.files.file) { console.log("Upload received " + request.files.file.originalname); var posterFile; if (request.files.posterFile) { posterFile = request.files.posterFile.path; } var transcriptFile; if (request.files.transcript_zip) { transcriptFile = request.files.transcript_zip.path; } //console.log(request.body); // form fields //console.log(request.files); // form files var converted = doConversion({ name: request.files.file.originalname, path: request.files.file.path, transcript_path: transcriptFile, posterFile: posterFile, response: response, id: request.body.requestid, title: request.body.title, mediaPath: request.body.path == undefined ? "" : request.body.path, zipfiles: request.body.zipfiles, bannerDownloadLabel: request.body.bannerDownloadLabel, bannerDownloadLink: request.body.bannerDownloadLink, /*courseZipfile: request.body.courseZipfile,*/ sampleMode: request.body.sampleMode, sampleModeLink: request.body.sampleModeLink, isbn: request.body.isbn, skin: request.body.skin }); } }); var http = require('http'); var server = http.Server(app); var io = require('socket.io')(server); var connections = []; io.on('connection', function (socket) { socket.on("id", function (id) { connections.push( { socket: socket, id: id }); }); }); server.listen((process.env.PORT || 5010), function () { console.log("video-toc-converter is running on port:" + app.get('port')); }); function sendProgress (id, progress) { for (var i = 0; i < connections.length; i++) { var c = connections[i]; if (c.id == id) { var obj = { progress: progress, id: id }; c.socket.emit("progress", obj); } } } function deleteFolderRecursive (path) { if (fs.existsSync(path)) { fs.readdirSync(path).forEach(function (file, index) { var curPath = path + "/" + file; var d = fs.statSync(curPath); if (d.isDirectory()) { deleteFolderRecursive(curPath); } else { // delete file fs.unlinkSync(curPath); } }); fs.rmdirSync(path); } } function makeAllPaths (dir) { var paths = dir.split(path.sep); var curPath = ""; for (var i = 0; i < paths.length; i++) { curPath += paths[i] + path.sep; try { fs.accessSync(curPath, fs.W_OK); } catch (err) { fs.mkdirSync(curPath); } } } function doConversion (options) { options.timestamp = Date.now(); options.idx = lunr(function () { this.field('title'); this.field('body'); }); var input = fs.readFileSync(options.path, "utf8"); var parseOptions = { delimiter: "\t", quote: "" }; parse(input, parseOptions, function(err, output) { if (!err) { processData(options, output); if (options.transcript_path) processTranscript(options); else doneWithTranscript(options); } else { console.log("error"); console.log(err); } }); } function processPosterImage (options) { if (options.posterFile) { var imageURI = new Datauri(options.posterFile); options.posterImageData = imageURI.content; } } function processData 
(options, data) { var toc = []; var lastPart = -1, lastLesson = -1, lastSublesson = -1, lastSubsublesson = -1, lastDepth = undefined; var last = [undefined, undefined, undefined, undefined]; var counters = [-1, -1, -1, -1]; for (var i = 0; i < data.length; i++) { var row = data[i]; var obj = {}; var parsed = { part: row[0], lesson: row[1], short: row[2], sublesson: row[3], subsublesson: row[4], filename: row[5], duration: row[6], isDisabled: row[7] }; var description = parsed.part; if (description == "") description = parsed.lesson; if (description == "") description = parsed.sublesson; if (description == "") description = parsed.subsublesson; parsed.description = description; obj.video = parsed.filename; var duration = parsed.duration; if (duration) { obj.isVideo = true; obj.duration = duration; } else { obj.isVideo = false; } var info = parseInfoFromText(parsed); parseDepthsFromFields(obj, info, last, counters); obj.short = info.short; obj.desc = info.desc; obj.disabled = parsed.isDisabled; if (obj.desc == "Learning Objectives") { obj.short = obj.lesson + ".0"; } var curDepth = []; curDepth.push(obj.part); curDepth.push(obj.lesson); curDepth.push(obj.sublesson); curDepth.push(obj.subsublesson); obj.depth = ""; for (var j = 0; j < curDepth.length; j++) { if (curDepth[j] != -1 && curDepth[j] != undefined) { if (obj.depth != "") obj.depth += ","; obj.depth += curDepth[j]; } } lastPart = obj.part; lastLesson = obj.lesson; lastSublesson = obj.sublesson; lastSubsublesson = obj.subsublesson; lastInfoLesson = info.lesson; lastDepth = curDepth; toc.push(obj); } options.toc = toc; processPosterImage(options); options.lastPart = lastPart; options.lastLesson = lastLesson; options.lastSublesson = lastSublesson; options.lastDepth = lastDepth; } function parseDepthsFromFields (obj, info, last, counters) { if (info.part != "" && info.part != last.part) { counters[0] = counters[0] + 1; counters[1] = counters[2] = counters[3] = -1; obj.part = counters[0]; last.part = info.part; } else if (info.lesson != "" && info.lesson != last.lesson) { counters[1] = counters[1] + 1; counters[2] = counters[3] = -1; obj.lesson = counters[1]; last.lesson = info.lesson; } else if (info.sublesson != "" && info.sublesson != last.sublesson) { counters[2] = counters[2] + 1; counters[3] = -1; obj.sublesson = counters[2]; last.sublesson = info.sublesson; } else if (info.subsublesson != "" && info.subsublesson != last.subsublesson) { counters[3] = counters[3] + 1; obj.subsublesson = counters[3]; last.subsublesson = info.subsublesson; } if (counters[0] != -1) obj.part = counters[0]; if (counters[1] != -1) obj.lesson = counters[1]; if (counters[2] != -1) obj.sublesson = counters[2]; if (counters[3] != -1) obj.subsublesson = counters[3]; } function generateJavascriptTOC (options) { var s = "define([], function () {\n"; var returnObj = { toc: [], markers: [] }; var lastTopLevel = undefined; if (options.lastDepth) { for (var i = 0; i < options.lastDepth.length; i++) { if (options.lastDepth[i] != undefined) { lastTopLevel = parseInt(options.lastDepth[i]); break; } } } for (var i = 0; i < options.toc.length; i++) { var entry = options.toc[i]; var obj = {depth: entry.depth, short: entry.short, desc: entry.desc, duration: entry.duration}; if (entry.captions) obj.captions = entry.captions; if (entry.transcript) obj.transcript = entry.transcript; // add this TOC entry to the search index var doc = { "title": entry.desc, "id": i }; options.idx.add(doc); if (options.zipfiles) { /* // THEORY: lessons between 1 and n-1 get zipfile links var
lessonNumber = parseInt(entry.lesson); if (lessonNumber > 0 && lessonNumber < options.lastLesson && (entry.sublesson === "" || entry.sublesson === undefined)) { var lessondigits = parseInt(entry.lesson); if (lessondigits < 10) lessondigits = "0" + lessondigits; obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + lessondigits + ".zip"); } */ // NEW THEORY: top-level depths get zipfile links var depths = entry.depth.split(","); var count = 0; var first_level = undefined; for (var j = 0; j < depths.length; j++) { if (depths[j] != undefined) { count++; if (first_level == undefined) first_level = depths[j]; } } if (count == 1) { var d = parseInt(first_level); if (d > 0 && d < lastTopLevel) { if (d < 10) d = "0" + d; obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + d + ".zip"); } } } if (entry.isVideo) { obj.video = path.join(options.mediaPath, entry.video.toLowerCase()); } else if (entry.video) { if (entry.video.toLowerCase().indexOf(".html") != -1) { obj.src = entry.video; } else { obj.video = entry.video; } } if (entry.disabled) { obj.disabled = true; } returnObj.toc.push(obj); } returnObj.projectTitle = options.title; returnObj.bannerDownloadLabel = options.bannerDownloadLabel; returnObj.bannerDownloadLink = options.bannerDownloadLink ? path.join(options.mediaPath, options.bannerDownloadLink) : undefined; returnObj.posterImageData = options.posterImageData; options.tocJS = s + "return " + JSON.stringify(returnObj) + ";\n});"; } function parseInfoFromText (params) { var obj = { part: params.part, lesson: params.lesson, sublesson: params.sublesson, subsublesson: params.subsublesson, short: params.short, desc: params.description }; var found = false; if (!found) { // look for: "Lesson _: Title" in filename reg = /^lesson (.*):\s(.*)/i; res = reg.exec(params.filename); if (res) { obj.short = res[1]; found = true; } } if (!found) { // X.Y Title in description reg = /^(\d{1,2})\.(\d{1,2})\s(.*)/; res = reg.exec(params.description); if (res) { obj.short = res[1] + "." 
+ res[2]; obj.desc = res[3]; found = true; } } return obj; } function streamToString (stream, cb) { var chunks = []; stream.on('data', function (chunk) { chunks.push(chunk); }); stream.on('end', function () { cb(chunks.join('')); }); } function processTranscript (options) { var returnDir = options.name + options.timestamp; var targetDir = "temp/" + returnDir + "/"; // unzip transcript zip // convert srt to vtt // associate videos with transcript files // add transcript (vtt or dbxf) to lunr search index // zip up vtt, dbxf, and search index yauzl.open(options.transcript_path, function (err, zipfile) { if (err) throw err; zipfile.on("close", function () { doneWithTranscript(options); }); zipfile.on("entry", function (entry) { if (/\/$/.test(entry.fileName)) { // directory file names end with '/' return; } zipfile.openReadStream(entry, function (err, readStream) { if (err) throw err; readStream.setEncoding('utf8'); // process the srt files if (entry.fileName.indexOf(".srt") != -1) { // THEORY: find the toc video file that most closely matches this srt file var tocReference = findTOCReference(options.toc, entry.fileName); var newFilename = entry.fileName.replace(".srt", ".vtt"); streamToString(readStream, function (s) { var writePath = path.join(targetDir + "/media/vtt/", newFilename); var filePath = path.dirname(writePath); makeAllPaths(filePath); s = s.replace(/\r/g, ""); var searchableText = ""; var output = "WEBVTT\n\n"; var lines = s.split("\n"); var count = 0; for (var i = 0; i < lines.length; i++) { var line = lines[i]; if (line == "") count = 0; else count++; if (count == 2) { // replace commas in timing lines with periods line = line.replace(/,/g, "."); // add line position to move the cue up a little (CSS was ineffective) line += " line:80%"; } else if (count > 2) { searchableText += line; } output += line + "\n"; } output = output.trim(); fs.writeFileSync(writePath, output, {encoding: "UTF-8", flag: "w"}); if (tocReference) { var doc = { "title": tocReference.title, "body": searchableText, "id": tocReference.index }; options.toc[tocReference.index].captions = "media/vtt/" + newFilename; var transcriptFilename = path.basename(newFilename, path.extname(newFilename)) + ".dfxp"; options.toc[tocReference.index].transcript = "media/transcript/" + transcriptFilename; options.idx.add(doc); } }); } else if (entry.fileName.indexOf(".dfxp") != -1) { var writePath = path.join(targetDir + "/media/transcript/", entry.fileName); // ensure parent directory exists var filePath = path.dirname(writePath); makeAllPaths(filePath); // write file readStream.pipe(fs.createWriteStream(writePath)); } }); }); }); } function findTOCReference (toc, filename) { var file = path.basename(filename, path.extname(filename)); // assuming the transcript file is in this format: 9780789756350-02_04_01.vtt var dash = file.indexOf("-"); if (dash != -1) { file = file.substr(dash + 1); } if (file) { for (var i = 0; i < toc.length; i++) { var entry = toc[i]; if (entry.video && entry.video.indexOf(file) != -1) { return { title: entry.desc, index: i } } } } return undefined; } function
(archive, options) { var search_index = JSON.stringify(options.idx.toJSON()); var search_module = "define([], function () { return " + search_index + "; });"; archive.append(search_module, { name: "/search_index.js" }); } function includeTranscriptFolders (archive, options) { var returnDir = options.name + options.timestamp; var targetDir = "temp/" + returnDir + "/"; makeAllPaths(targetDir + "media/vtt/"); archive.directory(targetDir + "media/vtt/", "/media/vtt/"); makeAllPaths(targetDir + "media/transcript/"); archive.directory(targetDir + "media/transcript/", "/media/transcript/"); } function doneWithTranscript (options) { generateJavascriptTOC(options); writeZip(options); } function completelyDone (options) { sendProgress(options.id, 100); options.response.json({"link": options.outputFile}); } function writeZip (options) { var returnDir = options.name + options.timestamp; var targetDir = path.join("temp", returnDir, "output"); var outputFile = "conversions/" + returnDir + ".zip"; var outputPath = "public/" + outputFile; var dir = path.dirname(outputPath); makeAllPaths(dir); var folder = options.title; var outputStream = fs.createWriteStream(outputPath); var archive = archiver('zip'); outputStream.on("close", function () { doneWithZip(options, outputStream); }); archive.pipe(outputStream); archive.append(options.tocJS, { name: "/toc.js"}); includeViewer(archive, options); includeSearch(archive, options); includeTranscriptFolders(archive, options); archive.finalize(); options.outputFile = outputFile; } function doneWithZip (options, outputStream) { if (outputStream) { doCleanup(options, outputStream); } completelyDone(options); } function doCleanup (options, outputStream) { console.log("cleaning up"); var returnDir = options.name + options.timestamp; var targetDir = "temp/" + returnDir + "/"; outputStream.close(); fs.unlinkSync(options.path); deleteFolderRecursive(targetDir); console.log("done"); } function includeViewer (archive, options) { archive.file("public/runcourse.html", { name: "/runcourse.html" }); archive.file("public/viewer.js", { name: "/viewer.js" }); var settings = { title: options.title, type: "metadata", infinite_scrolling: false, skin: options.skin }; if (options.sampleMode) { settings.buyButton = options.sampleModeLink; var pw = fs.readFileSync("public/paywall.html").toString(); pw = pw.replace("path-to-buy", options.sampleModeLink); archive.append(pw, { name: "/paywall.html" }); } var settings_string = JSON.stringify(settings); var manifest = "define([], function () { return " + settings_string + "; });"; archive.append(manifest, { name: "/manifest.js" }); }
includeSearch
identifier_name
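The identifier_name filled in above is includeSearch, which serializes the lunr index with idx.toJSON() and wraps it in an AMD module. A minimal sketch of that round trip, assuming a pre-2.x lunr API (mutable idx.add after construction, as the app's own calls imply); the sample document is hypothetical:

var lunr = require('lunr');

var idx = lunr(function () {
    this.field('title');
    this.field('body');
});
idx.add({ id: 0, title: 'Learning Objectives', body: 'what you will learn' });

// What includeSearch writes into the archive as /search_index.js:
var serialized = JSON.stringify(idx.toJSON());
var search_module = 'define([], function () { return ' + serialized + '; });';

// What a consumer can do with the serialized index:
var restored = lunr.Index.load(JSON.parse(serialized));
console.log(restored.search('learn').length > 0); // true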
app.js
var fs = require('fs'); var archiver = require('archiver'); var express = require('express'); var multer = require('multer'); var path = require('path'); var cheerio = require('cheerio'); var es = require('event-stream'); var parse = require('csv-parse'); var Datauri = require('datauri'); var lunr = require('lunr'); var yauzl = require("yauzl"); var app = express(); app.use(multer({ dest: './uploads/', onFileUploadComplete: function (file, request, response) { // NOTE: request and response were null (?) } })); app.set('port', (process.env.PORT || 5010)); app.use(express.static('public')); app.post('/upload', function(request, response) { console.log("request received:"); console.log(request.files); if (request.files && request.files.file) { console.log("Upload received " + request.files.file.originalname); var posterFile; if (request.files.posterFile) { posterFile = request.files.posterFile.path; } var transcriptFile; if (request.files.transcript_zip) { transcriptFile = request.files.transcript_zip.path; } //console.log(request.body); // form fields //console.log(request.files); // form files var converted = doConversion({ name: request.files.file.originalname, path: request.files.file.path, transcript_path: transcriptFile, posterFile: posterFile, response: response, id: request.body.requestid, title: request.body.title, mediaPath: request.body.path == undefined ? "" : request.body.path, zipfiles: request.body.zipfiles, bannerDownloadLabel: request.body.bannerDownloadLabel, bannerDownloadLink: request.body.bannerDownloadLink, /*courseZipfile: request.body.courseZipfile,*/ sampleMode: request.body.sampleMode, sampleModeLink: request.body.sampleModeLink, isbn: request.body.isbn, skin: request.body.skin }); } }); var http = require('http'); var server = http.Server(app); var io = require('socket.io')(server); var connections = []; io.on('connection', function (socket) { socket.on("id", function (id) { connections.push( { socket: socket, id: id }); }); }); server.listen((process.env.PORT || 5010), function () { console.log("video-toc-converter is running on port:" + app.get('port')); }); function sendProgress (id, progress) { for (var i = 0; i < connections.length; i++) { var c = connections[i]; if (c.id == id) { var obj = { progress: progress, id: id }; c.socket.emit("progress", obj); } } } function deleteFolderRecursive (path) { if (fs.existsSync(path)) { fs.readdirSync(path).forEach(function (file, index) { var curPath = path + "/" + file; var d = fs.statSync(curPath); if (d.isDirectory()) { deleteFolderRecursive(curPath); } else { // delete file fs.unlinkSync(curPath); } }); fs.rmdirSync(path); } } function makeAllPaths (dir) { var paths = dir.split(path.sep); var curPath = ""; for (var i = 0; i < paths.length; i++) { curPath += paths[i] + path.sep; try { fs.accessSync(curPath, fs.W_OK); } catch (err) { fs.mkdirSync(curPath); } } } function doConversion (options) { options.timestamp = Date.now(); options.idx = lunr(function () { this.field('title'); this.field('body'); }); var input = fs.readFileSync(options.path, "utf8"); var parseOptions = { delimiter: "\t", quote: "" }; parse(input, parseOptions, function(err, output) { if (!err) { processData(options, output); if (options.transcript_path) processTranscript(options); else doneWithTranscript(options); } else { console.log("error"); console.log(err); } }); } function processPosterImage (options) { if (options.posterFile) { var imageURI = new Datauri(options.posterFile); options.posterImageData = imageURI.content; } } function processData 
(options, data) { var toc = []; var lastPart = -1, lastLesson = -1, lastSublesson = -1, lastSubsublesson = -1, lastDepth = undefined; var last = [undefined, undefined, undefined, undefined]; var counters = [-1, -1, -1, -1]; for (var i = 0; i < data.length; i++) { var row = data[i]; var obj = {}; var parsed = { part: row[0], lesson: row[1], short: row[2], sublesson: row[3], subsublesson: row[4], filename: row[5], duration: row[6], isDisabled: row[7] }; var description = parsed.part; if (description == "") description = parsed.lesson; if (description == "") description = parsed.sublesson; if (description == "") description = parsed.subsublesson; parsed.description = description; obj.video = parsed.filename; var duration = parsed.duration; if (duration) { obj.isVideo = true; obj.duration = duration; } else { obj.isVideo = false; } var info = parseInfoFromText(parsed); parseDepthsFromFields(obj, info, last, counters); obj.short = info.short; obj.desc = info.desc; obj.disabled = parsed.isDisabled; if (obj.desc == "Learning Objectives") { obj.short = obj.lesson + ".0"; } var curDepth = []; curDepth.push(obj.part); curDepth.push(obj.lesson); curDepth.push(obj.sublesson); curDepth.push(obj.subsublesson); obj.depth = ""; for (var j = 0; j < curDepth.length; j++) { if (curDepth[j] != -1 && curDepth[j] != undefined) { if (obj.depth != "") obj.depth += ","; obj.depth += curDepth[j]; } } lastPart = obj.part; lastLesson = obj.lesson; lastSublesson = obj.sublesson; lastSubsublesson = obj.subsublesson; lastInfoLesson = info.lesson; lastDepth = curDepth; toc.push(obj); } options.toc = toc; processPosterImage(options); options.lastPart = lastPart; options.lastLesson = lastLesson; options.lastSublesson = lastSublesson; options.lastDepth = lastDepth; } function parseDepthsFromFields (obj, info, last, counters) { if (info.part != "" && info.part != last.part) { counters[0] = counters[0] + 1; counters[1] = counters[2] = counters[3] = -1; obj.part = counters[0]; last.part = info.part; } else if (info.lesson != "" && info.lesson != last.lesson) { counters[1] = counters[1] + 1; counters[2] = counters[3] = -1; obj.lesson = counters[1]; last.lesson = info.lesson; } else if (info.sublesson != "" && info.sublesson != last.sublesson) { counters[2] = counters[2] + 1; counters[3] = -1; obj.sublesson = counters[2]; last.sublesson = info.sublesson; } else if (info.subsublesson != "" && info.subsublesson != last.subsublesson) { counters[3] = counters[3] + 1; obj.subsublesson = counters[3]; last.subsublesson = info.subsublesson; } if (counters[0] != -1) obj.part = counters[0]; if (counters[1] != -1) obj.lesson = counters[1]; if (counters[2] != -1) obj.sublesson = counters[2]; if (counters[3] != -1) obj.subsublesson = counters[3]; } function generateJavascriptTOC (options) { var s = "define([], function () {\n"; var returnObj = { toc: [], markers: [] }; var lastTopLevel = undefined; if (options.lastDepth) { for (var i = 0; i < options.lastDepth.length; i++) { if (options.lastDepth[i] != undefined) { lastTopLevel = parseInt(options.lastDepth[i]); break; } } } for (var i = 0; i < options.toc.length; i++) { var entry = options.toc[i]; var obj = {depth: entry.depth, short: entry.short, desc: entry.desc, duration: entry.duration}; if (entry.captions) obj.captions = entry.captions; if (entry.transcript) obj.transcript = entry.transcript; // add this TOC entry to the search index var doc = { "title": entry.desc, "id": i }; options.idx.add(doc); if (options.zipfiles) { /* // THEORY: lessons between 1 and n-1 get zipfile links var
lessonNumber = parseInt(entry.lesson); if (lessonNumber > 0 && lessonNumber < options.lastLesson && (entry.sublesson === "" || entry.sublesson === undefined)) { var lessondigits = parseInt(entry.lesson); if (lessondigits < 10) lessondigits = "0" + lessondigits; obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + lessondigits + ".zip"); } */ // NEW THEORY: top-level depths get zipfile links var depths = entry.depth.split(","); var count = 0; var first_level = undefined; for (var j = 0; j < depths.length; j++) { if (depths[j] != undefined) { count++; if (first_level == undefined) first_level = depths[j]; } } if (count == 1) { var d = parseInt(first_level); if (d > 0 && d < lastTopLevel) { if (d < 10) d = "0" + d; obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + d + ".zip"); } } } if (entry.isVideo) { obj.video = path.join(options.mediaPath, entry.video.toLowerCase()); } else if (entry.video) { if (entry.video.toLowerCase().indexOf(".html") != -1) { obj.src = entry.video; } else { obj.video = entry.video; } } if (entry.disabled) { obj.disabled = true; } returnObj.toc.push(obj); } returnObj.projectTitle = options.title; returnObj.bannerDownloadLabel = options.bannerDownloadLabel; returnObj.bannerDownloadLink = options.bannerDownloadLink ? path.join(options.mediaPath, options.bannerDownloadLink) : undefined; returnObj.posterImageData = options.posterImageData; options.tocJS = s + "return " + JSON.stringify(returnObj) + ";\n});"; } function parseInfoFromText (params) { var obj = { part: params.part, lesson: params.lesson, sublesson: params.sublesson, subsublesson: params.subsublesson, short: params.short, desc: params.description }; var found = false; if (!found) { // look for: "Lesson _: Title" in filename reg = /^lesson (.*):\s(.*)/i; res = reg.exec(params.filename); if (res) { obj.short = res[1]; found = true; } } if (!found) { // X.Y Title in description reg = /^(\d{1,2})\.(\d{1,2})\s(.*)/; res = reg.exec(params.description); if (res) { obj.short = res[1] + "." 
+ res[2]; obj.desc = res[3]; found = true; } } return obj; } function streamToString (stream, cb) { var chunks = []; stream.on('data', function (chunk) { chunks.push(chunk); }); stream.on('end', function () { cb(chunks.join('')); }); } function processTranscript (options) { var returnDir = options.name + options.timestamp; var targetDir = "temp/" + returnDir + "/"; // unzip transcript zip // convert srt to vtt // associate videos with transcript files // add transcript (vtt or dbxf) to lunr search index // zip up vtt, dbxf, and search index yauzl.open(options.transcript_path, function (err, zipfile) { if (err) throw err; zipfile.on("close", function () { doneWithTranscript(options); }); zipfile.on("entry", function (entry) { if (/\/$/.test(entry.fileName)) { // directory file names end with '/' return; } zipfile.openReadStream(entry, function (err, readStream) { if (err) throw err; readStream.setEncoding('utf8'); // process the srt files if (entry.fileName.indexOf(".srt") != -1) { // THEORY: find the toc video file that most closely matches this srt file var tocReference = findTOCReference(options.toc, entry.fileName); var newFilename = entry.fileName.replace(".srt", ".vtt"); streamToString(readStream, function (s) { var writePath = path.join(targetDir + "/media/vtt/", newFilename); var filePath = path.dirname(writePath); makeAllPaths(filePath); s = s.replace(/\r/g, ""); var searchableText = ""; var output = "WEBVTT\n\n"; var lines = s.split("\n"); var count = 0; for (var i = 0; i < lines.length; i++) { var line = lines[i]; if (line == "") count = 0; else count++; if (count == 2) { // replace commas in timing lines with periods line = line.replace(/,/g, "."); // add line position to move the cue up a little (CSS was ineffective) line += " line:80%"; } else if (count > 2) { searchableText += line; } output += line + "\n"; } output = output.trim(); fs.writeFileSync(writePath, output, {encoding: "UTF-8", flag: "w"}); if (tocReference) { var doc = { "title": tocReference.title, "body": searchableText, "id": tocReference.index }; options.toc[tocReference.index].captions = "media/vtt/" + newFilename;
options.toc[tocReference.index].transcript = "media/transcript/" + transcriptFilename; options.idx.add(doc); } }); } else if (entry.fileName.indexOf(".dfxp") != -1) { var writePath = path.join(targetDir + "/media/transcript/", entry.fileName); // ensure parent directory exists var filePath = path.dirname(writePath); makeAllPaths(filePath); // write file readStream.pipe(fs.createWriteStream(writePath)); } }); }); }); } function findTOCReference (toc, filename) { var file = path.basename(filename, path.extname(filename)); // assuming the transcript file is in this format: 9780789756350-02_04_01.vtt var dash = file.indexOf("-"); if (dash != -1) { file = file.substr(dash + 1); } if (file) { for (var i = 0; i < toc.length; i++) { var entry = toc[i]; if (entry.video && entry.video.indexOf(file) != -1) { return { title: entry.desc, index: i } } } } return undefined; } function includeSearch (archive, options) { var search_index = JSON.stringify(options.idx.toJSON()); var search_module = "define([], function () { return " + search_index + "; });"; archive.append(search_module, { name: "/search_index.js" }); } function includeTranscriptFolders (archive, options) { var returnDir = options.name + options.timestamp; var targetDir = "temp/" + returnDir + "/"; makeAllPaths(targetDir + "media/vtt/"); archive.directory(targetDir + "media/vtt/", "/media/vtt/"); makeAllPaths(targetDir + "media/transcript/"); archive.directory(targetDir + "media/transcript/", "/media/transcript/"); } function doneWithTranscript (options) { generateJavascriptTOC(options); writeZip(options); } function completelyDone (options) { sendProgress(options.id, 100); options.response.json({"link": options.outputFile}); } function writeZip (options) { var returnDir = options.name + options.timestamp; var targetDir = path.join("temp", returnDir, "output"); var outputFile = "conversions/" + returnDir + ".zip"; var outputPath = "public/" + outputFile; var dir = path.dirname(outputPath); makeAllPaths(dir); var folder = options.title; var outputStream = fs.createWriteStream(outputPath); var archive = archiver('zip'); outputStream.on("close", function () { doneWithZip(options, outputStream); }); archive.pipe(outputStream); archive.append(options.tocJS, { name: "/toc.js"}); includeViewer(archive, options); includeSearch(archive, options); includeTranscriptFolders(archive, options); archive.finalize(); options.outputFile = outputFile; } function doneWithZip (options, outputStream) { if (outputStream) { doCleanup(options, outputStream); } completelyDone(options); } function doCleanup (options, outputStream) { console.log("cleaning up"); var returnDir = options.name + options.timestamp; var targetDir = "temp/" + returnDir + "/"; outputStream.close(); fs.unlinkSync(options.path); deleteFolderRecursive(targetDir); console.log("done"); } function includeViewer (archive, options) { archive.file("public/runcourse.html", { name: "/runcourse.html" }); archive.file("public/viewer.js", { name: "/viewer.js" }); var settings = { title: options.title, type: "metadata", infinite_scrolling: false, skin: options.skin }; if (options.sampleMode) { settings.buyButton = options.sampleModeLink; var pw = fs.readFileSync("public/paywall.html").toString(); pw = pw.replace("path-to-buy", options.sampleModeLink); archive.append(pw, { name: "/paywall.html" }); } var settings_string = JSON.stringify(settings); var manifest = "define([], function () { return " + settings_string + "; });"; archive.append(manifest, { name: "/manifest.js" }); }
var transcriptFilename = path.basename(newFilename, path.extname(newFilename)) + ".dfxp";
random_line_split
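The random_line_split middle above is the line that pairs each caption file with a same-named .dfxp transcript. A standalone sketch of that naming rule, together with the cue-timing rewrite the surrounding loop applies (illustrative only; the sample strings are hypothetical):

var path = require('path');

// "02_04_01.vtt" -> "02_04_01.dfxp": swap the extension, keep the base name.
function transcriptNameFor (vttName) {
    return path.basename(vttName, path.extname(vttName)) + '.dfxp';
}

// SRT timestamps use commas, WebVTT uses periods; the app also appends
// "line:80%" to nudge each cue upward.
function fixTimingLine (line) {
    return line.replace(/,/g, '.') + ' line:80%';
}

console.log(transcriptNameFor('02_04_01.vtt')); // "02_04_01.dfxp"
console.log(fixTimingLine('00:00:01,000 --> 00:00:04,000'));
// "00:00:01.000 --> 00:00:04.000 line:80%"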
retransmit_stage.rs
//! The `retransmit_stage` retransmits shreds between validators #![allow(clippy::rc_buffer)] use { crate::{ ancestor_hashes_service::AncestorHashesReplayUpdateReceiver, cluster_info_vote_listener::VerifiedVoteReceiver, cluster_nodes::ClusterNodesCache, cluster_slots::ClusterSlots, cluster_slots_service::{ClusterSlotsService, ClusterSlotsUpdateReceiver}, completed_data_sets_service::CompletedDataSetsSender, packet_hasher::PacketHasher, repair_service::{DuplicateSlotsResetSender, RepairInfo}, window_service::{should_retransmit_and_persist, WindowService}, }, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, lru::LruCache, rayon::{prelude::*, ThreadPool, ThreadPoolBuilder}, solana_client::rpc_response::SlotUpdate, solana_gossip::{ cluster_info::{ClusterInfo, DATA_PLANE_FANOUT}, contact_info::ContactInfo, }, solana_ledger::{ blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, shred::{Shred, ShredId}, }, solana_measure::measure::Measure, solana_perf::packet::PacketBatch, solana_rayon_threadlimit::get_thread_count, solana_rpc::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions}, solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp}, solana_streamer::sendmmsg::{multi_target_send, SendPktsError}, std::{ collections::{BTreeSet, HashMap, HashSet}, net::UdpSocket, ops::{AddAssign, DerefMut}, sync::{ atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, Arc, Mutex, RwLock, }, thread::{self, Builder, JoinHandle}, time::{Duration, Instant}, }, }; const MAX_DUPLICATE_COUNT: usize = 2; const DEFAULT_LRU_SIZE: usize = 10_000; const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = 8; const CLUSTER_NODES_CACHE_TTL: Duration = Duration::from_secs(5); #[derive(Default)] struct RetransmitSlotStats { num_shreds: usize, num_nodes: usize, } impl AddAssign for RetransmitSlotStats { fn add_assign(&mut self, other: Self) { *self = Self { num_shreds: self.num_shreds + other.num_shreds, num_nodes: self.num_nodes + other.num_nodes, } } } #[derive(Default)] struct RetransmitStats { since: Option<Instant>, num_nodes: AtomicUsize, num_addrs_failed: AtomicUsize, num_shreds: usize, num_shreds_skipped: AtomicUsize, total_batches: usize, total_time: u64, epoch_fetch: u64, epoch_cache_update: u64, retransmit_total: AtomicU64, compute_turbine_peers_total: AtomicU64, slot_stats: HashMap<Slot, RetransmitSlotStats>, unknown_shred_slot_leader: AtomicUsize, } impl RetransmitStats { fn maybe_submit( &mut self, root_bank: &Bank, working_bank: &Bank, cluster_info: &ClusterInfo, cluster_nodes_cache: &ClusterNodesCache<RetransmitStage>, ) { const SUBMIT_CADENCE: Duration = Duration::from_secs(2); let elapsed = self.since.as_ref().map(Instant::elapsed); if elapsed.unwrap_or(Duration::MAX) < SUBMIT_CADENCE { return; } let num_peers = cluster_nodes_cache .get(root_bank.slot(), root_bank, working_bank, cluster_info) .num_peers(); let stats = std::mem::replace( self, Self { since: Some(Instant::now()), ..Self::default() }, ); datapoint_info!("retransmit-num_nodes", ("count", num_peers, i64)); datapoint_info!( "retransmit-stage", ("total_time", stats.total_time, i64), ("epoch_fetch", stats.epoch_fetch, i64), ("epoch_cache_update", stats.epoch_cache_update, i64), ("total_batches", stats.total_batches, i64), ("num_nodes", stats.num_nodes.into_inner(), i64), ("num_addrs_failed", stats.num_addrs_failed.into_inner(), i64), ("num_shreds", stats.num_shreds, i64), ( "num_shreds_skipped", stats.num_shreds_skipped.into_inner(), 
i64 ), ("retransmit_total", stats.retransmit_total.into_inner(), i64), ( "compute_turbine", stats.compute_turbine_peers_total.into_inner(), i64 ), ( "unknown_shred_slot_leader", stats.unknown_shred_slot_leader.into_inner(), i64 ), ); for (slot, stats) in stats.slot_stats { datapoint_info!( "retransmit-stage-slot-stats", ("slot", slot, i64), ("num_shreds", stats.num_shreds, i64), ("num_nodes", stats.num_nodes, i64), ); } } } // Map of shred (slot, index, type) => list of hash values seen for that key. type ShredFilter = LruCache<ShredId, Vec<u64>>; type ShredFilterAndHasher = (ShredFilter, PacketHasher); // Returns true if shred is already received and should skip retransmit. fn should_skip_retransmit(shred: &Shred, shreds_received: &Mutex<ShredFilterAndHasher>) -> bool { let key = shred.id(); let mut shreds_received = shreds_received.lock().unwrap(); let (cache, hasher) = shreds_received.deref_mut(); match cache.get_mut(&key) { Some(sent) if sent.len() >= MAX_DUPLICATE_COUNT => true, Some(sent) => { let hash = hasher.hash_shred(shred); if sent.contains(&hash) { true } else { sent.push(hash); false } } None => { let hash = hasher.hash_shred(shred); cache.put(key, vec![hash]); false } } } // Returns true if this is the first time receiving a shred for `shred_slot`. fn check_if_first_shred_received( shred_slot: Slot, first_shreds_received: &Mutex<BTreeSet<Slot>>, root_bank: &Bank, ) -> bool { if shred_slot <= root_bank.slot() { return false; } let mut first_shreds_received_locked = first_shreds_received.lock().unwrap(); if first_shreds_received_locked.insert(shred_slot) { datapoint_info!("retransmit-first-shred", ("slot", shred_slot, i64)); if first_shreds_received_locked.len() > 100
true } else { false } } fn maybe_reset_shreds_received_cache( shreds_received: &Mutex<ShredFilterAndHasher>, hasher_reset_ts: &mut Instant, ) { const UPDATE_INTERVAL: Duration = Duration::from_secs(1); if hasher_reset_ts.elapsed() >= UPDATE_INTERVAL { *hasher_reset_ts = Instant::now(); let mut shreds_received = shreds_received.lock().unwrap(); let (cache, hasher) = shreds_received.deref_mut(); cache.clear(); hasher.reset(); } } #[allow(clippy::too_many_arguments)] fn retransmit( thread_pool: &ThreadPool, bank_forks: &RwLock<BankForks>, leader_schedule_cache: &LeaderScheduleCache, cluster_info: &ClusterInfo, shreds_receiver: &Receiver<Vec<Shred>>, sockets: &[UdpSocket], stats: &mut RetransmitStats, cluster_nodes_cache: &ClusterNodesCache<RetransmitStage>, hasher_reset_ts: &mut Instant, shreds_received: &Mutex<ShredFilterAndHasher>, max_slots: &MaxSlots, first_shreds_received: &Mutex<BTreeSet<Slot>>, rpc_subscriptions: Option<&RpcSubscriptions>, ) -> Result<(), RecvTimeoutError> { const RECV_TIMEOUT: Duration = Duration::from_secs(1); let mut shreds = shreds_receiver.recv_timeout(RECV_TIMEOUT)?; let mut timer_start = Measure::start("retransmit"); shreds.extend(shreds_receiver.try_iter().flatten()); stats.num_shreds += shreds.len(); stats.total_batches += 1; let mut epoch_fetch = Measure::start("retransmit_epoch_fetch"); let (working_bank, root_bank) = { let bank_forks = bank_forks.read().unwrap(); (bank_forks.working_bank(), bank_forks.root_bank()) }; epoch_fetch.stop(); stats.epoch_fetch += epoch_fetch.as_us(); let mut epoch_cache_update = Measure::start("retransmit_epoch_cache_update"); maybe_reset_shreds_received_cache(shreds_received, hasher_reset_ts); epoch_cache_update.stop(); stats.epoch_cache_update += epoch_cache_update.as_us(); let socket_addr_space = cluster_info.socket_addr_space(); let retransmit_shred = |shred: &Shred, socket: &UdpSocket| { if should_skip_retransmit(shred, shreds_received) { stats.num_shreds_skipped.fetch_add(1, Ordering::Relaxed); return 0; } let shred_slot = shred.slot(); max_slots .retransmit .fetch_max(shred_slot, Ordering::Relaxed); if let Some(rpc_subscriptions) = rpc_subscriptions { if check_if_first_shred_received(shred_slot, first_shreds_received, &root_bank) { rpc_subscriptions.notify_slot_update(SlotUpdate::FirstShredReceived { slot: shred_slot, timestamp: timestamp(), }); } } let mut compute_turbine_peers = Measure::start("turbine_start"); // TODO: consider using root-bank here for leader lookup! // Shreds' signatures should be verified before they reach here, and if // the leader is unknown they should fail signature check. So here we // should expect to know the slot leader and otherwise skip the shred. 
let slot_leader = match leader_schedule_cache.slot_leader_at(shred_slot, Some(&working_bank)) { Some(pubkey) => pubkey, None => { stats .unknown_shred_slot_leader .fetch_add(1, Ordering::Relaxed); return 0; } }; let cluster_nodes = cluster_nodes_cache.get(shred_slot, &root_bank, &working_bank, cluster_info); let addrs: Vec<_> = cluster_nodes .get_retransmit_addrs(slot_leader, shred, &root_bank, DATA_PLANE_FANOUT) .into_iter() .filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space)) .collect(); compute_turbine_peers.stop(); stats .compute_turbine_peers_total .fetch_add(compute_turbine_peers.as_us(), Ordering::Relaxed); let mut retransmit_time = Measure::start("retransmit_to"); let num_nodes = match multi_target_send(socket, &shred.payload, &addrs) { Ok(()) => addrs.len(), Err(SendPktsError::IoError(ioerr, num_failed)) => { stats .num_addrs_failed .fetch_add(num_failed, Ordering::Relaxed); error!( "retransmit_to multi_target_send error: {:?}, {}/{} packets failed", ioerr, num_failed, addrs.len(), ); addrs.len() - num_failed } }; retransmit_time.stop(); stats.num_nodes.fetch_add(num_nodes, Ordering::Relaxed); stats .retransmit_total .fetch_add(retransmit_time.as_us(), Ordering::Relaxed); num_nodes }; fn merge<K, V>(mut acc: HashMap<K, V>, other: HashMap<K, V>) -> HashMap<K, V> where K: Eq + std::hash::Hash, V: Default + AddAssign, { if acc.len() < other.len() { return merge(other, acc); } for (key, value) in other { *acc.entry(key).or_default() += value; } acc } let slot_stats = thread_pool.install(|| { shreds .into_par_iter() .with_min_len(4) .map(|shred| { let index = thread_pool.current_thread_index().unwrap(); let socket = &sockets[index % sockets.len()]; let num_nodes = retransmit_shred(&shred, socket); (shred.slot(), num_nodes) }) .fold( HashMap::<Slot, RetransmitSlotStats>::new, |mut acc, (slot, num_nodes)| { let stats = acc.entry(slot).or_default(); stats.num_nodes += num_nodes; stats.num_shreds += 1; acc }, ) .reduce(HashMap::new, merge) }); stats.slot_stats = merge(std::mem::take(&mut stats.slot_stats), slot_stats); timer_start.stop(); stats.total_time += timer_start.as_us(); stats.maybe_submit(&root_bank, &working_bank, cluster_info, cluster_nodes_cache); Ok(()) } /// Service to retransmit messages from the leader or layer 1 to relevant peer nodes. /// See `cluster_info` for network layer definitions. /// # Arguments /// * `sockets` - Sockets to read from. /// * `bank_forks` - The BankForks structure /// * `leader_schedule_cache` - The leader schedule to verify shreds /// * `cluster_info` - This structure needs to be updated and populated by the bank and via gossip. /// * `r` - Receive channel for shreds to be retransmitted to all the layer 1 nodes. 
pub fn retransmitter( sockets: Arc<Vec<UdpSocket>>, bank_forks: Arc<RwLock<BankForks>>, leader_schedule_cache: Arc<LeaderScheduleCache>, cluster_info: Arc<ClusterInfo>, shreds_receiver: Receiver<Vec<Shred>>, max_slots: Arc<MaxSlots>, rpc_subscriptions: Option<Arc<RpcSubscriptions>>, ) -> JoinHandle<()> { let cluster_nodes_cache = ClusterNodesCache::<RetransmitStage>::new( CLUSTER_NODES_CACHE_NUM_EPOCH_CAP, CLUSTER_NODES_CACHE_TTL, ); let mut hasher_reset_ts = Instant::now(); let mut stats = RetransmitStats::default(); let shreds_received = Mutex::new((LruCache::new(DEFAULT_LRU_SIZE), PacketHasher::default())); let first_shreds_received = Mutex::<BTreeSet<Slot>>::default(); let num_threads = get_thread_count().min(8).max(sockets.len()); let thread_pool = ThreadPoolBuilder::new() .num_threads(num_threads) .thread_name(|i| format!("retransmit-{}", i)) .build() .unwrap(); Builder::new() .name("solana-retransmitter".to_string()) .spawn(move || { trace!("retransmitter started"); loop { match retransmit( &thread_pool, &bank_forks, &leader_schedule_cache, &cluster_info, &shreds_receiver, &sockets, &mut stats, &cluster_nodes_cache, &mut hasher_reset_ts, &shreds_received, &max_slots, &first_shreds_received, rpc_subscriptions.as_deref(), ) { Ok(()) => (), Err(RecvTimeoutError::Timeout) => (), Err(RecvTimeoutError::Disconnected) => break, } } trace!("exiting retransmitter"); }) .unwrap() } pub struct RetransmitStage { retransmit_thread_handle: JoinHandle<()>, window_service: WindowService, cluster_slots_service: ClusterSlotsService, } impl RetransmitStage { #[allow(clippy::new_ret_no_self)] #[allow(clippy::too_many_arguments)] pub(crate) fn new( bank_forks: Arc<RwLock<BankForks>>, leader_schedule_cache: Arc<LeaderScheduleCache>, blockstore: Arc<Blockstore>, cluster_info: Arc<ClusterInfo>, retransmit_sockets: Arc<Vec<UdpSocket>>, repair_socket: Arc<UdpSocket>, ancestor_hashes_socket: Arc<UdpSocket>, verified_receiver: Receiver<Vec<PacketBatch>>, exit: Arc<AtomicBool>, cluster_slots_update_receiver: ClusterSlotsUpdateReceiver, epoch_schedule: EpochSchedule, cfg: Option<Arc<AtomicBool>>, shred_version: u16, cluster_slots: Arc<ClusterSlots>, duplicate_slots_reset_sender: DuplicateSlotsResetSender, verified_vote_receiver: VerifiedVoteReceiver, repair_validators: Option<HashSet<Pubkey>>, completed_data_sets_sender: CompletedDataSetsSender, max_slots: Arc<MaxSlots>, rpc_subscriptions: Option<Arc<RpcSubscriptions>>, duplicate_slots_sender: Sender<Slot>, ancestor_hashes_replay_update_receiver: AncestorHashesReplayUpdateReceiver, ) -> Self { let (retransmit_sender, retransmit_receiver) = unbounded(); let retransmit_thread_handle = retransmitter( retransmit_sockets, bank_forks.clone(), leader_schedule_cache.clone(), cluster_info.clone(), retransmit_receiver, max_slots, rpc_subscriptions, ); let cluster_slots_service = ClusterSlotsService::new( blockstore.clone(), cluster_slots.clone(), bank_forks.clone(), cluster_info.clone(), cluster_slots_update_receiver, exit.clone(), ); let leader_schedule_cache_clone = leader_schedule_cache.clone(); let repair_info = RepairInfo { bank_forks, epoch_schedule, duplicate_slots_reset_sender, repair_validators, cluster_info, cluster_slots, }; let window_service = WindowService::new( blockstore, verified_receiver, retransmit_sender, repair_socket, ancestor_hashes_socket, exit, repair_info, leader_schedule_cache, move |id, shred, working_bank, last_root| { let is_connected = cfg .as_ref() .map(|x| x.load(Ordering::Relaxed)) .unwrap_or(true); let rv = should_retransmit_and_persist( 
shred, working_bank, &leader_schedule_cache_clone, id, last_root, shred_version, ); rv && is_connected }, verified_vote_receiver, completed_data_sets_sender, duplicate_slots_sender, ancestor_hashes_replay_update_receiver, ); Self { retransmit_thread_handle, window_service, cluster_slots_service, } } pub(crate) fn join(self) -> thread::Result<()> { self.retransmit_thread_handle.join()?; self.window_service.join()?; self.cluster_slots_service.join() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_already_received() { let slot = 1; let index = 5; let version = 0x40; let shred = Shred::new_from_data(slot, index, 0, None, true, true, 0, version, 0); let shreds_received = Arc::new(Mutex::new((LruCache::new(100), PacketHasher::default()))); // unique shred for (1, 5) should pass assert!(!should_skip_retransmit(&shred, &shreds_received)); // duplicate shred for (1, 5) blocked assert!(should_skip_retransmit(&shred, &shreds_received)); let shred = Shred::new_from_data(slot, index, 2, None, true, true, 0, version, 0); // first duplicate shred for (1, 5) passed assert!(!should_skip_retransmit(&shred, &shreds_received)); // then blocked assert!(should_skip_retransmit(&shred, &shreds_received)); let shred = Shred::new_from_data(slot, index, 8, None, true, true, 0, version, 0); // 2nd duplicate shred for (1, 5) blocked assert!(should_skip_retransmit(&shred, &shreds_received)); assert!(should_skip_retransmit(&shred, &shreds_received)); let shred = Shred::new_empty_coding(slot, index, 0, 1, 1, 0, version); // Coding at (1, 5) passes assert!(!should_skip_retransmit(&shred, &shreds_received)); // then blocked assert!(should_skip_retransmit(&shred, &shreds_received)); let shred = Shred::new_empty_coding(slot, index, 2, 1, 1, 0, version); // 2nd unique coding at (1, 5) passes assert!(!should_skip_retransmit(&shred, &shreds_received)); // same again is blocked assert!(should_skip_retransmit(&shred, &shreds_received)); let shred = Shred::new_empty_coding(slot, index, 3, 1, 1, 0, version); // Another unique coding at (1, 5) always blocked assert!(should_skip_retransmit(&shred, &shreds_received)); assert!(should_skip_retransmit(&shred, &shreds_received)); } }
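// Illustrative sketch (not part of the file above): the per-slot accounting
// in `retransmit` uses rayon's fold/reduce pattern. Each worker thread folds
// into its own thread-local HashMap, and the per-thread maps are then merged
// pairwise, always folding the smaller map into the larger one to minimize
// rehashing. The names below (`Counter`, the sample slot values) are invented
// for the example; only the pattern mirrors the code above.
use rayon::prelude::*;
use std::collections::HashMap;
use std::ops::AddAssign;

#[derive(Default, Debug)]
struct Counter(usize);

impl AddAssign for Counter {
    fn add_assign(&mut self, other: Self) {
        self.0 += other.0;
    }
}

// Fold the smaller map into the larger one, as in the `merge` helper above.
fn merge<K, V>(mut acc: HashMap<K, V>, other: HashMap<K, V>) -> HashMap<K, V>
where
    K: Eq + std::hash::Hash,
    V: Default + AddAssign,
{
    if acc.len() < other.len() {
        return merge(other, acc);
    }
    for (key, value) in other {
        *acc.entry(key).or_default() += value;
    }
    acc
}

fn main() {
    let slots: Vec<u64> = vec![1, 1, 2, 3, 2, 1];
    let per_slot = slots
        .into_par_iter()
        // Each rayon worker accumulates into its own thread-local map...
        .fold(HashMap::<u64, Counter>::new, |mut acc, slot| {
            *acc.entry(slot).or_default() += Counter(1);
            acc
        })
        // ...and the per-thread maps are merged pairwise at the end.
        .reduce(HashMap::new, merge);
    println!("{:?}", per_slot); // e.g. {1: Counter(3), 2: Counter(2), 3: Counter(1)}
}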
eng.ts
export default {
	error: {
		generic: 'Error',
		home: 'Go home'
	},
	kara: {
		phrase: '{songtype} from {series}',
		meta: '{songtitle} from {serieSinger}',
		notfound: 'Karaoke not found',
		tagtypes: {
			series: 'Series',
			langs: 'Language | Languages',
			songtypes: 'Song type | Song types',
			singers: 'Singer | Singers',
			songwriters: 'Songwriter | Songwriters',
			families: 'Family | Families',
			origins: 'Origin | Origins',
			genres: 'Kind | Kinds',
			platforms: 'Platform | Platforms',
			creators: 'Creator | Creators',
			authors: 'Karaoke author | Karaoke authors',
			groups: 'Group | Groups',
			misc: 'Miscellaneous'
		},
		duration: 'Duration ',
		created_at: 'Created ',
		modified_at: 'Last update ',
		add: 'Add to application',
		download: {
			karabundle: 'Download karaoke data',
			media: 'Download media'
		},
		live: 'Open in a new tab',
		lyrics: {
			show: 'Show lyrics',
			hide: 'Hide lyrics'
		},
		favorites: {
			add: 'Add to favorites',
			remove: 'Remove from favorites'
		},
		problem: {
			title: 'An issue with {title}?',
			btn: {
				report: 'Report an issue',
				edit: 'Propose an edit'
			},
			form: {
				title: 'Report an issue',
				subtitle: 'We do our best to keep the database high-quality, but issues sometimes slip through. Thanks for your help!',
				type: {
					label: 'Problem type',
					time: 'Lyrics not synchronised',
					quality: 'Low quality video'
				},
				comment: {
					label: 'Comment',
					placeholder: 'After 2 minutes, the lyrics are not synchronised'
				},
				username: {
					label: 'Your name',
					placeholder: 'IAmMeticulous'
				},
				submit: 'Submit',
				thanks: {
					text: 'Thanks! We will address this issue as soon as possible: {url}',
					btn: 'Close'
				}
			}
		},
		import: {
			description: 'This form allows you to submit a karaoke to the Karaoke Mugen team. It will not be integrated into the karaoke database immediately because it requires validation. Please be patient. Your karaoke may be modified if it doesn\'t comply with KM\'s rules.',
			attention: 'ATTENTION:',
			check_in_progress: 'Please check the list of karaokes currently being made before sending us a song. This\'ll avoid duplicate work, and the world will thus be a better place.',
			documentation_link: 'Documentation',
			in_progress_link: 'Karaokes In Progress List',
			license_reminder: 'Your karaoke will be published with the {name} license',
			license_link: 'Learn more about this license by clicking here.',
			add: 'Add',
			create: 'Create',
			choose_file: 'Choose a file',
			add_file_media_error: '{name} is not a media file',
			add_file_lyrics_error: '{name} is not a subtitle file',
			add_file_success: '{name} file added successfully',
			comment: 'Leave a comment?',
			comment_edit: 'If you\'re submitting an edit, tell us who you are here!',
			comment_tooltip: 'If you want to add a message for the integrators or just say thanks, say it here!',
			submit: 'Send karaoke',
			media_file: 'Media file',
			media_file_required: 'Media file is mandatory',
			media_file_tooltip: 'Supported file formats: {formats}',
			lyrics_file: 'Lyrics file',
			lyrics_file_tooltip: 'Supported file formats: {formats}',
			title: 'Song title',
			title_required: 'Please enter a song title',
			title_tooltip: 'If you don\'t know, put the name of the series here as well. In the case of an alternative version, name your title as: \'My title ~ Disco vers.\' for example',
			series_tooltip: 'TV series, movie title, video game title, etc.',
			series_singers_required: 'Series and singers cannot both be empty at the same time.',
			songtypes_required: 'Song type is mandatory',
			songorder: 'Song order',
			songorder_invalid: 'Song order is invalid',
			songorder_tooltip: 'Opening/Ending number. 
If this is the only opening/ending in the series, leave blank.',
			langs_required: 'Please choose a language',
			year: 'Broadcast year',
			year_tooltip: 'Year when the series was broadcast or the video was produced',
			year_required: 'Broadcast year is mandatory',
			year_invalid: 'Broadcast year is invalid',
			songwriters_tooltip: 'Songwriters compose lyrics AND music.',
			creators_tooltip: 'Entity that created the series. Can be an animation studio, a movie studio, or a game studio',
			authors_tooltip: 'You should add yourself here ;)',
			authors_required: 'Author of the kara is mandatory',
			groups_tooltip: 'Download groups for this song. The song will be included in these download packs',
			created_at: 'Creation date',
			modified_at: 'Last updated date',
			add_success: 'Your karaoke has been successfully sent!',
			add_success_description: 'An issue has been created on our tracker. You can check its progression at {url}',
			add_error: 'An error has occurred; the karaoke has not been sent properly',
			restart: 'Submit new karaoke'
		},
		stats: {
			favorited: 'Added to favorites by {number} users',
			requested: 'Requested {number} times',
			played: 'Played {number} times'
		}
	},
	layout: {
		loading: 'Loading...',
		suggest: 'Can\'t find what you\'re looking for?',
		suggest_open: 'Suggest it to us!',
		empty: 'This is the end of your favorites.',
		explore: 'Go add some!',
		results: '{count} result | {count} results',
		slogan: 'This song is available on the Karaoke Mugen songbase!'
	},
	footer: {
		home: 'Project home',
		software_under_license: 'Software under license',
		base_under_licence: 'Karaoke base under license'
	},
	stats: {
		karaokes: 'Karaoke | Karaokes',
		all_duration: 'Duration of all karas',
		last_generation: 'Last update',
		media_size: 'Media Size'
	},
	home: {
		noInstance: {
			title: 'No Karaoke Mugen instance runs on your local network.',
			1: 'Double check you\'re connected to the same WiFi network as the server. ',
			2: 'Make sure the Karaoke Mugen application is running. Please check that the kara.moe setting is enabled in Options -> Karaoke -> Short URL (kara.moe).',
			3: 'If you just want to explore the base, you can safely ignore this message.'
		}
	},
	duration: {
		days: 'days',
		hours: 'hours',
		minutes: 'minutes',
		seconds: 'seconds'
	},
	menu: {
		add_repository: 'Add this repository to your app',
		database: 'Database',
		karas: 'Songs',
		songtypes: 'Types',
		tags: 'Tags',
		miscs: 'Miscs',
		groups: 'Groups',
		families: 'Families',
		origins: 'Origins',
		genres: 'Genres',
		platforms: 'Platforms',
		singers: 'Singers',
		series: 'Series',
		songwriters: 'Songwriters',
		creators: 'Creators',
		authors: 'Authors',
		languages: 'Languages',
		years: 'Years',
		community: 'Community',
		kara_import: 'Submit a kara',
		account: 'Account',
		favorites: 'Favorites',
		login: 'Login',
		logout: 'Logout',
		register: 'Register',
		connection: 'Login',
		profile: 'Profile',
		switch_language: 'Switch language'
	},
	search: {
		placeholder: 'Series, singers, names...',
		sort: {
			a_z: 'A to Z',
			kara_count: 'Kara count',
			recent: 'Recent',
			most_played: 'Most played',
			most_favorites: 'Most favorited',
			most_requested: 'Most requested'
		},
		next: 'Next page',
		previous: 'Previous page',
		aria: {
			goto: 'Go to page {0}',
			page: 'Page {0}',
			sort: 'Sort by'
		}
	},
	modal: {
		login: {
			title: 'Login',
			subtitle: 'Login to view your favorites and edit your profile!',
			fields: {
				username: {
					label: 'Username',
					placeholder: 'LoveLiveFan93'
				},
				password: {
					label: 'Password',
					placeholder: 'ActuallyIdolM@sterIsBetter'
				},
				forgot_password: {
					label: 'Forgot password?',
					error: 'Could not reset your password: contact the administrator of the server your account belongs to.',
					success: 'An email has been sent with a link to reset your password.'
				}
			},
			submit: 'Login'
		},
		signup: {
			title: 'Signup',
			subtitle: 'Signup to view your favorites and edit your profile!',
			fields: {
				username: {
					label: 'Username',
					placeholder: 'LoveLiveFan93'
				},
				password: {
					label: 'Password',
					placeholder: 'ActuallyIdolM@sterIsBetter'
				},
				password_confirmation: {
					label: 'Password Confirmation',
					placeholder: 'ActuallyIdolM@sterIsBetter'
				},
				email: {
					label: 'Email',
					placeholder: 'test@shelter.moe'
				}
			},
			passwords_mismatch: 'Passwords do not match',
			submit: 'Signup'
		},
		profile: {
			title: 'Edit profile',
			fields: {
				username: {
					label: 'Username'
				},
				nickname: {
					label: 'Nickname',
					placeholder: 'LoveLiveFan93'
				},
				password: {
					header: 'Change password',
					label: 'Password',
					placeholder: 'ActuallyIPreferIdolM@ster'
				},
				password_confirmation: {
					label: 'Password confirmation',
					placeholder: 'ActuallyIPreferIdolM@ster'
				},
				email: {
					label: 'Email',
					placeholder: 'test@shelter.moe'
				},
				url: {
					label: 'Url',
					placeholder: 'https://karaokes.moe'
				},
				bio: {
					label: 'Biography',
					placeholder: 'It\'s my life'
				}
			},
			passwords_mismatch: 'Passwords do not match',
			submit: 'Save',
			delete: 'Delete account',
			series_name: {
				label: 'Series language display',
				original_name: 'Original name',
				song_lang: 'Song language',
				mode_admin: 'Karaoke Mugen language',
				user_lang: 'User language',
				mode_no_pref: 'No preference',
				force_lang_series: 'Force your own language',
				force_lang_series_main: 'Series name language',
				force_lang_series_fallback: 'Fallback series name language'
			}
		},
		add_repository: {
			button: 'Repository:',
			label: 'Add this repository to your Karaoke Mugen app!',
			desc: 'You can add this repository to your Karaoke Mugen app by clicking on the button below. If Karaoke Mugen is not installed on your computer, this button will have no effect.',
			download: 'The application can be downloaded here.',
			manual: 'To manually add this repository, open your Karaoke Mugen application, add the {repository} repository to it, checking "{online}", then go to the downloads manager',
			online: 'Online',
			add: 'Add',
			cancel: 'Cancel'
		},
		delete_account: {
			label: 'Do you really want to delete your account?',
			add: 'Delete the account',
			cancel: 'Cancel'
		},
		suggest: {
			title: 'Suggestion',
			subtitle: 'You couldn\'t find what you\'ve searched for? You can still file a suggestion ',
			fields: {
				title: {
					label: 'Title',
					placeholder: 'JINGO JUNGLE'
				},
				series: {
					label: 'Series / Singer',
					placeholder: 'Yôjo Senki: Saga of Tanya the Evil'
				},
				type: {
					label: 'Type'
				},
				link: {
					label: 'Link',
					placeholder: 'https://www.youtube.com/watch?v=5VRyiaszGtA'
				},
				name: {
					label: 'Your name',
					placeholder: 'Magic anonymous'
				}
			},
			submit: 'Submit',
			submitted: {
				subtitle: 'Message heard loud and clear!',
				text: 'Your suggestion was received; you can check its status by clicking {here}.',
				here: 'here',
				close: 'Close'
			}
		}
	},
	titles: {
		home: 'Home'
	},
	toast: {
		LOG_ERROR: 'Incorrect credentials.',
		USER_CREATED: 'User successfully created',
		GENERATED_KARA: 'Karaoke sent successfully.',
		EDITED_KARA: 'Modification sent successfully.',
		CANNOT_GENERATE_KARA: 'Cannot send karaoke.',
		SUBFILE_FORMAT_UNKOWN: 'Subtitle file format unknown',
		CANNOT_EDIT_KARA: 'Cannot send modification.',
		FILE_UPLOADED: 'File uploaded.',
		USER_EDITED: 'User successfully edited',
		FUTURE_PROFILES: 'This button will be used to share your profile with everyone else, but that\'s for another time!'
	}
};
quadstore.go
package sql import ( "database/sql" "database/sql/driver" "encoding/hex" "fmt" "strings" "time" "github.com/cayleygraph/cayley/clog" "github.com/cayleygraph/cayley/graph" "github.com/cayleygraph/cayley/graph/iterator" "github.com/cayleygraph/cayley/internal/lru" "github.com/cayleygraph/cayley/quad" "github.com/cayleygraph/cayley/quad/pquads" ) const QuadStoreType = "sql" func init() { graph.RegisterQuadStore(QuadStoreType, graph.QuadStoreRegistration{ NewFunc: newQuadStore, UpgradeFunc: nil, InitFunc: createSQLTables, IsPersistent: true, }) } type NodeHash [quad.HashSize]byte func (NodeHash) IsNode() bool { return true } func (h NodeHash) Key() interface{} { return h } func (h NodeHash) Valid() bool { return h != NodeHash{} } func (h NodeHash) toSQL() interface{} { if !h.Valid() { return nil } return []byte(h[:]) } func (h NodeHash) String() string { if !h.Valid() { return "" } return hex.EncodeToString(h[:]) } func (h *NodeHash) Scan(src interface{}) error { if src == nil { *h = NodeHash{} return nil } b, ok := src.([]byte) if !ok { return fmt.Errorf("cannot scan %T to NodeHash", src) } if len(b) == 0 { *h = NodeHash{} return nil } else if len(b) != quad.HashSize { return fmt.Errorf("unexpected hash length: %d", len(b)) } copy((*h)[:], b) return nil } func hashOf(s quad.Value) (out NodeHash) { if s == nil { return } quad.HashTo(s, out[:]) return } type QuadHashes [4]NodeHash func (QuadHashes) IsNode() bool { return false } func (q QuadHashes) Key() interface{} { return q } func (q QuadHashes) Get(d quad.Direction) NodeHash { switch d { case quad.Subject: return q[0] case quad.Predicate: return q[1] case quad.Object: return q[2] case quad.Label: return q[3] } panic(fmt.Errorf("unknown direction: %v", d)) } type QuadStore struct { db *sql.DB flavor Flavor size int64 ids *lru.Cache sizes *lru.Cache noSizes bool useEstimates bool } type Flavor struct { Name string Driver string NodesTable string QuadsTable string FieldQuote rune Placeholder func(int) string Indexes func(graph.Options) []string Error func(error) error Estimated func(table string) string RunTx func(tx *sql.Tx, in []graph.Delta, opts graph.IgnoreOpts) error NoSchemaChangesInTx bool } var flavors = make(map[string]Flavor) func RegisterFlavor(f Flavor) { flavors[f.Name] = f } const defaultFlavor = "postgres" func connect(addr string, flavor string, opts graph.Options) (*sql.DB, error) { // TODO(barakmich): Parse options for more friendly addr conn, err := sql.Open(flavor, addr) if err != nil { clog.Errorf("Couldn't open database at %s: %#v", addr, err) return nil, err } // "Open may just validate its arguments without creating a connection to the database." // "To verify that the data source name is valid, call Ping." 
// Source: http://golang.org/pkg/database/sql/#Open if err := conn.Ping(); err != nil { clog.Errorf("Couldn't open database at %s: %#v", addr, err) return nil, err } return conn, nil } var nodesColumns = []string{ "hash", "value", "value_string", "datatype", "language", "iri", "bnode", "value_int", "value_bool", "value_float", "value_time", } var nodeInsertColumns = [][]string{ {"value"}, {"value_string", "iri"}, {"value_string", "bnode"}, {"value_string"}, {"value_string", "datatype"}, {"value_string", "language"}, {"value_int"}, {"value_bool"}, {"value_float"}, {"value_time"}, } func createSQLTables(addr string, options graph.Options) error { flavor, _, _ := options.StringKey("flavor") if flavor == "" { flavor = defaultFlavor } fl, ok := flavors[flavor] if !ok { return fmt.Errorf("unsupported sql flavor: %s", flavor) } dr := fl.Driver if dr == "" { dr = fl.Name } conn, err := connect(addr, dr, options) if err != nil { return err } defer conn.Close() if fl.NoSchemaChangesInTx { _, err = conn.Exec(fl.NodesTable) if err != nil { err = fl.Error(err) clog.Errorf("Cannot create nodes table: %v", err) return err } _, err = conn.Exec(fl.QuadsTable) if err != nil { err = fl.Error(err) clog.Errorf("Cannot create quad table: %v", err) return err } for _, index := range fl.Indexes(options) { if _, err = conn.Exec(index); err != nil { clog.Errorf("Cannot create index: %v", err) return err } } return nil } tx, err := conn.Begin() if err != nil { clog.Errorf("Couldn't begin creation transaction: %s", err) return err } _, err = tx.Exec(fl.NodesTable) if err != nil { tx.Rollback() err = fl.Error(err) clog.Errorf("Cannot create nodes table: %v", err) return err } _, err = tx.Exec(fl.QuadsTable) if err != nil { tx.Rollback() err = fl.Error(err) clog.Errorf("Cannot create quad table: %v", err) return err } for _, index := range fl.Indexes(options) { if _, err = tx.Exec(index); err != nil { clog.Errorf("Cannot create index: %v", err) tx.Rollback() return err } } tx.Commit() return nil } func newQuadStore(addr string, options graph.Options) (graph.QuadStore, error) { flavor, _, _ := options.StringKey("flavor") if flavor == "" { flavor = defaultFlavor } fl, ok := flavors[flavor] if !ok { return nil, fmt.Errorf("unsupported sql flavor: %s", flavor) } dr := fl.Driver if dr == "" { dr = fl.Name } var qs QuadStore conn, err := connect(addr, dr, options) if err != nil { return nil, err } localOpt, localOptOk, err := options.BoolKey("local_optimize") if err != nil { return nil, err } qs.db = conn qs.flavor = fl qs.size = -1 qs.sizes = lru.New(1024) qs.ids = lru.New(1024) // Skip size checking by default. 
qs.noSizes = true if localOptOk { if localOpt { qs.noSizes = false } } qs.useEstimates, _, err = options.BoolKey("use_estimates") if err != nil { return nil, err } return &qs, nil } func marshalQuadDirections(q quad.Quad) (s, p, o, l []byte, err error) { s, err = pquads.MarshalValue(q.Subject) if err != nil { return } p, err = pquads.MarshalValue(q.Predicate) if err != nil { return } o, err = pquads.MarshalValue(q.Object) if err != nil { return } l, err = pquads.MarshalValue(q.Label) if err != nil { return } return } func escapeNullByte(s string) string { return strings.Replace(s, "\u0000", `\x00`, -1) } func unescapeNullByte(s string) string { return strings.Replace(s, `\x00`, "\u0000", -1) } func nodeValues(h NodeHash, v quad.Value) (int, []interface{}, error) { var ( nodeKey int values = []interface{}{h.toSQL(), nil, nil}[:1] ) switch v := v.(type) { case quad.IRI: nodeKey = 1 values = append(values, string(v), true) case quad.BNode: nodeKey = 2 values = append(values, string(v), true) case quad.String: nodeKey = 3 values = append(values, escapeNullByte(string(v))) case quad.TypedString: nodeKey = 4 values = append(values, escapeNullByte(string(v.Value)), string(v.Type)) case quad.LangString: nodeKey = 5 values = append(values, escapeNullByte(string(v.Value)), v.Lang) case quad.Int: nodeKey = 6 values = append(values, int64(v)) case quad.Bool: nodeKey = 7 values = append(values, bool(v)) case quad.Float: nodeKey = 8 values = append(values, float64(v)) case quad.Time: nodeKey = 9 values = append(values, time.Time(v)) default: nodeKey = 0 p, err := pquads.MarshalValue(v) if err != nil { clog.Errorf("couldn't marshal value: %v", err) return 0, nil, err } values = append(values, p) } return nodeKey, values, nil } func (qs *QuadStore) ApplyDeltas(in []graph.Delta, opts graph.IgnoreOpts) error { tx, err := qs.db.Begin() if err != nil { clog.Errorf("couldn't begin write transaction: %v", err) return err } err = qs.flavor.RunTx(tx, in, opts) if err != nil { tx.Rollback() return err } qs.size = -1 // TODO(barakmich): Sync size with writes. return tx.Commit() } func (qs *QuadStore) Quad(val graph.Value) quad.Quad { h := val.(QuadHashes) return quad.Quad{ Subject: qs.NameOf(h.Get(quad.Subject)), Predicate: qs.NameOf(h.Get(quad.Predicate)), Object: qs.NameOf(h.Get(quad.Object)), Label: qs.NameOf(h.Get(quad.Label)), } } func (qs *QuadStore) QuadIterator(d quad.Direction, val graph.Value) graph.Iterator { return newSQLLinkIterator(qs, d, val.(NodeHash)) } func (qs *QuadStore) NodesAllIterator() graph.Iterator { return NewAllIterator(qs, "nodes") } func (qs *QuadStore) QuadsAllIterator() graph.Iterator { return NewAllIterator(qs, "quads") } func (qs *QuadStore) ValueOf(s quad.Value) graph.Value { return NodeHash(hashOf(s)) } // NullTime represents a time.Time that may be null. NullTime implements the // sql.Scanner interface so it can be used as a scan destination, similar to // sql.NullString. type NullTime struct { Time time.Time Valid bool // Valid is true if Time is not NULL } // Scan implements the Scanner interface. func (nt *NullTime) Scan(value interface{}) error { if value == nil { nt.Time, nt.Valid = time.Time{}, false return nil } switch value := value.(type) { case time.Time: nt.Time, nt.Valid = value, true case []byte: t, err := time.Parse("2006-01-02 15:04:05.999999", string(value)) if err != nil { return err } nt.Time, nt.Valid = t, true default: return fmt.Errorf("unsupported time format: %T: %v", value, value) } return nil } // Value implements the driver Valuer interface. 
func (nt NullTime) Value() (driver.Value, error) { if !nt.Valid { return nil, nil } return nt.Time, nil }
clog.Infof("NameOf was nil") } return nil } else if v, ok := v.(graph.PreFetchedValue); ok { return v.NameOf() } hash := v.(NodeHash) if !hash.Valid() { if clog.V(2) { clog.Infof("NameOf was nil") } return nil } if val, ok := qs.ids.Get(hash.String()); ok { return val.(quad.Value) } query := `SELECT value, value_string, datatype, language, iri, bnode, value_int, value_bool, value_float, value_time FROM nodes WHERE hash = ` + qs.flavor.Placeholder(1) + ` LIMIT 1;` c := qs.db.QueryRow(query, hash.toSQL()) var ( data []byte str sql.NullString typ sql.NullString lang sql.NullString iri sql.NullBool bnode sql.NullBool vint sql.NullInt64 vbool sql.NullBool vfloat sql.NullFloat64 vtime NullTime ) if err := c.Scan( &data, &str, &typ, &lang, &iri, &bnode, &vint, &vbool, &vfloat, &vtime, ); err != nil { clog.Errorf("Couldn't execute value lookup: %v", err) return nil } var val quad.Value if str.Valid { if iri.Bool { val = quad.IRI(str.String) } else if bnode.Bool { val = quad.BNode(str.String) } else if lang.Valid { val = quad.LangString{ Value: quad.String(unescapeNullByte(str.String)), Lang: lang.String, } } else if typ.Valid { val = quad.TypedString{ Value: quad.String(unescapeNullByte(str.String)), Type: quad.IRI(typ.String), } } else { val = quad.String(unescapeNullByte(str.String)) } } else if vint.Valid { val = quad.Int(vint.Int64) } else if vbool.Valid { val = quad.Bool(vbool.Bool) } else if vfloat.Valid { val = quad.Float(vfloat.Float64) } else if vtime.Valid { val = quad.Time(vtime.Time) } else { qv, err := pquads.UnmarshalValue(data) if err != nil { clog.Errorf("Couldn't unmarshal value: %v", err) return nil } val = qv } if val != nil { qs.ids.Put(hash.String(), val) } return val } func (qs *QuadStore) Size() int64 { if qs.size != -1 { return qs.size } query := "SELECT COUNT(*) FROM quads;" if qs.useEstimates && qs.flavor.Estimated != nil { query = qs.flavor.Estimated("quads") } c := qs.db.QueryRow(query) err := c.Scan(&qs.size) if err != nil { clog.Errorf("Couldn't execute COUNT: %v", err) return 0 } return qs.size } func (qs *QuadStore) Horizon() graph.PrimaryKey { var horizon int64 err := qs.db.QueryRow("SELECT horizon FROM quads ORDER BY horizon DESC LIMIT 1;").Scan(&horizon) if err != nil { if err != sql.ErrNoRows { clog.Errorf("Couldn't execute horizon: %v", err) } return graph.NewSequentialKey(0) } return graph.NewSequentialKey(horizon) } func (qs *QuadStore) FixedIterator() graph.FixedIterator { return iterator.NewFixed(iterator.Identity) } func (qs *QuadStore) Close() error { return qs.db.Close() } func (qs *QuadStore) QuadDirection(in graph.Value, d quad.Direction) graph.Value { return NodeHash(in.(QuadHashes).Get(d)) } func (qs *QuadStore) Type() string { return QuadStoreType } func (qs *QuadStore) sizeForIterator(isAll bool, dir quad.Direction, hash NodeHash) int64 { var err error if isAll { return qs.Size() } if qs.noSizes { if dir == quad.Predicate { return (qs.Size() / 100) + 1 } return (qs.Size() / 1000) + 1 } if val, ok := qs.sizes.Get(hash.String() + string(dir.Prefix())); ok { return val.(int64) } var size int64 if clog.V(4) { clog.Infof("sql: getting size for select %s, %v", dir.String(), hash) } err = qs.db.QueryRow( fmt.Sprintf("SELECT count(*) FROM quads WHERE %s_hash = "+qs.flavor.Placeholder(1)+";", dir.String()), hash.toSQL()).Scan(&size) if err != nil { clog.Errorf("Error getting size from SQL database: %v", err) return 0 } qs.sizes.Put(hash.String()+string(dir.Prefix()), size) return size }
func (qs *QuadStore) NameOf(v graph.Value) quad.Value { if v == nil { if clog.V(2) {
random_line_split
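NodeHash.Scan in the record above is the standard way to read a fixed-size hash column through database/sql: treat nil and empty values as the zero hash, and reject anything whose length does not match. Here is a condensed, self-contained sketch of the same pattern; the Hash type and hashSize constant are illustrative assumptions, not Cayley's API.

package main

import "fmt"

const hashSize = 20 // assumed size for illustration; Cayley uses quad.HashSize

// Hash mirrors the NodeHash scanning pattern: nil or empty input yields
// the zero hash, any other length must match hashSize exactly.
type Hash [hashSize]byte

// Scan implements the database/sql Scanner contract for Hash.
func (h *Hash) Scan(src interface{}) error {
	if src == nil {
		*h = Hash{}
		return nil
	}
	b, ok := src.([]byte)
	if !ok {
		return fmt.Errorf("cannot scan %T to Hash", src)
	}
	switch {
	case len(b) == 0:
		*h = Hash{}
	case len(b) != hashSize:
		return fmt.Errorf("unexpected hash length: %d", len(b))
	default:
		copy(h[:], b)
	}
	return nil
}

func main() {
	var h Hash
	_ = h.Scan([]byte{}) // empty input: zero value, no error
	fmt.Println(h)
}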
quadstore.go
package sql import ( "database/sql" "database/sql/driver" "encoding/hex" "fmt" "strings" "time" "github.com/cayleygraph/cayley/clog" "github.com/cayleygraph/cayley/graph" "github.com/cayleygraph/cayley/graph/iterator" "github.com/cayleygraph/cayley/internal/lru" "github.com/cayleygraph/cayley/quad" "github.com/cayleygraph/cayley/quad/pquads" ) const QuadStoreType = "sql" func init() { graph.RegisterQuadStore(QuadStoreType, graph.QuadStoreRegistration{ NewFunc: newQuadStore, UpgradeFunc: nil, InitFunc: createSQLTables, IsPersistent: true, }) } type NodeHash [quad.HashSize]byte func (NodeHash) IsNode() bool { return true } func (h NodeHash) Key() interface{} { return h } func (h NodeHash) Valid() bool { return h != NodeHash{} } func (h NodeHash) toSQL() interface{} { if !h.Valid() { return nil } return []byte(h[:]) } func (h NodeHash) String() string { if !h.Valid() { return "" } return hex.EncodeToString(h[:]) } func (h *NodeHash) Scan(src interface{}) error { if src == nil { *h = NodeHash{} return nil } b, ok := src.([]byte) if !ok { return fmt.Errorf("cannot scan %T to NodeHash", src) } if len(b) == 0 { *h = NodeHash{} return nil } else if len(b) != quad.HashSize { return fmt.Errorf("unexpected hash length: %d", len(b)) } copy((*h)[:], b) return nil } func hashOf(s quad.Value) (out NodeHash) { if s == nil { return } quad.HashTo(s, out[:]) return } type QuadHashes [4]NodeHash func (QuadHashes) IsNode() bool { return false } func (q QuadHashes) Key() interface{} { return q } func (q QuadHashes) Get(d quad.Direction) NodeHash { switch d { case quad.Subject: return q[0] case quad.Predicate: return q[1] case quad.Object: return q[2] case quad.Label: return q[3] } panic(fmt.Errorf("unknown direction: %v", d)) } type QuadStore struct { db *sql.DB flavor Flavor size int64 ids *lru.Cache sizes *lru.Cache noSizes bool useEstimates bool } type Flavor struct { Name string Driver string NodesTable string QuadsTable string FieldQuote rune Placeholder func(int) string Indexes func(graph.Options) []string Error func(error) error Estimated func(table string) string RunTx func(tx *sql.Tx, in []graph.Delta, opts graph.IgnoreOpts) error NoSchemaChangesInTx bool } var flavors = make(map[string]Flavor) func RegisterFlavor(f Flavor) { flavors[f.Name] = f } const defaultFlavor = "postgres" func connect(addr string, flavor string, opts graph.Options) (*sql.DB, error) { // TODO(barakmich): Parse options for more friendly addr conn, err := sql.Open(flavor, addr) if err != nil { clog.Errorf("Couldn't open database at %s: %#v", addr, err) return nil, err } // "Open may just validate its arguments without creating a connection to the database." // "To verify that the data source name is valid, call Ping." 
// Source: http://golang.org/pkg/database/sql/#Open if err := conn.Ping(); err != nil { clog.Errorf("Couldn't open database at %s: %#v", addr, err) return nil, err } return conn, nil } var nodesColumns = []string{ "hash", "value", "value_string", "datatype", "language", "iri", "bnode", "value_int", "value_bool", "value_float", "value_time", } var nodeInsertColumns = [][]string{ {"value"}, {"value_string", "iri"}, {"value_string", "bnode"}, {"value_string"}, {"value_string", "datatype"}, {"value_string", "language"}, {"value_int"}, {"value_bool"}, {"value_float"}, {"value_time"}, } func createSQLTables(addr string, options graph.Options) error { flavor, _, _ := options.StringKey("flavor") if flavor == "" { flavor = defaultFlavor } fl, ok := flavors[flavor] if !ok { return fmt.Errorf("unsupported sql flavor: %s", flavor) } dr := fl.Driver if dr == "" { dr = fl.Name } conn, err := connect(addr, dr, options) if err != nil { return err } defer conn.Close() if fl.NoSchemaChangesInTx { _, err = conn.Exec(fl.NodesTable) if err != nil { err = fl.Error(err) clog.Errorf("Cannot create nodes table: %v", err) return err } _, err = conn.Exec(fl.QuadsTable) if err != nil { err = fl.Error(err) clog.Errorf("Cannot create quad table: %v", err) return err } for _, index := range fl.Indexes(options) { if _, err = conn.Exec(index); err != nil { clog.Errorf("Cannot create index: %v", err) return err } } return nil } tx, err := conn.Begin() if err != nil { clog.Errorf("Couldn't begin creation transaction: %s", err) return err } _, err = tx.Exec(fl.NodesTable) if err != nil { tx.Rollback() err = fl.Error(err) clog.Errorf("Cannot create nodes table: %v", err) return err } _, err = tx.Exec(fl.QuadsTable) if err != nil { tx.Rollback() err = fl.Error(err) clog.Errorf("Cannot create quad table: %v", err) return err } for _, index := range fl.Indexes(options) { if _, err = tx.Exec(index); err != nil { clog.Errorf("Cannot create index: %v", err) tx.Rollback() return err } } tx.Commit() return nil } func newQuadStore(addr string, options graph.Options) (graph.QuadStore, error) { flavor, _, _ := options.StringKey("flavor") if flavor == "" { flavor = defaultFlavor } fl, ok := flavors[flavor] if !ok { return nil, fmt.Errorf("unsupported sql flavor: %s", flavor) } dr := fl.Driver if dr == "" { dr = fl.Name } var qs QuadStore conn, err := connect(addr, dr, options) if err != nil { return nil, err } localOpt, localOptOk, err := options.BoolKey("local_optimize") if err != nil { return nil, err } qs.db = conn qs.flavor = fl qs.size = -1 qs.sizes = lru.New(1024) qs.ids = lru.New(1024) // Skip size checking by default. 
qs.noSizes = true if localOptOk { if localOpt { qs.noSizes = false } } qs.useEstimates, _, err = options.BoolKey("use_estimates") if err != nil { return nil, err } return &qs, nil } func marshalQuadDirections(q quad.Quad) (s, p, o, l []byte, err error) { s, err = pquads.MarshalValue(q.Subject) if err != nil { return } p, err = pquads.MarshalValue(q.Predicate) if err != nil { return } o, err = pquads.MarshalValue(q.Object) if err != nil { return } l, err = pquads.MarshalValue(q.Label) if err != nil { return } return } func escapeNullByte(s string) string { return strings.Replace(s, "\u0000", `\x00`, -1) } func unescapeNullByte(s string) string { return strings.Replace(s, `\x00`, "\u0000", -1) } func nodeValues(h NodeHash, v quad.Value) (int, []interface{}, error) { var ( nodeKey int values = []interface{}{h.toSQL(), nil, nil}[:1] ) switch v := v.(type) { case quad.IRI: nodeKey = 1 values = append(values, string(v), true) case quad.BNode: nodeKey = 2 values = append(values, string(v), true) case quad.String: nodeKey = 3 values = append(values, escapeNullByte(string(v))) case quad.TypedString: nodeKey = 4 values = append(values, escapeNullByte(string(v.Value)), string(v.Type)) case quad.LangString: nodeKey = 5 values = append(values, escapeNullByte(string(v.Value)), v.Lang) case quad.Int: nodeKey = 6 values = append(values, int64(v)) case quad.Bool: nodeKey = 7 values = append(values, bool(v)) case quad.Float: nodeKey = 8 values = append(values, float64(v)) case quad.Time: nodeKey = 9 values = append(values, time.Time(v)) default: nodeKey = 0 p, err := pquads.MarshalValue(v) if err != nil { clog.Errorf("couldn't marshal value: %v", err) return 0, nil, err } values = append(values, p) } return nodeKey, values, nil } func (qs *QuadStore) ApplyDeltas(in []graph.Delta, opts graph.IgnoreOpts) error { tx, err := qs.db.Begin() if err != nil { clog.Errorf("couldn't begin write transaction: %v", err) return err } err = qs.flavor.RunTx(tx, in, opts) if err != nil { tx.Rollback() return err } qs.size = -1 // TODO(barakmich): Sync size with writes. return tx.Commit() } func (qs *QuadStore) Quad(val graph.Value) quad.Quad { h := val.(QuadHashes) return quad.Quad{ Subject: qs.NameOf(h.Get(quad.Subject)), Predicate: qs.NameOf(h.Get(quad.Predicate)), Object: qs.NameOf(h.Get(quad.Object)), Label: qs.NameOf(h.Get(quad.Label)), } } func (qs *QuadStore) QuadIterator(d quad.Direction, val graph.Value) graph.Iterator { return newSQLLinkIterator(qs, d, val.(NodeHash)) } func (qs *QuadStore) NodesAllIterator() graph.Iterator { return NewAllIterator(qs, "nodes") } func (qs *QuadStore) QuadsAllIterator() graph.Iterator { return NewAllIterator(qs, "quads") } func (qs *QuadStore) ValueOf(s quad.Value) graph.Value { return NodeHash(hashOf(s)) } // NullTime represents a time.Time that may be null. NullTime implements the // sql.Scanner interface so it can be used as a scan destination, similar to // sql.NullString. type NullTime struct { Time time.Time Valid bool // Valid is true if Time is not NULL } // Scan implements the Scanner interface. func (nt *NullTime) Scan(value interface{}) error { if value == nil { nt.Time, nt.Valid = time.Time{}, false return nil } switch value := value.(type) { case time.Time: nt.Time, nt.Valid = value, true case []byte: t, err := time.Parse("2006-01-02 15:04:05.999999", string(value)) if err != nil { return err } nt.Time, nt.Valid = t, true default: return fmt.Errorf("unsupported time format: %T: %v", value, value) } return nil } // Value implements the driver Valuer interface. 
func (nt NullTime) Value() (driver.Value, error) { if !nt.Valid { return nil, nil } return nt.Time, nil } func (qs *QuadStore) NameOf(v graph.Value) quad.Value { if v == nil { if clog.V(2) { clog.Infof("NameOf was nil") } return nil } else if v, ok := v.(graph.PreFetchedValue); ok { return v.NameOf() } hash := v.(NodeHash) if !hash.Valid() { if clog.V(2) { clog.Infof("NameOf was nil") } return nil } if val, ok := qs.ids.Get(hash.String()); ok { return val.(quad.Value) } query := `SELECT value, value_string, datatype, language, iri, bnode, value_int, value_bool, value_float, value_time FROM nodes WHERE hash = ` + qs.flavor.Placeholder(1) + ` LIMIT 1;` c := qs.db.QueryRow(query, hash.toSQL()) var ( data []byte str sql.NullString typ sql.NullString lang sql.NullString iri sql.NullBool bnode sql.NullBool vint sql.NullInt64 vbool sql.NullBool vfloat sql.NullFloat64 vtime NullTime ) if err := c.Scan( &data, &str, &typ, &lang, &iri, &bnode, &vint, &vbool, &vfloat, &vtime, ); err != nil { clog.Errorf("Couldn't execute value lookup: %v", err) return nil } var val quad.Value if str.Valid { if iri.Bool { val = quad.IRI(str.String) } else if bnode.Bool { val = quad.BNode(str.String) } else if lang.Valid { val = quad.LangString{ Value: quad.String(unescapeNullByte(str.String)), Lang: lang.String, } } else if typ.Valid { val = quad.TypedString{ Value: quad.String(unescapeNullByte(str.String)), Type: quad.IRI(typ.String), } } else { val = quad.String(unescapeNullByte(str.String)) } } else if vint.Valid { val = quad.Int(vint.Int64) } else if vbool.Valid { val = quad.Bool(vbool.Bool) } else if vfloat.Valid { val = quad.Float(vfloat.Float64) } else if vtime.Valid { val = quad.Time(vtime.Time) } else { qv, err := pquads.UnmarshalValue(data) if err != nil { clog.Errorf("Couldn't unmarshal value: %v", err) return nil } val = qv } if val != nil { qs.ids.Put(hash.String(), val) } return val } func (qs *QuadStore) Size() int64 { if qs.size != -1 { return qs.size } query := "SELECT COUNT(*) FROM quads;" if qs.useEstimates && qs.flavor.Estimated != nil { query = qs.flavor.Estimated("quads") } c := qs.db.QueryRow(query) err := c.Scan(&qs.size) if err != nil { clog.Errorf("Couldn't execute COUNT: %v", err) return 0 } return qs.size } func (qs *QuadStore) Horizon() graph.PrimaryKey { var horizon int64 err := qs.db.QueryRow("SELECT horizon FROM quads ORDER BY horizon DESC LIMIT 1;").Scan(&horizon) if err != nil { if err != sql.ErrNoRows { clog.Errorf("Couldn't execute horizon: %v", err) } return graph.NewSequentialKey(0) } return graph.NewSequentialKey(horizon) } func (qs *QuadStore) FixedIterator() graph.FixedIterator { return iterator.NewFixed(iterator.Identity) } func (qs *QuadStore) Close() error { return qs.db.Close() } func (qs *QuadStore) QuadDirection(in graph.Value, d quad.Direction) graph.Value { return NodeHash(in.(QuadHashes).Get(d)) } func (qs *QuadStore) Type() string { return QuadStoreType } func (qs *QuadStore)
(isAll bool, dir quad.Direction, hash NodeHash) int64 { var err error if isAll { return qs.Size() } if qs.noSizes { if dir == quad.Predicate { return (qs.Size() / 100) + 1 } return (qs.Size() / 1000) + 1 } if val, ok := qs.sizes.Get(hash.String() + string(dir.Prefix())); ok { return val.(int64) } var size int64 if clog.V(4) { clog.Infof("sql: getting size for select %s, %v", dir.String(), hash) } err = qs.db.QueryRow( fmt.Sprintf("SELECT count(*) FROM quads WHERE %s_hash = "+qs.flavor.Placeholder(1)+";", dir.String()), hash.toSQL()).Scan(&size) if err != nil { clog.Errorf("Error getting size from SQL database: %v", err) return 0 } qs.sizes.Put(hash.String()+string(dir.Prefix()), size) return size }
sizeForIterator
identifier_name
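When local_optimize is off, the sizeForIterator method named above never hits the database; it falls back to fixed selectivity guesses. Distilled into a standalone function below — a restatement of the constants in the code, not a replacement for the cached COUNT path it otherwise takes.

package main

import "fmt"

// estimateSize restates the fallback heuristic sizeForIterator uses when
// per-value counting is disabled (noSizes): predicates are assumed to match
// about 1% of all quads, any other direction about 0.1%.
func estimateSize(total int64, isPredicate bool) int64 {
	if isPredicate {
		return total/100 + 1
	}
	return total/1000 + 1
}

func main() {
	fmt.Println(estimateSize(1_000_000, true))  // 10001
	fmt.Println(estimateSize(1_000_000, false)) // 1001
}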
quadstore.go
package sql import ( "database/sql" "database/sql/driver" "encoding/hex" "fmt" "strings" "time" "github.com/cayleygraph/cayley/clog" "github.com/cayleygraph/cayley/graph" "github.com/cayleygraph/cayley/graph/iterator" "github.com/cayleygraph/cayley/internal/lru" "github.com/cayleygraph/cayley/quad" "github.com/cayleygraph/cayley/quad/pquads" ) const QuadStoreType = "sql" func init() { graph.RegisterQuadStore(QuadStoreType, graph.QuadStoreRegistration{ NewFunc: newQuadStore, UpgradeFunc: nil, InitFunc: createSQLTables, IsPersistent: true, }) } type NodeHash [quad.HashSize]byte func (NodeHash) IsNode() bool { return true } func (h NodeHash) Key() interface{} { return h } func (h NodeHash) Valid() bool { return h != NodeHash{} } func (h NodeHash) toSQL() interface{} { if !h.Valid() { return nil } return []byte(h[:]) } func (h NodeHash) String() string { if !h.Valid() { return "" } return hex.EncodeToString(h[:]) } func (h *NodeHash) Scan(src interface{}) error { if src == nil { *h = NodeHash{} return nil } b, ok := src.([]byte) if !ok { return fmt.Errorf("cannot scan %T to NodeHash", src) } if len(b) == 0 { *h = NodeHash{} return nil } else if len(b) != quad.HashSize { return fmt.Errorf("unexpected hash length: %d", len(b)) } copy((*h)[:], b) return nil } func hashOf(s quad.Value) (out NodeHash) { if s == nil { return } quad.HashTo(s, out[:]) return } type QuadHashes [4]NodeHash func (QuadHashes) IsNode() bool { return false } func (q QuadHashes) Key() interface{} { return q } func (q QuadHashes) Get(d quad.Direction) NodeHash { switch d { case quad.Subject: return q[0] case quad.Predicate: return q[1] case quad.Object: return q[2] case quad.Label: return q[3] } panic(fmt.Errorf("unknown direction: %v", d)) } type QuadStore struct { db *sql.DB flavor Flavor size int64 ids *lru.Cache sizes *lru.Cache noSizes bool useEstimates bool } type Flavor struct { Name string Driver string NodesTable string QuadsTable string FieldQuote rune Placeholder func(int) string Indexes func(graph.Options) []string Error func(error) error Estimated func(table string) string RunTx func(tx *sql.Tx, in []graph.Delta, opts graph.IgnoreOpts) error NoSchemaChangesInTx bool } var flavors = make(map[string]Flavor) func RegisterFlavor(f Flavor) { flavors[f.Name] = f } const defaultFlavor = "postgres" func connect(addr string, flavor string, opts graph.Options) (*sql.DB, error) { // TODO(barakmich): Parse options for more friendly addr conn, err := sql.Open(flavor, addr) if err != nil { clog.Errorf("Couldn't open database at %s: %#v", addr, err) return nil, err } // "Open may just validate its arguments without creating a connection to the database." // "To verify that the data source name is valid, call Ping." 
// Source: http://golang.org/pkg/database/sql/#Open if err := conn.Ping(); err != nil { clog.Errorf("Couldn't open database at %s: %#v", addr, err) return nil, err } return conn, nil } var nodesColumns = []string{ "hash", "value", "value_string", "datatype", "language", "iri", "bnode", "value_int", "value_bool", "value_float", "value_time", } var nodeInsertColumns = [][]string{ {"value"}, {"value_string", "iri"}, {"value_string", "bnode"}, {"value_string"}, {"value_string", "datatype"}, {"value_string", "language"}, {"value_int"}, {"value_bool"}, {"value_float"}, {"value_time"}, } func createSQLTables(addr string, options graph.Options) error { flavor, _, _ := options.StringKey("flavor") if flavor == "" { flavor = defaultFlavor } fl, ok := flavors[flavor] if !ok { return fmt.Errorf("unsupported sql flavor: %s", flavor) } dr := fl.Driver if dr == "" { dr = fl.Name } conn, err := connect(addr, dr, options) if err != nil { return err } defer conn.Close() if fl.NoSchemaChangesInTx { _, err = conn.Exec(fl.NodesTable) if err != nil { err = fl.Error(err) clog.Errorf("Cannot create nodes table: %v", err) return err } _, err = conn.Exec(fl.QuadsTable) if err != nil { err = fl.Error(err) clog.Errorf("Cannot create quad table: %v", err) return err } for _, index := range fl.Indexes(options) { if _, err = conn.Exec(index); err != nil { clog.Errorf("Cannot create index: %v", err) return err } } return nil } tx, err := conn.Begin() if err != nil { clog.Errorf("Couldn't begin creation transaction: %s", err) return err } _, err = tx.Exec(fl.NodesTable) if err != nil { tx.Rollback() err = fl.Error(err) clog.Errorf("Cannot create nodes table: %v", err) return err } _, err = tx.Exec(fl.QuadsTable) if err != nil { tx.Rollback() err = fl.Error(err) clog.Errorf("Cannot create quad table: %v", err) return err } for _, index := range fl.Indexes(options) { if _, err = tx.Exec(index); err != nil { clog.Errorf("Cannot create index: %v", err) tx.Rollback() return err } } tx.Commit() return nil } func newQuadStore(addr string, options graph.Options) (graph.QuadStore, error) { flavor, _, _ := options.StringKey("flavor") if flavor == "" { flavor = defaultFlavor } fl, ok := flavors[flavor] if !ok { return nil, fmt.Errorf("unsupported sql flavor: %s", flavor) } dr := fl.Driver if dr == "" { dr = fl.Name } var qs QuadStore conn, err := connect(addr, dr, options) if err != nil { return nil, err } localOpt, localOptOk, err := options.BoolKey("local_optimize") if err != nil { return nil, err } qs.db = conn qs.flavor = fl qs.size = -1 qs.sizes = lru.New(1024) qs.ids = lru.New(1024) // Skip size checking by default. 
qs.noSizes = true if localOptOk { if localOpt { qs.noSizes = false } } qs.useEstimates, _, err = options.BoolKey("use_estimates") if err != nil { return nil, err } return &qs, nil } func marshalQuadDirections(q quad.Quad) (s, p, o, l []byte, err error) { s, err = pquads.MarshalValue(q.Subject) if err != nil { return } p, err = pquads.MarshalValue(q.Predicate) if err != nil { return } o, err = pquads.MarshalValue(q.Object) if err != nil { return } l, err = pquads.MarshalValue(q.Label) if err != nil { return } return } func escapeNullByte(s string) string { return strings.Replace(s, "\u0000", `\x00`, -1) } func unescapeNullByte(s string) string { return strings.Replace(s, `\x00`, "\u0000", -1) } func nodeValues(h NodeHash, v quad.Value) (int, []interface{}, error) { var ( nodeKey int values = []interface{}{h.toSQL(), nil, nil}[:1] ) switch v := v.(type) { case quad.IRI: nodeKey = 1 values = append(values, string(v), true) case quad.BNode: nodeKey = 2 values = append(values, string(v), true) case quad.String: nodeKey = 3 values = append(values, escapeNullByte(string(v))) case quad.TypedString: nodeKey = 4 values = append(values, escapeNullByte(string(v.Value)), string(v.Type)) case quad.LangString: nodeKey = 5 values = append(values, escapeNullByte(string(v.Value)), v.Lang) case quad.Int: nodeKey = 6 values = append(values, int64(v)) case quad.Bool: nodeKey = 7 values = append(values, bool(v)) case quad.Float: nodeKey = 8 values = append(values, float64(v)) case quad.Time: nodeKey = 9 values = append(values, time.Time(v)) default: nodeKey = 0 p, err := pquads.MarshalValue(v) if err != nil { clog.Errorf("couldn't marshal value: %v", err) return 0, nil, err } values = append(values, p) } return nodeKey, values, nil } func (qs *QuadStore) ApplyDeltas(in []graph.Delta, opts graph.IgnoreOpts) error { tx, err := qs.db.Begin() if err != nil { clog.Errorf("couldn't begin write transaction: %v", err) return err } err = qs.flavor.RunTx(tx, in, opts) if err != nil { tx.Rollback() return err } qs.size = -1 // TODO(barakmich): Sync size with writes. return tx.Commit() } func (qs *QuadStore) Quad(val graph.Value) quad.Quad { h := val.(QuadHashes) return quad.Quad{ Subject: qs.NameOf(h.Get(quad.Subject)), Predicate: qs.NameOf(h.Get(quad.Predicate)), Object: qs.NameOf(h.Get(quad.Object)), Label: qs.NameOf(h.Get(quad.Label)), } } func (qs *QuadStore) QuadIterator(d quad.Direction, val graph.Value) graph.Iterator { return newSQLLinkIterator(qs, d, val.(NodeHash)) } func (qs *QuadStore) NodesAllIterator() graph.Iterator { return NewAllIterator(qs, "nodes") } func (qs *QuadStore) QuadsAllIterator() graph.Iterator { return NewAllIterator(qs, "quads") } func (qs *QuadStore) ValueOf(s quad.Value) graph.Value { return NodeHash(hashOf(s)) } // NullTime represents a time.Time that may be null. NullTime implements the // sql.Scanner interface so it can be used as a scan destination, similar to // sql.NullString. type NullTime struct { Time time.Time Valid bool // Valid is true if Time is not NULL } // Scan implements the Scanner interface. func (nt *NullTime) Scan(value interface{}) error { if value == nil { nt.Time, nt.Valid = time.Time{}, false return nil } switch value := value.(type) { case time.Time: nt.Time, nt.Valid = value, true case []byte: t, err := time.Parse("2006-01-02 15:04:05.999999", string(value)) if err != nil { return err } nt.Time, nt.Valid = t, true default: return fmt.Errorf("unsupported time format: %T: %v", value, value) } return nil } // Value implements the driver Valuer interface. 
func (nt NullTime) Value() (driver.Value, error) { if !nt.Valid { return nil, nil } return nt.Time, nil } func (qs *QuadStore) NameOf(v graph.Value) quad.Value { if v == nil { if clog.V(2) { clog.Infof("NameOf was nil") } return nil } else if v, ok := v.(graph.PreFetchedValue); ok { return v.NameOf() } hash := v.(NodeHash) if !hash.Valid() { if clog.V(2) { clog.Infof("NameOf was nil") } return nil } if val, ok := qs.ids.Get(hash.String()); ok { return val.(quad.Value) } query := `SELECT value, value_string, datatype, language, iri, bnode, value_int, value_bool, value_float, value_time FROM nodes WHERE hash = ` + qs.flavor.Placeholder(1) + ` LIMIT 1;` c := qs.db.QueryRow(query, hash.toSQL()) var ( data []byte str sql.NullString typ sql.NullString lang sql.NullString iri sql.NullBool bnode sql.NullBool vint sql.NullInt64 vbool sql.NullBool vfloat sql.NullFloat64 vtime NullTime ) if err := c.Scan( &data, &str, &typ, &lang, &iri, &bnode, &vint, &vbool, &vfloat, &vtime, ); err != nil { clog.Errorf("Couldn't execute value lookup: %v", err) return nil } var val quad.Value if str.Valid { if iri.Bool { val = quad.IRI(str.String) } else if bnode.Bool { val = quad.BNode(str.String) } else if lang.Valid { val = quad.LangString{ Value: quad.String(unescapeNullByte(str.String)), Lang: lang.String, } } else if typ.Valid { val = quad.TypedString{ Value: quad.String(unescapeNullByte(str.String)), Type: quad.IRI(typ.String), } } else { val = quad.String(unescapeNullByte(str.String)) } } else if vint.Valid { val = quad.Int(vint.Int64) } else if vbool.Valid { val = quad.Bool(vbool.Bool) } else if vfloat.Valid { val = quad.Float(vfloat.Float64) } else if vtime.Valid { val = quad.Time(vtime.Time) } else { qv, err := pquads.UnmarshalValue(data) if err != nil { clog.Errorf("Couldn't unmarshal value: %v", err) return nil } val = qv } if val != nil { qs.ids.Put(hash.String(), val) } return val } func (qs *QuadStore) Size() int64 { if qs.size != -1 { return qs.size } query := "SELECT COUNT(*) FROM quads;" if qs.useEstimates && qs.flavor.Estimated != nil { query = qs.flavor.Estimated("quads") } c := qs.db.QueryRow(query) err := c.Scan(&qs.size) if err != nil { clog.Errorf("Couldn't execute COUNT: %v", err) return 0 } return qs.size } func (qs *QuadStore) Horizon() graph.PrimaryKey { var horizon int64 err := qs.db.QueryRow("SELECT horizon FROM quads ORDER BY horizon DESC LIMIT 1;").Scan(&horizon) if err != nil
return graph.NewSequentialKey(horizon) } func (qs *QuadStore) FixedIterator() graph.FixedIterator { return iterator.NewFixed(iterator.Identity) } func (qs *QuadStore) Close() error { return qs.db.Close() } func (qs *QuadStore) QuadDirection(in graph.Value, d quad.Direction) graph.Value { return NodeHash(in.(QuadHashes).Get(d)) } func (qs *QuadStore) Type() string { return QuadStoreType } func (qs *QuadStore) sizeForIterator(isAll bool, dir quad.Direction, hash NodeHash) int64 { var err error if isAll { return qs.Size() } if qs.noSizes { if dir == quad.Predicate { return (qs.Size() / 100) + 1 } return (qs.Size() / 1000) + 1 } if val, ok := qs.sizes.Get(hash.String() + string(dir.Prefix())); ok { return val.(int64) } var size int64 if clog.V(4) { clog.Infof("sql: getting size for select %s, %v", dir.String(), hash) } err = qs.db.QueryRow( fmt.Sprintf("SELECT count(*) FROM quads WHERE %s_hash = "+qs.flavor.Placeholder(1)+";", dir.String()), hash.toSQL()).Scan(&size) if err != nil { clog.Errorf("Error getting size from SQL database: %v", err) return 0 } qs.sizes.Put(hash.String()+string(dir.Prefix()), size) return size }
{ if err != sql.ErrNoRows { clog.Errorf("Couldn't execute horizon: %v", err) } return graph.NewSequentialKey(0) }
conditional_block
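The conditional block completed above is the usual database/sql idiom for a query that may match no rows: QueryRow().Scan() reports sql.ErrNoRows, which Horizon treats as an empty store rather than a failure. A runnable sketch of that idiom follows; the sqlite3 driver import is an assumption purely so the example runs, and any database/sql driver behaves the same way.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumed driver, for illustration only
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var max int64
	// A query guaranteed to return zero rows.
	err = db.QueryRow("SELECT 1 WHERE 1 = 0;").Scan(&max)
	if err != nil {
		if err != sql.ErrNoRows {
			log.Fatalf("query failed: %v", err) // a real error
		}
		max = 0 // no rows: fall back to a zero horizon, as Horizon() does
	}
	fmt.Println(max)
}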
quadstore.go
package sql import ( "database/sql" "database/sql/driver" "encoding/hex" "fmt" "strings" "time" "github.com/cayleygraph/cayley/clog" "github.com/cayleygraph/cayley/graph" "github.com/cayleygraph/cayley/graph/iterator" "github.com/cayleygraph/cayley/internal/lru" "github.com/cayleygraph/cayley/quad" "github.com/cayleygraph/cayley/quad/pquads" ) const QuadStoreType = "sql" func init() { graph.RegisterQuadStore(QuadStoreType, graph.QuadStoreRegistration{ NewFunc: newQuadStore, UpgradeFunc: nil, InitFunc: createSQLTables, IsPersistent: true, }) } type NodeHash [quad.HashSize]byte func (NodeHash) IsNode() bool { return true } func (h NodeHash) Key() interface{} { return h } func (h NodeHash) Valid() bool { return h != NodeHash{} } func (h NodeHash) toSQL() interface{} { if !h.Valid() { return nil } return []byte(h[:]) } func (h NodeHash) String() string { if !h.Valid() { return "" } return hex.EncodeToString(h[:]) } func (h *NodeHash) Scan(src interface{}) error { if src == nil { *h = NodeHash{} return nil } b, ok := src.([]byte) if !ok { return fmt.Errorf("cannot scan %T to NodeHash", src) } if len(b) == 0 { *h = NodeHash{} return nil } else if len(b) != quad.HashSize { return fmt.Errorf("unexpected hash length: %d", len(b)) } copy((*h)[:], b) return nil } func hashOf(s quad.Value) (out NodeHash) { if s == nil { return } quad.HashTo(s, out[:]) return } type QuadHashes [4]NodeHash func (QuadHashes) IsNode() bool { return false } func (q QuadHashes) Key() interface{} { return q } func (q QuadHashes) Get(d quad.Direction) NodeHash { switch d { case quad.Subject: return q[0] case quad.Predicate: return q[1] case quad.Object: return q[2] case quad.Label: return q[3] } panic(fmt.Errorf("unknown direction: %v", d)) } type QuadStore struct { db *sql.DB flavor Flavor size int64 ids *lru.Cache sizes *lru.Cache noSizes bool useEstimates bool } type Flavor struct { Name string Driver string NodesTable string QuadsTable string FieldQuote rune Placeholder func(int) string Indexes func(graph.Options) []string Error func(error) error Estimated func(table string) string RunTx func(tx *sql.Tx, in []graph.Delta, opts graph.IgnoreOpts) error NoSchemaChangesInTx bool } var flavors = make(map[string]Flavor) func RegisterFlavor(f Flavor) { flavors[f.Name] = f } const defaultFlavor = "postgres" func connect(addr string, flavor string, opts graph.Options) (*sql.DB, error) { // TODO(barakmich): Parse options for more friendly addr conn, err := sql.Open(flavor, addr) if err != nil { clog.Errorf("Couldn't open database at %s: %#v", addr, err) return nil, err } // "Open may just validate its arguments without creating a connection to the database." // "To verify that the data source name is valid, call Ping." 
// Source: http://golang.org/pkg/database/sql/#Open if err := conn.Ping(); err != nil { clog.Errorf("Couldn't open database at %s: %#v", addr, err) return nil, err } return conn, nil } var nodesColumns = []string{ "hash", "value", "value_string", "datatype", "language", "iri", "bnode", "value_int", "value_bool", "value_float", "value_time", } var nodeInsertColumns = [][]string{ {"value"}, {"value_string", "iri"}, {"value_string", "bnode"}, {"value_string"}, {"value_string", "datatype"}, {"value_string", "language"}, {"value_int"}, {"value_bool"}, {"value_float"}, {"value_time"}, } func createSQLTables(addr string, options graph.Options) error { flavor, _, _ := options.StringKey("flavor") if flavor == "" { flavor = defaultFlavor } fl, ok := flavors[flavor] if !ok { return fmt.Errorf("unsupported sql flavor: %s", flavor) } dr := fl.Driver if dr == "" { dr = fl.Name } conn, err := connect(addr, dr, options) if err != nil { return err } defer conn.Close() if fl.NoSchemaChangesInTx { _, err = conn.Exec(fl.NodesTable) if err != nil { err = fl.Error(err) clog.Errorf("Cannot create nodes table: %v", err) return err } _, err = conn.Exec(fl.QuadsTable) if err != nil { err = fl.Error(err) clog.Errorf("Cannot create quad table: %v", err) return err } for _, index := range fl.Indexes(options) { if _, err = conn.Exec(index); err != nil { clog.Errorf("Cannot create index: %v", err) return err } } return nil } tx, err := conn.Begin() if err != nil { clog.Errorf("Couldn't begin creation transaction: %s", err) return err } _, err = tx.Exec(fl.NodesTable) if err != nil { tx.Rollback() err = fl.Error(err) clog.Errorf("Cannot create nodes table: %v", err) return err } _, err = tx.Exec(fl.QuadsTable) if err != nil { tx.Rollback() err = fl.Error(err) clog.Errorf("Cannot create quad table: %v", err) return err } for _, index := range fl.Indexes(options) { if _, err = tx.Exec(index); err != nil { clog.Errorf("Cannot create index: %v", err) tx.Rollback() return err } } tx.Commit() return nil } func newQuadStore(addr string, options graph.Options) (graph.QuadStore, error) { flavor, _, _ := options.StringKey("flavor") if flavor == "" { flavor = defaultFlavor } fl, ok := flavors[flavor] if !ok { return nil, fmt.Errorf("unsupported sql flavor: %s", flavor) } dr := fl.Driver if dr == "" { dr = fl.Name } var qs QuadStore conn, err := connect(addr, dr, options) if err != nil { return nil, err } localOpt, localOptOk, err := options.BoolKey("local_optimize") if err != nil { return nil, err } qs.db = conn qs.flavor = fl qs.size = -1 qs.sizes = lru.New(1024) qs.ids = lru.New(1024) // Skip size checking by default. 
qs.noSizes = true if localOptOk { if localOpt { qs.noSizes = false } } qs.useEstimates, _, err = options.BoolKey("use_estimates") if err != nil { return nil, err } return &qs, nil } func marshalQuadDirections(q quad.Quad) (s, p, o, l []byte, err error) { s, err = pquads.MarshalValue(q.Subject) if err != nil { return } p, err = pquads.MarshalValue(q.Predicate) if err != nil { return } o, err = pquads.MarshalValue(q.Object) if err != nil { return } l, err = pquads.MarshalValue(q.Label) if err != nil { return } return } func escapeNullByte(s string) string { return strings.Replace(s, "\u0000", `\x00`, -1) } func unescapeNullByte(s string) string { return strings.Replace(s, `\x00`, "\u0000", -1) } func nodeValues(h NodeHash, v quad.Value) (int, []interface{}, error) { var ( nodeKey int values = []interface{}{h.toSQL(), nil, nil}[:1] ) switch v := v.(type) { case quad.IRI: nodeKey = 1 values = append(values, string(v), true) case quad.BNode: nodeKey = 2 values = append(values, string(v), true) case quad.String: nodeKey = 3 values = append(values, escapeNullByte(string(v))) case quad.TypedString: nodeKey = 4 values = append(values, escapeNullByte(string(v.Value)), string(v.Type)) case quad.LangString: nodeKey = 5 values = append(values, escapeNullByte(string(v.Value)), v.Lang) case quad.Int: nodeKey = 6 values = append(values, int64(v)) case quad.Bool: nodeKey = 7 values = append(values, bool(v)) case quad.Float: nodeKey = 8 values = append(values, float64(v)) case quad.Time: nodeKey = 9 values = append(values, time.Time(v)) default: nodeKey = 0 p, err := pquads.MarshalValue(v) if err != nil { clog.Errorf("couldn't marshal value: %v", err) return 0, nil, err } values = append(values, p) } return nodeKey, values, nil } func (qs *QuadStore) ApplyDeltas(in []graph.Delta, opts graph.IgnoreOpts) error { tx, err := qs.db.Begin() if err != nil { clog.Errorf("couldn't begin write transaction: %v", err) return err } err = qs.flavor.RunTx(tx, in, opts) if err != nil { tx.Rollback() return err } qs.size = -1 // TODO(barakmich): Sync size with writes. return tx.Commit() } func (qs *QuadStore) Quad(val graph.Value) quad.Quad { h := val.(QuadHashes) return quad.Quad{ Subject: qs.NameOf(h.Get(quad.Subject)), Predicate: qs.NameOf(h.Get(quad.Predicate)), Object: qs.NameOf(h.Get(quad.Object)), Label: qs.NameOf(h.Get(quad.Label)), } } func (qs *QuadStore) QuadIterator(d quad.Direction, val graph.Value) graph.Iterator { return newSQLLinkIterator(qs, d, val.(NodeHash)) } func (qs *QuadStore) NodesAllIterator() graph.Iterator { return NewAllIterator(qs, "nodes") } func (qs *QuadStore) QuadsAllIterator() graph.Iterator { return NewAllIterator(qs, "quads") } func (qs *QuadStore) ValueOf(s quad.Value) graph.Value { return NodeHash(hashOf(s)) } // NullTime represents a time.Time that may be null. NullTime implements the // sql.Scanner interface so it can be used as a scan destination, similar to // sql.NullString. type NullTime struct { Time time.Time Valid bool // Valid is true if Time is not NULL } // Scan implements the Scanner interface. func (nt *NullTime) Scan(value interface{}) error { if value == nil { nt.Time, nt.Valid = time.Time{}, false return nil } switch value := value.(type) { case time.Time: nt.Time, nt.Valid = value, true case []byte: t, err := time.Parse("2006-01-02 15:04:05.999999", string(value)) if err != nil { return err } nt.Time, nt.Valid = t, true default: return fmt.Errorf("unsupported time format: %T: %v", value, value) } return nil } // Value implements the driver Valuer interface. 
func (nt NullTime) Value() (driver.Value, error) { if !nt.Valid { return nil, nil } return nt.Time, nil } func (qs *QuadStore) NameOf(v graph.Value) quad.Value { if v == nil { if clog.V(2) { clog.Infof("NameOf was nil") } return nil } else if v, ok := v.(graph.PreFetchedValue); ok { return v.NameOf() } hash := v.(NodeHash) if !hash.Valid() { if clog.V(2) { clog.Infof("NameOf was nil") } return nil } if val, ok := qs.ids.Get(hash.String()); ok { return val.(quad.Value) } query := `SELECT value, value_string, datatype, language, iri, bnode, value_int, value_bool, value_float, value_time FROM nodes WHERE hash = ` + qs.flavor.Placeholder(1) + ` LIMIT 1;` c := qs.db.QueryRow(query, hash.toSQL()) var ( data []byte str sql.NullString typ sql.NullString lang sql.NullString iri sql.NullBool bnode sql.NullBool vint sql.NullInt64 vbool sql.NullBool vfloat sql.NullFloat64 vtime NullTime ) if err := c.Scan( &data, &str, &typ, &lang, &iri, &bnode, &vint, &vbool, &vfloat, &vtime, ); err != nil { clog.Errorf("Couldn't execute value lookup: %v", err) return nil } var val quad.Value if str.Valid { if iri.Bool { val = quad.IRI(str.String) } else if bnode.Bool { val = quad.BNode(str.String) } else if lang.Valid { val = quad.LangString{ Value: quad.String(unescapeNullByte(str.String)), Lang: lang.String, } } else if typ.Valid { val = quad.TypedString{ Value: quad.String(unescapeNullByte(str.String)), Type: quad.IRI(typ.String), } } else { val = quad.String(unescapeNullByte(str.String)) } } else if vint.Valid { val = quad.Int(vint.Int64) } else if vbool.Valid { val = quad.Bool(vbool.Bool) } else if vfloat.Valid { val = quad.Float(vfloat.Float64) } else if vtime.Valid { val = quad.Time(vtime.Time) } else { qv, err := pquads.UnmarshalValue(data) if err != nil { clog.Errorf("Couldn't unmarshal value: %v", err) return nil } val = qv } if val != nil { qs.ids.Put(hash.String(), val) } return val } func (qs *QuadStore) Size() int64 { if qs.size != -1 { return qs.size } query := "SELECT COUNT(*) FROM quads;" if qs.useEstimates && qs.flavor.Estimated != nil { query = qs.flavor.Estimated("quads") } c := qs.db.QueryRow(query) err := c.Scan(&qs.size) if err != nil { clog.Errorf("Couldn't execute COUNT: %v", err) return 0 } return qs.size } func (qs *QuadStore) Horizon() graph.PrimaryKey { var horizon int64 err := qs.db.QueryRow("SELECT horizon FROM quads ORDER BY horizon DESC LIMIT 1;").Scan(&horizon) if err != nil { if err != sql.ErrNoRows { clog.Errorf("Couldn't execute horizon: %v", err) } return graph.NewSequentialKey(0) } return graph.NewSequentialKey(horizon) } func (qs *QuadStore) FixedIterator() graph.FixedIterator
func (qs *QuadStore) Close() error { return qs.db.Close() } func (qs *QuadStore) QuadDirection(in graph.Value, d quad.Direction) graph.Value { return NodeHash(in.(QuadHashes).Get(d)) } func (qs *QuadStore) Type() string { return QuadStoreType } func (qs *QuadStore) sizeForIterator(isAll bool, dir quad.Direction, hash NodeHash) int64 { var err error if isAll { return qs.Size() } if qs.noSizes { if dir == quad.Predicate { return (qs.Size() / 100) + 1 } return (qs.Size() / 1000) + 1 } if val, ok := qs.sizes.Get(hash.String() + string(dir.Prefix())); ok { return val.(int64) } var size int64 if clog.V(4) { clog.Infof("sql: getting size for select %s, %v", dir.String(), hash) } err = qs.db.QueryRow( fmt.Sprintf("SELECT count(*) FROM quads WHERE %s_hash = "+qs.flavor.Placeholder(1)+";", dir.String()), hash.toSQL()).Scan(&size) if err != nil { clog.Errorf("Error getting size from SQL database: %v", err) return 0 } qs.sizes.Put(hash.String()+string(dir.Prefix()), size) return size }
{ return iterator.NewFixed(iterator.Identity) }
identifier_body
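NullTime in the records above follows the sql.NullString convention: Scan maps SQL NULL to Valid=false, and Value maps Valid=false back to NULL via the driver.Valuer interface. The same handshake for a made-up nullable integer type is sketched below; NullInt is purely illustrative (the stdlib's sql.NullInt64 already covers this case).

package main

import (
	"database/sql/driver"
	"fmt"
)

// NullInt mirrors the NullTime convention: Valid=false round-trips as SQL NULL.
type NullInt struct {
	Int   int64
	Valid bool
}

// Scan implements the sql.Scanner interface.
func (n *NullInt) Scan(value interface{}) error {
	if value == nil {
		n.Int, n.Valid = 0, false
		return nil
	}
	v, ok := value.(int64)
	if !ok {
		return fmt.Errorf("unsupported int format: %T", value)
	}
	n.Int, n.Valid = v, true
	return nil
}

// Value implements the driver.Valuer interface.
func (n NullInt) Value() (driver.Value, error) {
	if !n.Valid {
		return nil, nil // stored as SQL NULL
	}
	return n.Int, nil
}

func main() {
	var n NullInt
	_ = n.Scan(nil)
	fmt.Println(n.Valid) // false
}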
tbs.rs
//! hash functions for DNSSec operations use super::rdata::{sig, DNSSECRData, SIG}; use crate::error::*; use crate::rr::dnssec::Algorithm; use crate::rr::{DNSClass, Name, RData, Record, RecordType}; use crate::serialize::binary::{BinEncodable, BinEncoder, EncodeMode}; /// Data To Be Signed. pub struct TBS(Vec<u8>); impl<'a> From<&'a [u8]> for TBS { fn from(slice: &'a [u8]) -> Self { Self(slice.to_owned()) } } impl AsRef<[u8]> for TBS { fn as_ref(&self) -> &[u8] { self.0.as_ref() } } /// Returns the to-be-signed serialization of the given message. pub fn message_tbs<M: BinEncodable>(message: &M, pre_sig0: &SIG) -> ProtoResult<TBS> { // TODO: should perform the serialization and sign block by block to reduce the max memory // usage, though at 4k max, this is probably unnecessary... For AXFR and large zones, it's // more important let mut buf: Vec<u8> = Vec::with_capacity(512); let mut buf2: Vec<u8> = Vec::with_capacity(512); { let mut encoder: BinEncoder<'_> = BinEncoder::with_mode(&mut buf, EncodeMode::Normal); assert!(sig::emit_pre_sig( &mut encoder, pre_sig0.type_covered(), pre_sig0.algorithm(), pre_sig0.num_labels(), pre_sig0.original_ttl(), pre_sig0.sig_expiration(), pre_sig0.sig_inception(), pre_sig0.key_tag(), pre_sig0.signer_name(), ) .is_ok()); // need a separate encoder here, as the encoding references absolute positions // inside the buffer. If the buffer already contains the sig0 RDATA, offsets // are wrong and the signature won't match. let mut encoder2: BinEncoder<'_> = BinEncoder::with_mode(&mut buf2, EncodeMode::Signing); message.emit(&mut encoder2).unwrap(); // coding error if this panics (i think?) } buf.append(&mut buf2); Ok(TBS(buf)) } /// Returns the to-be-signed serialization of the given record set. /// /// # Arguments /// /// * `name` - RRset record name /// * `dns_class` - DNSClass, i.e. IN, of the records /// * `num_labels` - number of labels in the name, needed to deal with `*.example.com` /// * `type_covered` - RecordType of the RRSet being hashed /// * `algorithm` - The Algorithm type used for the hashing /// * `original_ttl` - Original TTL is the TTL as specified in the SOA zones RRSet associated record /// * `sig_expiration` - the epoch seconds of when this hashed signature will expire /// * `key_inception` - the epoch seconds of when this hashed signature will be valid /// * `signer_name` - label of the entity responsible for signing this hash /// * `records` - RRSet to hash /// /// # Returns /// /// the binary hash of the specified RRSet and associated information // FIXME: OMG, there are a ton of asserts in here... #[allow(clippy::too_many_arguments)] pub fn rrset_tbs( name: &Name, dns_class: DNSClass, num_labels: u8, type_covered: RecordType, algorithm: Algorithm, original_ttl: u32, sig_expiration: u32, sig_inception: u32, key_tag: u16, signer_name: &Name, records: &[Record], ) -> ProtoResult<TBS> { // TODO: change this to a BTreeSet so that it's preordered, no sort necessary let mut rrset: Vec<&Record> = Vec::new(); // collect only the records for this rrset for record in records { if dns_class == record.dns_class() && type_covered == record.rr_type() && name == record.name() { rrset.push(record); } } // put records in canonical order rrset.sort(); let name = determine_name(name, num_labels)?; // TODO: rather than buffering here, use the Signer/Verifier? might mean fewer allocations... 
let mut buf: Vec<u8> = Vec::new(); { let mut encoder: BinEncoder<'_> = BinEncoder::new(&mut buf); encoder.set_canonical_names(true); // signed_data = RRSIG_RDATA | RR(1) | RR(2)... where // // "|" denotes concatenation // // RRSIG_RDATA is the wire format of the RRSIG RDATA fields // with the Signature field excluded and the Signer's Name // in canonical form. assert!(sig::emit_pre_sig( &mut encoder, type_covered, algorithm, name.num_labels(), original_ttl, sig_expiration, sig_inception, key_tag, signer_name, ) .is_ok()); // construct the rrset signing data for record in rrset { // RR(i) = name | type | class | OrigTTL | RDATA length | RDATA // // name is calculated according to the function in the RFC 4035 assert!(name .to_lowercase() .emit_as_canonical(&mut encoder, true) .is_ok()); // // type is the RRset type and all RRs in the class assert!(type_covered.emit(&mut encoder).is_ok()); // // class is the RRset's class assert!(dns_class.emit(&mut encoder).is_ok()); // // OrigTTL is the value from the RRSIG Original TTL field assert!(encoder.emit_u32(original_ttl).is_ok()); // // RDATA length // TODO: add support to the encoder to set a marker to go back and write the length let mut rdata_buf = Vec::new(); { let mut rdata_encoder = BinEncoder::new(&mut rdata_buf); rdata_encoder.set_canonical_names(true); if let Some(rdata) = record.data() { assert!(rdata.emit(&mut rdata_encoder).is_ok()); } } assert!(encoder.emit_u16(rdata_buf.len() as u16).is_ok()); // // All names in the RDATA field are in canonical form (set above) assert!(encoder.emit_vec(&rdata_buf).is_ok()); } } Ok(TBS(buf)) } /// Returns the to-be-signed serialization of the given record set using the information /// provided from the RRSIG record. /// /// # Arguments /// /// * `rrsig` - SIG or RRSIG record, which was produced from the RRSet /// * `records` - RRSet records to sign with the information in the `rrsig` /// /// # Return /// /// binary hash of the RRSet with the information from the RRSIG record pub fn rrset_tbs_with_rrsig(rrsig: &Record, records: &[Record]) -> ProtoResult<TBS> { if let Some(RData::DNSSEC(DNSSECRData::SIG(ref sig))) = rrsig.data() { rrset_tbs_with_sig(rrsig.name(), rrsig.dns_class(), sig, records) } else { Err(format!("could not determine name from {}", rrsig.name()).into()) } } /// Returns the to-be-signed serialization of the given record set using the information /// provided from the SIG record. /// /// # Arguments /// /// * `name` - labels of the record to sign /// * `dns_class` - DNSClass of the RRSet, i.e. IN /// * `sig` - SIG or RRSIG record, which was produced from the RRSet /// * `records` - RRSet records to sign with the information in the `rrsig` /// /// # Return /// /// binary hash of the RRSet with the information from the RRSIG record pub fn
( name: &Name, dns_class: DNSClass, sig: &SIG, records: &[Record], ) -> ProtoResult<TBS> { rrset_tbs( name, dns_class, sig.num_labels(), sig.type_covered(), sig.algorithm(), sig.original_ttl(), sig.sig_expiration(), sig.sig_inception(), sig.key_tag(), sig.signer_name(), records, ) } /// [RFC 4035](https://tools.ietf.org/html/rfc4035), DNSSEC Protocol Modifications, March 2005 /// /// ```text /// /// 5.3.2. Reconstructing the Signed Data /// ... /// To calculate the name: /// let rrsig_labels = the value of the RRSIG Labels field /// /// let fqdn = RRset's fully qualified domain name in /// canonical form /// /// let fqdn_labels = Label count of the fqdn above. /// /// if rrsig_labels = fqdn_labels, /// name = fqdn /// /// if rrsig_labels < fqdn_labels, /// name = "*." | the rightmost rrsig_label labels of the /// fqdn /// /// if rrsig_labels > fqdn_labels /// the RRSIG RR did not pass the necessary validation /// checks and MUST NOT be used to authenticate this /// RRset. /// /// The canonical forms for names and RRsets are defined in [RFC4034]. /// ``` pub fn determine_name(name: &Name, num_labels: u8) -> Result<Name, ProtoError> { // To calculate the name: // let rrsig_labels = the value of the RRSIG Labels field // // let fqdn = RRset's fully qualified domain name in // canonical form // // let fqdn_labels = Label count of the fqdn above. let fqdn_labels = name.num_labels(); // if rrsig_labels = fqdn_labels, // name = fqdn if fqdn_labels == num_labels { return Ok(name.clone()); } // if rrsig_labels < fqdn_labels, // name = "*." | the rightmost rrsig_label labels of the // fqdn if num_labels < fqdn_labels { let mut star_name: Name = Name::from_labels(vec![b"*" as &[u8]]).unwrap(); let rightmost = name.trim_to(num_labels as usize); if !rightmost.is_root() { star_name = star_name.append_name(&rightmost)?; return Ok(star_name); } return Ok(star_name); } // // if rrsig_labels > fqdn_labels // the RRSIG RR did not pass the necessary validation // checks and MUST NOT be used to authenticate this // RRset. Err(format!("could not determine name from {}", name).into()) }
rrset_tbs_with_sig
identifier_name
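determine_name in the record above is a direct transcription of the RFC 4035 section 5.3.2 wildcard rule quoted in its doc comment. The same three-way comparison in Go, with names simplified to dot-terminated strings — this glosses over canonical form and label encoding, so it is a sketch of the rule, not of trust-dns internals.

package main

import (
	"fmt"
	"strings"
)

// determineName applies RFC 4035 5.3.2: an equal label count keeps the fqdn,
// fewer RRSIG labels replace the extra leading labels with "*", and more
// RRSIG labels than the fqdn has is a validation failure.
func determineName(fqdn string, rrsigLabels int) (string, error) {
	labels := strings.Split(strings.TrimSuffix(fqdn, "."), ".")
	switch {
	case rrsigLabels == len(labels):
		return fqdn, nil
	case rrsigLabels == 0:
		return "*.", nil // wildcard at the root
	case rrsigLabels < len(labels):
		return "*." + strings.Join(labels[len(labels)-rrsigLabels:], ".") + ".", nil
	default:
		return "", fmt.Errorf("could not determine name from %s", fqdn)
	}
}

func main() {
	name, _ := determineName("a.b.example.com.", 2)
	fmt.Println(name) // *.example.com.
}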
tbs.rs
//! hash functions for DNSSec operations use super::rdata::{sig, DNSSECRData, SIG}; use crate::error::*; use crate::rr::dnssec::Algorithm; use crate::rr::{DNSClass, Name, RData, Record, RecordType}; use crate::serialize::binary::{BinEncodable, BinEncoder, EncodeMode}; /// Data To Be Signed. pub struct TBS(Vec<u8>); impl<'a> From<&'a [u8]> for TBS { fn from(slice: &'a [u8]) -> Self { Self(slice.to_owned()) } } impl AsRef<[u8]> for TBS { fn as_ref(&self) -> &[u8] { self.0.as_ref() } } /// Returns the to-be-signed serialization of the given message. pub fn message_tbs<M: BinEncodable>(message: &M, pre_sig0: &SIG) -> ProtoResult<TBS> { // TODO: should perform the serialization and sign block by block to reduce the max memory // usage, though at 4k max, this is probably unnecessary... For AXFR and large zones, it's // more important let mut buf: Vec<u8> = Vec::with_capacity(512); let mut buf2: Vec<u8> = Vec::with_capacity(512); { let mut encoder: BinEncoder<'_> = BinEncoder::with_mode(&mut buf, EncodeMode::Normal); assert!(sig::emit_pre_sig( &mut encoder, pre_sig0.type_covered(), pre_sig0.algorithm(), pre_sig0.num_labels(), pre_sig0.original_ttl(), pre_sig0.sig_expiration(), pre_sig0.sig_inception(), pre_sig0.key_tag(), pre_sig0.signer_name(), ) .is_ok()); // need a separate encoder here, as the encoding references absolute positions // inside the buffer. If the buffer already contains the sig0 RDATA, offsets // are wrong and the signature won't match. let mut encoder2: BinEncoder<'_> = BinEncoder::with_mode(&mut buf2, EncodeMode::Signing); message.emit(&mut encoder2).unwrap(); // coding error if this panics (i think?) } buf.append(&mut buf2); Ok(TBS(buf)) } /// Returns the to-be-signed serialization of the given record set. /// /// # Arguments /// /// * `name` - RRset record name /// * `dns_class` - DNSClass, i.e. IN, of the records /// * `num_labels` - number of labels in the name, needed to deal with `*.example.com` /// * `type_covered` - RecordType of the RRSet being hashed /// * `algorithm` - The Algorithm type used for the hashing /// * `original_ttl` - Original TTL is the TTL as specified in the SOA zones RRSet associated record /// * `sig_expiration` - the epoch seconds of when this hashed signature will expire /// * `key_inception` - the epoch seconds of when this hashed signature will be valid /// * `signer_name` - label of the entity responsible for signing this hash /// * `records` - RRSet to hash /// /// # Returns /// /// the binary hash of the specified RRSet and associated information // FIXME: OMG, there are a ton of asserts in here... #[allow(clippy::too_many_arguments)] pub fn rrset_tbs( name: &Name, dns_class: DNSClass, num_labels: u8, type_covered: RecordType, algorithm: Algorithm, original_ttl: u32, sig_expiration: u32, sig_inception: u32, key_tag: u16, signer_name: &Name, records: &[Record], ) -> ProtoResult<TBS>
/// Returns the to-be-signed serialization of the given record set using the information /// provided from the RRSIG record. /// /// # Arguments /// /// * `rrsig` - SIG or RRSIG record, which was produced from the RRSet /// * `records` - RRSet records to sign with the information in the `rrsig` /// /// # Return /// /// binary hash of the RRSet with the information from the RRSIG record pub fn rrset_tbs_with_rrsig(rrsig: &Record, records: &[Record]) -> ProtoResult<TBS> { if let Some(RData::DNSSEC(DNSSECRData::SIG(ref sig))) = rrsig.data() { rrset_tbs_with_sig(rrsig.name(), rrsig.dns_class(), sig, records) } else { Err(format!("could not determine name from {}", rrsig.name()).into()) } } /// Returns the to-be-signed serialization of the given record set using the information /// provided from the SIG record. /// /// # Arguments /// /// * `name` - labels of the record to sign /// * `dns_class` - DNSClass of the RRSet, i.e. IN /// * `sig` - SIG or RRSIG record, which was produced from the RRSet /// * `records` - RRSet records to sign with the information in the `rrsig` /// /// # Return /// /// binary hash of the RRSet with the information from the RRSIG record pub fn rrset_tbs_with_sig( name: &Name, dns_class: DNSClass, sig: &SIG, records: &[Record], ) -> ProtoResult<TBS> { rrset_tbs( name, dns_class, sig.num_labels(), sig.type_covered(), sig.algorithm(), sig.original_ttl(), sig.sig_expiration(), sig.sig_inception(), sig.key_tag(), sig.signer_name(), records, ) } /// [RFC 4035](https://tools.ietf.org/html/rfc4035), DNSSEC Protocol Modifications, March 2005 /// /// ```text /// /// 5.3.2. Reconstructing the Signed Data /// ... /// To calculate the name: /// let rrsig_labels = the value of the RRSIG Labels field /// /// let fqdn = RRset's fully qualified domain name in /// canonical form /// /// let fqdn_labels = Label count of the fqdn above. /// /// if rrsig_labels = fqdn_labels, /// name = fqdn /// /// if rrsig_labels < fqdn_labels, /// name = "*." | the rightmost rrsig_label labels of the /// fqdn /// /// if rrsig_labels > fqdn_labels /// the RRSIG RR did not pass the necessary validation /// checks and MUST NOT be used to authenticate this /// RRset. /// /// The canonical forms for names and RRsets are defined in [RFC4034]. /// ``` pub fn determine_name(name: &Name, num_labels: u8) -> Result<Name, ProtoError> { // To calculate the name: // let rrsig_labels = the value of the RRSIG Labels field // // let fqdn = RRset's fully qualified domain name in // canonical form // // let fqdn_labels = Label count of the fqdn above. let fqdn_labels = name.num_labels(); // if rrsig_labels = fqdn_labels, // name = fqdn if fqdn_labels == num_labels { return Ok(name.clone()); } // if rrsig_labels < fqdn_labels, // name = "*." | the rightmost rrsig_label labels of the // fqdn if num_labels < fqdn_labels { let mut star_name: Name = Name::from_labels(vec![b"*" as &[u8]]).unwrap(); let rightmost = name.trim_to(num_labels as usize); if !rightmost.is_root() { star_name = star_name.append_name(&rightmost)?; return Ok(star_name); } return Ok(star_name); } // // if rrsig_labels > fqdn_labels // the RRSIG RR did not pass the necessary validation // checks and MUST NOT be used to authenticate this // RRset. Err(format!("could not determine name from {}", name).into()) }
{ // TODO: change this to a BTreeSet so that it's preordered, no sort necessary let mut rrset: Vec<&Record> = Vec::new(); // collect only the records for this rrset for record in records { if dns_class == record.dns_class() && type_covered == record.rr_type() && name == record.name() { rrset.push(record); } } // put records in canonical order rrset.sort(); let name = determine_name(name, num_labels)?; // TODO: rather than buffering here, use the Signer/Verifier? might mean fewer allocations... let mut buf: Vec<u8> = Vec::new(); { let mut encoder: BinEncoder<'_> = BinEncoder::new(&mut buf); encoder.set_canonical_names(true); // signed_data = RRSIG_RDATA | RR(1) | RR(2)... where // // "|" denotes concatenation // // RRSIG_RDATA is the wire format of the RRSIG RDATA fields // with the Signature field excluded and the Signer's Name // in canonical form. assert!(sig::emit_pre_sig( &mut encoder, type_covered, algorithm, name.num_labels(), original_ttl, sig_expiration, sig_inception, key_tag, signer_name, ) .is_ok()); // construct the rrset signing data for record in rrset { // RR(i) = name | type | class | OrigTTL | RDATA length | RDATA // // name is calculated according to the function in the RFC 4035 assert!(name .to_lowercase() .emit_as_canonical(&mut encoder, true) .is_ok()); // // type is the RRset type and all RRs in the class assert!(type_covered.emit(&mut encoder).is_ok()); // // class is the RRset's class assert!(dns_class.emit(&mut encoder).is_ok()); // // OrigTTL is the value from the RRSIG Original TTL field assert!(encoder.emit_u32(original_ttl).is_ok()); // // RDATA length // TODO: add support to the encoder to set a marker to go back and write the length let mut rdata_buf = Vec::new(); { let mut rdata_encoder = BinEncoder::new(&mut rdata_buf); rdata_encoder.set_canonical_names(true); if let Some(rdata) = record.data() { assert!(rdata.emit(&mut rdata_encoder).is_ok()); } } assert!(encoder.emit_u16(rdata_buf.len() as u16).is_ok()); // // All names in the RDATA field are in canonical form (set above) assert!(encoder.emit_vec(&rdata_buf).is_ok()); } } Ok(TBS(buf)) }
identifier_body
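The `identifier_body` above is the core of `rrset_tbs`: filter the records down to the target RRset, sort them into canonical order, then emit RRSIG_RDATA followed by each record as name | type | class | OrigTTL | RDATA length | RDATA. A simplified Python sketch of that concatenation, using plain byte strings in place of real DNS wire encoding (the field packing here is illustrative, not RFC 4034 canonical form):

import struct

def rrset_signing_data(name, rclass, rtype, original_ttl, rrsig_rdata, records):
    # Each record is a (name, rtype, rclass, rdata_bytes) tuple.
    # Keep only the records that belong to this RRset.
    rrset = [r for r in records
             if r[0] == name and r[1] == rtype and r[2] == rclass]
    rrset.sort(key=lambda r: r[3])  # canonical order (simplified: by RDATA)
    out = bytearray(rrsig_rdata)    # RRSIG RDATA with the signature excluded
    for _, t, c, rdata in rrset:
        out += name.lower().encode("ascii")   # owner name, lowercased
        out += struct.pack("!HHI", t, c, original_ttl)
        out += struct.pack("!H", len(rdata))  # RDATA length
        out += rdata                          # RDATA in canonical form
    return bytes(out)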
tbs.rs
//! hash functions for DNSSEC operations use super::rdata::{sig, DNSSECRData, SIG}; use crate::error::*; use crate::rr::dnssec::Algorithm; use crate::rr::{DNSClass, Name, RData, Record, RecordType}; use crate::serialize::binary::{BinEncodable, BinEncoder, EncodeMode}; /// Data To Be Signed. pub struct TBS(Vec<u8>); impl<'a> From<&'a [u8]> for TBS { fn from(slice: &'a [u8]) -> Self { Self(slice.to_owned()) } } impl AsRef<[u8]> for TBS { fn as_ref(&self) -> &[u8] { self.0.as_ref() } } /// Returns the to-be-signed serialization of the given message. pub fn message_tbs<M: BinEncodable>(message: &M, pre_sig0: &SIG) -> ProtoResult<TBS> { // TODO: should perform the serialization and sign block by block to reduce the max memory // usage, though at 4k max, this is probably unnecessary... For AXFR and large zones, it's // more important let mut buf: Vec<u8> = Vec::with_capacity(512); let mut buf2: Vec<u8> = Vec::with_capacity(512); { let mut encoder: BinEncoder<'_> = BinEncoder::with_mode(&mut buf, EncodeMode::Normal); assert!(sig::emit_pre_sig( &mut encoder, pre_sig0.type_covered(), pre_sig0.algorithm(), pre_sig0.num_labels(), pre_sig0.original_ttl(), pre_sig0.sig_expiration(), pre_sig0.sig_inception(), pre_sig0.key_tag(), pre_sig0.signer_name(), ) .is_ok()); // need a separate encoder here, as the encoding references absolute positions // inside the buffer. If the buffer already contains the sig0 RDATA, offsets // are wrong and the signature won't match. let mut encoder2: BinEncoder<'_> = BinEncoder::with_mode(&mut buf2, EncodeMode::Signing); message.emit(&mut encoder2).unwrap(); // coding error if this panics (i think?) } buf.append(&mut buf2); Ok(TBS(buf)) } /// Returns the to-be-signed serialization of the given record set. /// /// # Arguments /// /// * `name` - RRset record name /// * `dns_class` - DNSClass, i.e. IN, of the records /// * `num_labels` - number of labels in the name, needed to deal with `*.example.com` /// * `type_covered` - RecordType of the RRSet being hashed /// * `algorithm` - The Algorithm type used for the hashing /// * `original_ttl` - Original TTL is the TTL as specified in the SOA zones RRSet associated record /// * `sig_expiration` - the epoch seconds of when this hashed signature will expire /// * `sig_inception` - the epoch seconds of when this hashed signature will be valid /// * `signer_name` - label of the entity responsible for signing this hash /// * `records` - RRSet to hash /// /// # Returns /// /// the binary hash of the specified RRSet and associated information // FIXME: OMG, there are a ton of asserts in here... #[allow(clippy::too_many_arguments)] pub fn rrset_tbs( name: &Name, dns_class: DNSClass, num_labels: u8, type_covered: RecordType, algorithm: Algorithm, original_ttl: u32, sig_expiration: u32, sig_inception: u32, key_tag: u16, signer_name: &Name, records: &[Record], ) -> ProtoResult<TBS> { // TODO: change this to a BTreeSet so that it's preordered, no sort necessary let mut rrset: Vec<&Record> = Vec::new(); // collect only the records for this rrset for record in records { if dns_class == record.dns_class() && type_covered == record.rr_type() && name == record.name() { rrset.push(record); } } // put records in canonical order rrset.sort(); let name = determine_name(name, num_labels)?; // TODO: rather than buffering here, use the Signer/Verifier? might mean fewer allocations...
let mut buf: Vec<u8> = Vec::new(); { let mut encoder: BinEncoder<'_> = BinEncoder::new(&mut buf); encoder.set_canonical_names(true); // signed_data = RRSIG_RDATA | RR(1) | RR(2)... where // // "|" denotes concatenation // // RRSIG_RDATA is the wire format of the RRSIG RDATA fields // with the Signature field excluded and the Signer's Name // in canonical form. assert!(sig::emit_pre_sig( &mut encoder, type_covered, algorithm, name.num_labels(), original_ttl, sig_expiration, sig_inception, key_tag, signer_name, ) .is_ok()); // construct the rrset signing data for record in rrset { // RR(i) = name | type | class | OrigTTL | RDATA length | RDATA // // name is calculated according to the function in the RFC 4035 assert!(name .to_lowercase() .emit_as_canonical(&mut encoder, true) .is_ok()); // // type is the RRset type and all RRs in the class assert!(type_covered.emit(&mut encoder).is_ok()); // // class is the RRset's class assert!(dns_class.emit(&mut encoder).is_ok()); // // OrigTTL is the value from the RRSIG Original TTL field assert!(encoder.emit_u32(original_ttl).is_ok()); // // RDATA length // TODO: add support to the encoder to set a marker to go back and write the length let mut rdata_buf = Vec::new();
{ let mut rdata_encoder = BinEncoder::new(&mut rdata_buf); rdata_encoder.set_canonical_names(true); if let Some(rdata) = record.data() { assert!(rdata.emit(&mut rdata_encoder).is_ok()); } } assert!(encoder.emit_u16(rdata_buf.len() as u16).is_ok()); // // All names in the RDATA field are in canonical form (set above) assert!(encoder.emit_vec(&rdata_buf).is_ok()); } } Ok(TBS(buf)) } /// Returns the to-be-signed serialization of the given record set using the information /// provided from the RRSIG record. /// /// # Arguments /// /// * `rrsig` - SIG or RRSIG record, which was produced from the RRSet /// * `records` - RRSet records to sign with the information in the `rrsig` /// /// # Return /// /// binary hash of the RRSet with the information from the RRSIG record pub fn rrset_tbs_with_rrsig(rrsig: &Record, records: &[Record]) -> ProtoResult<TBS> { if let Some(RData::DNSSEC(DNSSECRData::SIG(ref sig))) = rrsig.data() { rrset_tbs_with_sig(rrsig.name(), rrsig.dns_class(), sig, records) } else { Err(format!("could not determine name from {}", rrsig.name()).into()) } } /// Returns the to-be-signed serialization of the given record set using the information /// provided from the SIG record. /// /// # Arguments /// /// * `name` - labels of the record to sign /// * `dns_class` - DNSClass of the RRSet, i.e. IN /// * `sig` - SIG or RRSIG record, which was produced from the RRSet /// * `records` - RRSet records to sign with the information in the `rrsig` /// /// # Return /// /// binary hash of the RRSet with the information from the RRSIG record pub fn rrset_tbs_with_sig( name: &Name, dns_class: DNSClass, sig: &SIG, records: &[Record], ) -> ProtoResult<TBS> { rrset_tbs( name, dns_class, sig.num_labels(), sig.type_covered(), sig.algorithm(), sig.original_ttl(), sig.sig_expiration(), sig.sig_inception(), sig.key_tag(), sig.signer_name(), records, ) } /// [RFC 4035](https://tools.ietf.org/html/rfc4035), DNSSEC Protocol Modifications, March 2005 /// /// ```text /// /// 5.3.2. Reconstructing the Signed Data /// ... /// To calculate the name: /// let rrsig_labels = the value of the RRSIG Labels field /// /// let fqdn = RRset's fully qualified domain name in /// canonical form /// /// let fqdn_labels = Label count of the fqdn above. /// /// if rrsig_labels = fqdn_labels, /// name = fqdn /// /// if rrsig_labels < fqdn_labels, /// name = "*." | the rightmost rrsig_label labels of the /// fqdn /// /// if rrsig_labels > fqdn_labels /// the RRSIG RR did not pass the necessary validation /// checks and MUST NOT be used to authenticate this /// RRset. /// /// The canonical forms for names and RRsets are defined in [RFC4034]. /// ``` pub fn determine_name(name: &Name, num_labels: u8) -> Result<Name, ProtoError> { // To calculate the name: // let rrsig_labels = the value of the RRSIG Labels field // // let fqdn = RRset's fully qualified domain name in // canonical form // // let fqdn_labels = Label count of the fqdn above. let fqdn_labels = name.num_labels(); // if rrsig_labels = fqdn_labels, // name = fqdn if fqdn_labels == num_labels { return Ok(name.clone()); } // if rrsig_labels < fqdn_labels, // name = "*." 
| the rightmost rrsig_label labels of the // fqdn if num_labels < fqdn_labels { let mut star_name: Name = Name::from_labels(vec![b"*" as &[u8]]).unwrap(); let rightmost = name.trim_to(num_labels as usize); if !rightmost.is_root() { star_name = star_name.append_name(&rightmost)?; return Ok(star_name); } return Ok(star_name); } // // if rrsig_labels > fqdn_labels // the RRSIG RR did not pass the necessary validation // checks and MUST NOT be used to authenticate this // RRset. Err(format!("could not determine name from {}", name).into()) }
random_line_split
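One detail worth calling out from the `message_tbs` body repeated in these records: the message is encoded into a second buffer so that any absolute offsets it records start at zero, and only then is it appended after the pre-SIG(0) RDATA. A minimal Python sketch of that two-buffer concatenation (the encode callables are assumed helpers, not part of the original API):

def message_tbs(encode_pre_sig, encode_message):
    # Encode the SIG RDATA (signature field excluded) into its own buffer.
    buf = bytearray(encode_pre_sig())
    # Encode the message separately: offsets inside it must be relative to
    # the start of the message, not to the already-written pre-sig bytes.
    buf2 = encode_message()
    buf += buf2  # TBS = pre_sig RDATA | message
    return bytes(buf)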
tbs.rs
//! hash functions for DNSSEC operations use super::rdata::{sig, DNSSECRData, SIG}; use crate::error::*; use crate::rr::dnssec::Algorithm; use crate::rr::{DNSClass, Name, RData, Record, RecordType}; use crate::serialize::binary::{BinEncodable, BinEncoder, EncodeMode}; /// Data To Be Signed. pub struct TBS(Vec<u8>); impl<'a> From<&'a [u8]> for TBS { fn from(slice: &'a [u8]) -> Self { Self(slice.to_owned()) } } impl AsRef<[u8]> for TBS { fn as_ref(&self) -> &[u8] { self.0.as_ref() } } /// Returns the to-be-signed serialization of the given message. pub fn message_tbs<M: BinEncodable>(message: &M, pre_sig0: &SIG) -> ProtoResult<TBS> { // TODO: should perform the serialization and sign block by block to reduce the max memory // usage, though at 4k max, this is probably unnecessary... For AXFR and large zones, it's // more important let mut buf: Vec<u8> = Vec::with_capacity(512); let mut buf2: Vec<u8> = Vec::with_capacity(512); { let mut encoder: BinEncoder<'_> = BinEncoder::with_mode(&mut buf, EncodeMode::Normal); assert!(sig::emit_pre_sig( &mut encoder, pre_sig0.type_covered(), pre_sig0.algorithm(), pre_sig0.num_labels(), pre_sig0.original_ttl(), pre_sig0.sig_expiration(), pre_sig0.sig_inception(), pre_sig0.key_tag(), pre_sig0.signer_name(), ) .is_ok()); // need a separate encoder here, as the encoding references absolute positions // inside the buffer. If the buffer already contains the sig0 RDATA, offsets // are wrong and the signature won't match. let mut encoder2: BinEncoder<'_> = BinEncoder::with_mode(&mut buf2, EncodeMode::Signing); message.emit(&mut encoder2).unwrap(); // coding error if this panics (i think?) } buf.append(&mut buf2); Ok(TBS(buf)) } /// Returns the to-be-signed serialization of the given record set. /// /// # Arguments /// /// * `name` - RRset record name /// * `dns_class` - DNSClass, i.e. IN, of the records /// * `num_labels` - number of labels in the name, needed to deal with `*.example.com` /// * `type_covered` - RecordType of the RRSet being hashed /// * `algorithm` - The Algorithm type used for the hashing /// * `original_ttl` - Original TTL is the TTL as specified in the SOA zones RRSet associated record /// * `sig_expiration` - the epoch seconds of when this hashed signature will expire /// * `sig_inception` - the epoch seconds of when this hashed signature will be valid /// * `signer_name` - label of the entity responsible for signing this hash /// * `records` - RRSet to hash /// /// # Returns /// /// the binary hash of the specified RRSet and associated information // FIXME: OMG, there are a ton of asserts in here... #[allow(clippy::too_many_arguments)] pub fn rrset_tbs( name: &Name, dns_class: DNSClass, num_labels: u8, type_covered: RecordType, algorithm: Algorithm, original_ttl: u32, sig_expiration: u32, sig_inception: u32, key_tag: u16, signer_name: &Name, records: &[Record], ) -> ProtoResult<TBS> { // TODO: change this to a BTreeSet so that it's preordered, no sort necessary let mut rrset: Vec<&Record> = Vec::new(); // collect only the records for this rrset for record in records { if dns_class == record.dns_class() && type_covered == record.rr_type() && name == record.name() { rrset.push(record); } } // put records in canonical order rrset.sort(); let name = determine_name(name, num_labels)?; // TODO: rather than buffering here, use the Signer/Verifier? might mean fewer allocations...
let mut buf: Vec<u8> = Vec::new(); { let mut encoder: BinEncoder<'_> = BinEncoder::new(&mut buf); encoder.set_canonical_names(true); // signed_data = RRSIG_RDATA | RR(1) | RR(2)... where // // "|" denotes concatenation // // RRSIG_RDATA is the wire format of the RRSIG RDATA fields // with the Signature field excluded and the Signer's Name // in canonical form. assert!(sig::emit_pre_sig( &mut encoder, type_covered, algorithm, name.num_labels(), original_ttl, sig_expiration, sig_inception, key_tag, signer_name, ) .is_ok()); // construct the rrset signing data for record in rrset { // RR(i) = name | type | class | OrigTTL | RDATA length | RDATA // // name is calculated according to the function in the RFC 4035 assert!(name .to_lowercase() .emit_as_canonical(&mut encoder, true) .is_ok()); // // type is the RRset type and all RRs in the class assert!(type_covered.emit(&mut encoder).is_ok()); // // class is the RRset's class assert!(dns_class.emit(&mut encoder).is_ok()); // // OrigTTL is the value from the RRSIG Original TTL field assert!(encoder.emit_u32(original_ttl).is_ok()); // // RDATA length // TODO: add support to the encoder to set a marker to go back and write the length let mut rdata_buf = Vec::new(); { let mut rdata_encoder = BinEncoder::new(&mut rdata_buf); rdata_encoder.set_canonical_names(true); if let Some(rdata) = record.data() { assert!(rdata.emit(&mut rdata_encoder).is_ok()); } } assert!(encoder.emit_u16(rdata_buf.len() as u16).is_ok()); // // All names in the RDATA field are in canonical form (set above) assert!(encoder.emit_vec(&rdata_buf).is_ok()); } } Ok(TBS(buf)) } /// Returns the to-be-signed serialization of the given record set using the information /// provided from the RRSIG record. /// /// # Arguments /// /// * `rrsig` - SIG or RRSIG record, which was produced from the RRSet /// * `records` - RRSet records to sign with the information in the `rrsig` /// /// # Return /// /// binary hash of the RRSet with the information from the RRSIG record pub fn rrset_tbs_with_rrsig(rrsig: &Record, records: &[Record]) -> ProtoResult<TBS> { if let Some(RData::DNSSEC(DNSSECRData::SIG(ref sig))) = rrsig.data() { rrset_tbs_with_sig(rrsig.name(), rrsig.dns_class(), sig, records) } else { Err(format!("could not determine name from {}", rrsig.name()).into()) } } /// Returns the to-be-signed serialization of the given record set using the information /// provided from the SIG record. /// /// # Arguments /// /// * `name` - labels of the record to sign /// * `dns_class` - DNSClass of the RRSet, i.e. IN /// * `sig` - SIG or RRSIG record, which was produced from the RRSet /// * `records` - RRSet records to sign with the information in the `rrsig` /// /// # Return /// /// binary hash of the RRSet with the information from the RRSIG record pub fn rrset_tbs_with_sig( name: &Name, dns_class: DNSClass, sig: &SIG, records: &[Record], ) -> ProtoResult<TBS> { rrset_tbs( name, dns_class, sig.num_labels(), sig.type_covered(), sig.algorithm(), sig.original_ttl(), sig.sig_expiration(), sig.sig_inception(), sig.key_tag(), sig.signer_name(), records, ) } /// [RFC 4035](https://tools.ietf.org/html/rfc4035), DNSSEC Protocol Modifications, March 2005 /// /// ```text /// /// 5.3.2. Reconstructing the Signed Data /// ... /// To calculate the name: /// let rrsig_labels = the value of the RRSIG Labels field /// /// let fqdn = RRset's fully qualified domain name in /// canonical form /// /// let fqdn_labels = Label count of the fqdn above. 
/// /// if rrsig_labels = fqdn_labels, /// name = fqdn /// /// if rrsig_labels < fqdn_labels, /// name = "*." | the rightmost rrsig_label labels of the /// fqdn /// /// if rrsig_labels > fqdn_labels /// the RRSIG RR did not pass the necessary validation /// checks and MUST NOT be used to authenticate this /// RRset. /// /// The canonical forms for names and RRsets are defined in [RFC4034]. /// ``` pub fn determine_name(name: &Name, num_labels: u8) -> Result<Name, ProtoError> { // To calculate the name: // let rrsig_labels = the value of the RRSIG Labels field // // let fqdn = RRset's fully qualified domain name in // canonical form // // let fqdn_labels = Label count of the fqdn above. let fqdn_labels = name.num_labels(); // if rrsig_labels = fqdn_labels, // name = fqdn if fqdn_labels == num_labels
// if rrsig_labels < fqdn_labels, // name = "*." | the rightmost rrsig_label labels of the // fqdn if num_labels < fqdn_labels { let mut star_name: Name = Name::from_labels(vec![b"*" as &[u8]]).unwrap(); let rightmost = name.trim_to(num_labels as usize); if !rightmost.is_root() { star_name = star_name.append_name(&rightmost)?; return Ok(star_name); } return Ok(star_name); } // // if rrsig_labels > fqdn_labels // the RRSIG RR did not pass the necessary validation // checks and MUST NOT be used to authenticate this // RRset. Err(format!("could not determine name from {}", name).into()) }
{ return Ok(name.clone()); }
conditional_block
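The `conditional_block` above is the equal-label-count branch of `determine_name`. The full RFC 4035 section 5.3.2 reconstruction is small enough to sketch in Python, modeling a name as a list of labels (a hypothetical simplification of the Rust `Name` type):

def determine_name(labels, rrsig_labels):
    # labels: e.g. ['www', 'example', 'com']; rrsig_labels: RRSIG Labels field
    fqdn_labels = len(labels)
    if rrsig_labels == fqdn_labels:
        return labels  # name = fqdn
    if rrsig_labels < fqdn_labels:
        # name = "*." | the rightmost rrsig_labels labels of the fqdn
        return ["*"] + labels[fqdn_labels - rrsig_labels:]
    # rrsig_labels > fqdn_labels: MUST NOT be used to authenticate the RRset
    raise ValueError("could not determine name from %s" % ".".join(labels))

# determine_name(['www', 'example', 'com'], 2) -> ['*', 'example', 'com']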
index.js
var util = require('util'); var ardrone = require('ar-drone-browserified'); var parseAT = require('./lib/atreader'); var createCam = require('voxel-camera'); var tic = require('tic')(); var Drone = function(options) { var self = this; if (options.THREE) options = {game:options}; if (!options.game) throw new Error('Must specify a game.'); self.game = options.game; self.game.on('tick', tic.tick.bind(tic)); self.size = options.size || 1; self.altitudeLimit = options.altitudeLimit || 0; self.yawSpeed = options.yawSpeed || 0.1; self.verticalSpeed = options.verticalSpeed || 0.1; self.tilt = options.tilt || 0.1; self.flying = false; self.batteryCapacity = 120000; // 20 mins self._batteryLevel = 120000; self._animating = false; self._ledanimating = false; self._navdata = require('./lib/navdata.json'); self._drone = false; options.udpNavdataStream = options.udpNavdataStream || new ardrone.UdpNavdataStream({ parser: function(buf) { return buf; } }); ardrone.Client.call(self, options); // copy over ANIMATIONS and LED_ANIMATIONS self.ANIMATIONS = require('ar-drone-browserified/lib/control/AtCommandCreator').ANIMATIONS; self.LED_ANIMATIONS = require('ar-drone-browserified/lib/control/AtCommandCreator').LED_ANIMATIONS; self.LED_COLORS = { 0: [new self.game.THREE.Color(0x000000), 0], 1: [new self.game.THREE.Color(0xff0000), 1], 2: [new self.game.THREE.Color(0x00ff00), 1], 3: [new self.game.THREE.Color(0xff9900), 1] }; // on data from udpControl self._cmds = []; self._udpControl._socket.on('data', function(cmds) { self._cmds = self._cmds.concat(parseAT(cmds)); }); // start up emitters self.resume(); // emit navdata var seq = 0; setInterval(function() { if (options.udpNavdataStream._initialized === true) { options.udpNavdataStream._socket.emit('message', self._emitNavdata(seq++)); } }, 100); }; util.inherits(Drone, ardrone.Client); module.exports = function(options) { return new Drone(options); }; module.exports.Drone = Drone; // return the drone item to add to game Drone.prototype.item = function(item) { var self = this; if (item) { item.tick = self.createTick(item); self._drone = item; return self._drone; } var group = new self.game.THREE.Object3D(); var drone = new self.game.THREE.Mesh( new self.game.THREE.CubeGeometry(self.size, self.size/6, self.size), self.game.materials.material ); drone.position.set(0, self.size/6, 0); drone.rotation.y = deg2Rad(-90); group.add(drone); self.game.materials.load([[ 'drone-side', 'drone-front', 'drone-top', 'drone-bottom', 'drone-side', 'drone-side' ]], function(textures) { self.game.materials.paint(drone, textures[0]); }); self._leds = self._addLEDs(group); self.leds('standard'); self._drone = self.game.addItem({ mesh: group, size: self.size, velocity: {x: 0, y: 0, z: 0} }); self._drone.tick = self.createTick(self._drone); return self._drone; }; // process AT* commands to control drone Drone.prototype.createTick = function(drone) { var self = this; var dt = 0; var oldTick = drone.tick || function() {}; return function(delta) { dt += 0.01; // drain battery - video on, flying, animating self._batteryLevel -= (self._animating && self.flying) ? 4 : (self.flying) ? 
1.75 : 0.5; // dead battery X| if (self._batteryLevel <= 0) { self.land(); return; } oldTick.call(drone, delta); var didem = []; self._cmds.forEach(function(cmd) { // only process the first unique if (didem.indexOf(cmd.type + cmd.args[0]) !== -1) return; didem.push(cmd.type + cmd.args[0]); self['_handle' + cmd.type](dt, drone, cmd); }); self._cmds = []; // render the camera, follow the drone if (self._cameraControl) { self._cameraControl.render( self._drone, new self.game.THREE.Vector3(-2, 0, 0), new self.game.THREE.Vector3(-100, 0, 0) ); // monitor follows the player self._monitor.position = self.game.controls.yawObject.position.clone(); self._monitor.position.z += 2; self._monitor.position.y += 0.75; } }; }; // display video monitor // todo: integrate more with ar-drone lib // also where is the bottom camera? Drone.prototype.viewCamera = function() { var self = this; if (!self._cameraControl) { self._cameraControl = createCam(self.game); // use the camera's png stream :D self._pngStream = self._cameraControl; // add the camera var camera = self._cameraControl.camera(); self.game.scene.add(camera); self._monitor = new self.game.THREE.Object3D(); var height = 1; var padding = 0.01; var video = new self.game.THREE.Mesh( new self.game.THREE.CubeGeometry(1.77 * height, height, 0), new self.game.THREE.MeshBasicMaterial({ map: self._cameraControl.monitor() }) ); self._monitor.add(video); // border var border = new self.game.THREE.Mesh( new self.game.THREE.CubeGeometry((1.77 * height) + padding, height + padding, padding), new self.game.THREE.MeshBasicMaterial({color: 0x000000}) ); border.position.set(0, 0, padding); self._monitor.add(border); self._monitor.rotation.x = deg2Rad(60); self.game.scene.add(self._monitor); } return self._monitor; }; // turn on/off the leds Drone.prototype.leds = function(leds) { var self = this; if (typeof leds === 'string') { if (leds === 'red') leds = [1, 1, 1, 1]; else if (leds === 'green') leds = [2, 2, 2, 2]; else if (leds === 'standard') leds = [1, 1, 2, 2]; else leds = [0, 0, 0, 0]; } leds.forEach(function(led, i) { var obj = self._leds[i]; obj.material.color = obj.material.emissive = self.LED_COLORS[led][0]; obj.material.opacity = self.LED_COLORS[led][1]; obj.material.transparent = (obj.material.opacity < 1) ? true : false; }); }; Drone.prototype._addLEDs = function(group) { var leds = []; for (var i = 0; i < 4; i++) { var led = new this.game.THREE.Mesh( new this.game.THREE.CubeGeometry(this.size/20, this.size/20, this.size/20), new this.game.THREE.MeshLambertMaterial({color:0x000000,ambient:0xffffff,emissive:0x000000}) ); led.translateX((this.size / 3) * (Math.sin(deg2Rad(i * 90) + deg2Rad(45)))); led.translateZ((this.size / 3) * (Math.cos(deg2Rad(i * 90) + deg2Rad(45)))); leds.push(led); if (group) group.add(led); } return leds; }; Drone.prototype._emitNavdata = function(seq) { var self = this; with (self._navdata) { sequenceNumber = seq; demo.batteryPercentage = Math.floor((self._batteryLevel / self.batteryCapacity) * 100); droneState.flying = self.flying ? 1 : 0; // todo: set this closer to actual states demo.controlState = self.flying ? 
'CTRL_FLYING' : 'CTRL_LANDED'; if (self._drone !== false) { /*demo.rotation.frontBack = demo.rotation.pitch = demo.rotation.theta = demo.rotation.y = demo.frontBackDegrees = self._drone.avatar.mesh.rotation.x; demo.rotation.leftRight = demo.rotation.roll = demo.rotation.phi = demo.rotation.x = demo.leftRightDegrees = self._drone.avatar.mesh.rotation.z; demo.rotation.clockwise = demo.rotation.yaw = demo.rotation.psi = demo.rotation.z = demo.clockwiseDegrees = self._drone.avatar.mesh.rotation.y; demo.velocity.x = demo.xVelocity = self._drone.velocity.z; demo.velocity.y = demo.yVelocity = self._drone.velocity.x; demo.velocity.z = demo.zVelocity = self._drone.velocity.y;*/ // todo: calculate altitude } } return self._navdata; }; Drone.prototype._handleREF = function(dt, drone, cmd) { var self = this; if (cmd.args[0] === 512) { setxyz(drone.resting, false); if (!self.flying) { // takeoff! drone.removeForce(self.game.gravity); drone.velocity.y += 0.002; self.flying = true; tic.timeout(function() { drone.velocity.y = 0; }, 500); } } else { if (self.flying) { // land! self.stop(); setxyz(drone.velocity, 0); setxyz(drone.avatar.children[0].rotation, 0); drone.subjectTo(self.game.gravity); self.flying = false; // TODO: land more realistically } } }; Drone.prototype._handlePCMD = function(dt, drone, cmd) { if (!this.flying) return; setxyz(drone.velocity, 0); // args: flags, leftRight, frontBack, upDown, clockWise // dont know why leftRight/frontBack are totally switched but they are! var frontBack = cmd.args[2] || 0; var leftRight = cmd.args[1] || 0; var upDown = cmd.args[3] || 0; var clockwise = cmd.args[4] || 0; // reduce speed var tilt = this.tilt / 100; var verticalSpeed = this.verticalSpeed / 100; var rot = drone.avatar.children[0]; // todo: figure auto leveling out // when it hits 0, it doesnt level for some reason rot.rotation.x = anim(dt, rot.rotation.x, -frontBack/2); if (frontBack !== 0) drone.velocity.z = frontBack * tilt; else if (!this._animating) rot.rotation.x = 0; rot.rotation.z = anim(dt, rot.rotation.z, -leftRight/2); if (leftRight !== 0) drone.velocity.x = -leftRight * tilt; else if (!this._animating) rot.rotation.z = 0; if (upDown !== 0) drone.velocity.y += upDown * verticalSpeed; if (clockwise !== 0) drone.rotation.y += clockwise * this.yawSpeed; // tmp fallback level out if (frontBack === 0 && leftRight === 0 && !this._animating) { rot.rotation.x = 0; rot.rotation.z = 0; } // cap the amount of tilt if (Math.abs(rot.rotation.z) >= 1 && !this._animating) { rot.rotation.z = rot.rotation.z < 0 ? -1 : 1; } if (Math.abs(rot.rotation.x) >= 1 && !this._animating) { rot.rotation.x = rot.rotation.x < 0 ? -1 : 1; } }; // Handle AT*CONFIG Drone.prototype._handleCONFIG = function(dt, drone, cmd) { switch (cmd.args[0]) { case 'control:flight_anim': this._handleANIM(dt, drone, cmd); break; case 'leds:leds_anim': this._handleLED(dt, drone, cmd); break; } }; // Handle AT*CONFG=1,control:flight_anim Drone.prototype._handleANIM = function(dt, drone, cmd) { var self = this; if (!self.flying || this._animating) return; // todo: tweak this closer to actual drone var duration = Number(cmd.args[2]) * 10; var type = this.ANIMATIONS[parseInt(cmd.args[1])]; self._animating = true; tic.timeout(function() { self._animating = false; }, duration); switch (type) { case 'flipLeft': case 'flipRight': case 'flipAhead': case 'flipBehind': // todo: for longer durations this gets out of hand. should only happen once. 
drone.velocity.y += 0.0045; tic.timeout(function() { var amt = (type === 'flipLeft' || type === 'flipAhead') ? deg2Rad(360) : -deg2Rad(360); var dir = (type === 'flipLeft' || type === 'flipRight') ? 'x' : 'z'; drone.avatar.children[0].rotation[dir] = anim(dt, drone.avatar.children[0].rotation[dir], amt, duration); }, duration / 5); // todo: better adjust above to mimic actual drone // where it flies up dramatically flips and comes down tic.timeout(function() { drone.velocity.y -= 0.002; }, duration - (duration / 10)); break; // todo: handle the other animations } }; // Handle AT*CONFG=1,control:leds_anim // todo: this is totally not correct! Drone.prototype._handleLED = function(dt, drone, cmd) { var self = this; if (this._ledanimating) return; var type = this.LED_ANIMATIONS[parseInt(cmd.args[1])]; var hz = Number(cmd.args[2]); var duration = Number(cmd.args[3]) * 1000; var on = 0; var i = 0; self.leds('blank'); var clearInterval = tic.interval(function() { if (!self._ledanimating) return; switch (type) { case 'blinkRed': case 'blinkGreen': case 'blinkOrange': var n = type === 'blinkRed' ? 1 : type === 'blinkGreen' ? 2 : 3; on = Math.sin(TAU * hz * i) > 0 ? n : 0; self.leds([on, on, on, on]); break; case 'blinkStandard': self.leds(Math.sin(TAU * hz * i) > 0 ? 'standard' : 'blank'); break; case 'blinkGreenRed': self.leds(Math.sin(TAU * hz * i) > 0 ? 'green' : 'red'); break; default: self.leds(type); break; // todo: handle other leds animations } i += 0.01; }, 100); self._ledanimating = true; tic.timeout(function() { clearInterval(); self.leds('standard'); self._ledanimating = false; }, duration); }; function setxyz(item, x, y, z) { if (arguments.length < 3) { y = x; z = x; } item.x = x; item.y = y; item.z = z; } // animate values to produce smoother results function
(t, from, to, d) { var should = to > 0 ? from < to ? true : false : from > to ? true : false; if (!should) return from; t /= d || 100; return -to * t * (t - 2) + from; }; var TAU = Math.PI * 2; function deg2Rad(deg) { return deg * (Math.PI / 180); }
anim
identifier_name
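The middle above is the identifier `anim`, the easing helper defined at the bottom of index.js: a quadratic ease-out step that only moves the value while it still has distance to cover toward `to`. The same logic in Python (a direct transcription; `frm` replaces the parameter name `from`, which is reserved in Python):

def anim(t, frm, to, d=None):
    # Animate only while the value has not yet passed `to` in its direction.
    should = frm < to if to > 0 else frm > to
    if not should:
        return frm
    t /= d or 100                   # normalize time by duration, default 100
    return -to * t * (t - 2) + frm  # ease-out curve: -to*t*(t-2) + frm

# e.g. easing a pitch angle toward -0.5 each tick:
# rot_x = anim(dt, rot_x, -0.5)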
index.js
var util = require('util'); var ardrone = require('ar-drone-browserified'); var parseAT = require('./lib/atreader'); var createCam = require('voxel-camera'); var tic = require('tic')(); var Drone = function(options) { var self = this; if (options.THREE) options = {game:options}; if (!options.game) throw new Error('Must specify a game.'); self.game = options.game; self.game.on('tick', tic.tick.bind(tic)); self.size = options.size || 1; self.altitudeLimit = options.altitudeLimit || 0; self.yawSpeed = options.yawSpeed || 0.1; self.verticalSpeed = options.verticalSpeed || 0.1; self.tilt = options.tilt || 0.1; self.flying = false; self.batteryCapacity = 120000; // 20 mins self._batteryLevel = 120000; self._animating = false; self._ledanimating = false; self._navdata = require('./lib/navdata.json'); self._drone = false; options.udpNavdataStream = options.udpNavdataStream || new ardrone.UdpNavdataStream({ parser: function(buf) { return buf; } }); ardrone.Client.call(self, options); // copy over ANIMATIONS and LED_ANIMATIONS self.ANIMATIONS = require('ar-drone-browserified/lib/control/AtCommandCreator').ANIMATIONS; self.LED_ANIMATIONS = require('ar-drone-browserified/lib/control/AtCommandCreator').LED_ANIMATIONS; self.LED_COLORS = { 0: [new self.game.THREE.Color(0x000000), 0], 1: [new self.game.THREE.Color(0xff0000), 1], 2: [new self.game.THREE.Color(0x00ff00), 1], 3: [new self.game.THREE.Color(0xff9900), 1] }; // on data from udpControl self._cmds = []; self._udpControl._socket.on('data', function(cmds) { self._cmds = self._cmds.concat(parseAT(cmds));
}); // start up emitters self.resume(); // emit navdata var seq = 0; setInterval(function() { if (options.udpNavdataStream._initialized === true) { options.udpNavdataStream._socket.emit('message', self._emitNavdata(seq++)); } }, 100); }; util.inherits(Drone, ardrone.Client); module.exports = function(options) { return new Drone(options); }; module.exports.Drone = Drone; // return the drone item to add to game Drone.prototype.item = function(item) { var self = this; if (item) { item.tick = self.createTick(item); self._drone = item; return self._drone; } var group = new self.game.THREE.Object3D(); var drone = new self.game.THREE.Mesh( new self.game.THREE.CubeGeometry(self.size, self.size/6, self.size), self.game.materials.material ); drone.position.set(0, self.size/6, 0); drone.rotation.y = deg2Rad(-90); group.add(drone); self.game.materials.load([[ 'drone-side', 'drone-front', 'drone-top', 'drone-bottom', 'drone-side', 'drone-side' ]], function(textures) { self.game.materials.paint(drone, textures[0]); }); self._leds = self._addLEDs(group); self.leds('standard'); self._drone = self.game.addItem({ mesh: group, size: self.size, velocity: {x: 0, y: 0, z: 0} }); self._drone.tick = self.createTick(self._drone); return self._drone; }; // process AT* commands to control drone Drone.prototype.createTick = function(drone) { var self = this; var dt = 0; var oldTick = drone.tick || function() {}; return function(delta) { dt += 0.01; // drain battery - video on, flying, animating self._batteryLevel -= (self._animating && self.flying) ? 4 : (self.flying) ? 1.75 : 0.5; // dead battery X| if (self._batteryLevel <= 0) { self.land(); return; } oldTick.call(drone, delta); var didem = []; self._cmds.forEach(function(cmd) { // only process the first unique if (didem.indexOf(cmd.type + cmd.args[0]) !== -1) return; didem.push(cmd.type + cmd.args[0]); self['_handle' + cmd.type](dt, drone, cmd); }); self._cmds = []; // render the camera, follow the drone if (self._cameraControl) { self._cameraControl.render( self._drone, new self.game.THREE.Vector3(-2, 0, 0), new self.game.THREE.Vector3(-100, 0, 0) ); // monitor follows the player self._monitor.position = self.game.controls.yawObject.position.clone(); self._monitor.position.z += 2; self._monitor.position.y += 0.75; } }; }; // display video monitor // todo: integrate more with ar-drone lib // also where is the bottom camera? 
Drone.prototype.viewCamera = function() { var self = this; if (!self._cameraControl) { self._cameraControl = createCam(self.game); // use the camera's png stream :D self._pngStream = self._cameraControl; // add the camera var camera = self._cameraControl.camera(); self.game.scene.add(camera); self._monitor = new self.game.THREE.Object3D(); var height = 1; var padding = 0.01; var video = new self.game.THREE.Mesh( new self.game.THREE.CubeGeometry(1.77 * height, height, 0), new self.game.THREE.MeshBasicMaterial({ map: self._cameraControl.monitor() }) ); self._monitor.add(video); // border var border = new self.game.THREE.Mesh( new self.game.THREE.CubeGeometry((1.77 * height) + padding, height + padding, padding), new self.game.THREE.MeshBasicMaterial({color: 0x000000}) ); border.position.set(0, 0, padding); self._monitor.add(border); self._monitor.rotation.x = deg2Rad(60); self.game.scene.add(self._monitor); } return self._monitor; }; // turn on/off the leds Drone.prototype.leds = function(leds) { var self = this; if (typeof leds === 'string') { if (leds === 'red') leds = [1, 1, 1, 1]; else if (leds === 'green') leds = [2, 2, 2, 2]; else if (leds === 'standard') leds = [1, 1, 2, 2]; else leds = [0, 0, 0, 0]; } leds.forEach(function(led, i) { var obj = self._leds[i]; obj.material.color = obj.material.emissive = self.LED_COLORS[led][0]; obj.material.opacity = self.LED_COLORS[led][1]; obj.material.transparent = (obj.material.opacity < 1) ? true : false; }); }; Drone.prototype._addLEDs = function(group) { var leds = []; for (var i = 0; i < 4; i++) { var led = new this.game.THREE.Mesh( new this.game.THREE.CubeGeometry(this.size/20, this.size/20, this.size/20), new this.game.THREE.MeshLambertMaterial({color:0x000000,ambient:0xffffff,emissive:0x000000}) ); led.translateX((this.size / 3) * (Math.sin(deg2Rad(i * 90) + deg2Rad(45)))); led.translateZ((this.size / 3) * (Math.cos(deg2Rad(i * 90) + deg2Rad(45)))); leds.push(led); if (group) group.add(led); } return leds; }; Drone.prototype._emitNavdata = function(seq) { var self = this; with (self._navdata) { sequenceNumber = seq; demo.batteryPercentage = Math.floor((self._batteryLevel / self.batteryCapacity) * 100); droneState.flying = self.flying ? 1 : 0; // todo: set this closer to actual states demo.controlState = self.flying ? 'CTRL_FLYING' : 'CTRL_LANDED'; if (self._drone !== false) { /*demo.rotation.frontBack = demo.rotation.pitch = demo.rotation.theta = demo.rotation.y = demo.frontBackDegrees = self._drone.avatar.mesh.rotation.x; demo.rotation.leftRight = demo.rotation.roll = demo.rotation.phi = demo.rotation.x = demo.leftRightDegrees = self._drone.avatar.mesh.rotation.z; demo.rotation.clockwise = demo.rotation.yaw = demo.rotation.psi = demo.rotation.z = demo.clockwiseDegrees = self._drone.avatar.mesh.rotation.y; demo.velocity.x = demo.xVelocity = self._drone.velocity.z; demo.velocity.y = demo.yVelocity = self._drone.velocity.x; demo.velocity.z = demo.zVelocity = self._drone.velocity.y;*/ // todo: calculate altitude } } return self._navdata; }; Drone.prototype._handleREF = function(dt, drone, cmd) { var self = this; if (cmd.args[0] === 512) { setxyz(drone.resting, false); if (!self.flying) { // takeoff! drone.removeForce(self.game.gravity); drone.velocity.y += 0.002; self.flying = true; tic.timeout(function() { drone.velocity.y = 0; }, 500); } } else { if (self.flying) { // land! 
self.stop(); setxyz(drone.velocity, 0); setxyz(drone.avatar.children[0].rotation, 0); drone.subjectTo(self.game.gravity); self.flying = false; // TODO: land more realistically } } }; Drone.prototype._handlePCMD = function(dt, drone, cmd) { if (!this.flying) return; setxyz(drone.velocity, 0); // args: flags, leftRight, frontBack, upDown, clockWise // dont know why leftRight/frontBack are totally switched but they are! var frontBack = cmd.args[2] || 0; var leftRight = cmd.args[1] || 0; var upDown = cmd.args[3] || 0; var clockwise = cmd.args[4] || 0; // reduce speed var tilt = this.tilt / 100; var verticalSpeed = this.verticalSpeed / 100; var rot = drone.avatar.children[0]; // todo: figure auto leveling out // when it hits 0, it doesnt level for some reason rot.rotation.x = anim(dt, rot.rotation.x, -frontBack/2); if (frontBack !== 0) drone.velocity.z = frontBack * tilt; else if (!this._animating) rot.rotation.x = 0; rot.rotation.z = anim(dt, rot.rotation.z, -leftRight/2); if (leftRight !== 0) drone.velocity.x = -leftRight * tilt; else if (!this._animating) rot.rotation.z = 0; if (upDown !== 0) drone.velocity.y += upDown * verticalSpeed; if (clockwise !== 0) drone.rotation.y += clockwise * this.yawSpeed; // tmp fallback level out if (frontBack === 0 && leftRight === 0 && !this._animating) { rot.rotation.x = 0; rot.rotation.z = 0; } // cap the amount of tilt if (Math.abs(rot.rotation.z) >= 1 && !this._animating) { rot.rotation.z = rot.rotation.z < 0 ? -1 : 1; } if (Math.abs(rot.rotation.x) >= 1 && !this._animating) { rot.rotation.x = rot.rotation.x < 0 ? -1 : 1; } }; // Handle AT*CONFIG Drone.prototype._handleCONFIG = function(dt, drone, cmd) { switch (cmd.args[0]) { case 'control:flight_anim': this._handleANIM(dt, drone, cmd); break; case 'leds:leds_anim': this._handleLED(dt, drone, cmd); break; } }; // Handle AT*CONFG=1,control:flight_anim Drone.prototype._handleANIM = function(dt, drone, cmd) { var self = this; if (!self.flying || this._animating) return; // todo: tweak this closer to actual drone var duration = Number(cmd.args[2]) * 10; var type = this.ANIMATIONS[parseInt(cmd.args[1])]; self._animating = true; tic.timeout(function() { self._animating = false; }, duration); switch (type) { case 'flipLeft': case 'flipRight': case 'flipAhead': case 'flipBehind': // todo: for longer durations this gets out of hand. should only happen once. drone.velocity.y += 0.0045; tic.timeout(function() { var amt = (type === 'flipLeft' || type === 'flipAhead') ? deg2Rad(360) : -deg2Rad(360); var dir = (type === 'flipLeft' || type === 'flipRight') ? 'x' : 'z'; drone.avatar.children[0].rotation[dir] = anim(dt, drone.avatar.children[0].rotation[dir], amt, duration); }, duration / 5); // todo: better adjust above to mimic actual drone // where it flies up dramatically flips and comes down tic.timeout(function() { drone.velocity.y -= 0.002; }, duration - (duration / 10)); break; // todo: handle the other animations } }; // Handle AT*CONFG=1,control:leds_anim // todo: this is totally not correct! Drone.prototype._handleLED = function(dt, drone, cmd) { var self = this; if (this._ledanimating) return; var type = this.LED_ANIMATIONS[parseInt(cmd.args[1])]; var hz = Number(cmd.args[2]); var duration = Number(cmd.args[3]) * 1000; var on = 0; var i = 0; self.leds('blank'); var clearInterval = tic.interval(function() { if (!self._ledanimating) return; switch (type) { case 'blinkRed': case 'blinkGreen': case 'blinkOrange': var n = type === 'blinkRed' ? 1 : type === 'blinkGreen' ? 2 : 3; on = Math.sin(TAU * hz * i) > 0 ? 
n : 0; self.leds([on, on, on, on]); break; case 'blinkStandard': self.leds(Math.sin(TAU * hz * i) > 0 ? 'standard' : 'blank'); break; case 'blinkGreenRed': self.leds(Math.sin(TAU * hz * i) > 0 ? 'green' : 'red'); break; default: self.leds(type); break; // todo: handle other leds animations } i += 0.01; }, 100); self._ledanimating = true; tic.timeout(function() { clearInterval(); self.leds('standard'); self._ledanimating = false; }, duration); }; function setxyz(item, x, y, z) { if (arguments.length < 3) { y = x; z = x; } item.x = x; item.y = y; item.z = z; } // animate values to produce smoother results function anim(t, from, to, d) { var should = to > 0 ? from < to ? true : false : from > to ? true : false; if (!should) return from; t /= d || 100; return -to * t * (t - 2) + from; }; var TAU = Math.PI * 2; function deg2Rad(deg) { return deg * (Math.PI / 180); }
random_line_split
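The LED handler in the records above drives the blink animations with a sine wave: `Math.sin(TAU * hz * i) > 0` decides on/off, where `i` advances by 0.01 on each 100 ms tick. A small Python sketch of that duty cycle (the sample values are chosen purely for illustration):

import math

TAU = math.pi * 2

def blink_on(hz, i):
    # On whenever the sine of the accumulated phase is positive,
    # giving a roughly 50% duty-cycle blink at the requested rate.
    return math.sin(TAU * hz * i) > 0

# Sample the first second of a blink, one 100 ms tick at a time:
states = [blink_on(hz=20, i=n * 0.01) for n in range(10)]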
index.js
var util = require('util'); var ardrone = require('ar-drone-browserified'); var parseAT = require('./lib/atreader'); var createCam = require('voxel-camera'); var tic = require('tic')(); var Drone = function(options) { var self = this; if (options.THREE) options = {game:options}; if (!options.game) throw new Error('Must specify a game.'); self.game = options.game; self.game.on('tick', tic.tick.bind(tic)); self.size = options.size || 1; self.altitudeLimit = options.altitudeLimit || 0; self.yawSpeed = options.yawSpeed || 0.1; self.verticalSpeed = options.verticalSpeed || 0.1; self.tilt = options.tilt || 0.1; self.flying = false; self.batteryCapacity = 120000; // 20 mins self._batteryLevel = 120000; self._animating = false; self._ledanimating = false; self._navdata = require('./lib/navdata.json'); self._drone = false; options.udpNavdataStream = options.udpNavdataStream || new ardrone.UdpNavdataStream({ parser: function(buf) { return buf; } }); ardrone.Client.call(self, options); // copy over ANIMATIONS and LED_ANIMATIONS self.ANIMATIONS = require('ar-drone-browserified/lib/control/AtCommandCreator').ANIMATIONS; self.LED_ANIMATIONS = require('ar-drone-browserified/lib/control/AtCommandCreator').LED_ANIMATIONS; self.LED_COLORS = { 0: [new self.game.THREE.Color(0x000000), 0], 1: [new self.game.THREE.Color(0xff0000), 1], 2: [new self.game.THREE.Color(0x00ff00), 1], 3: [new self.game.THREE.Color(0xff9900), 1] }; // on data from udpControl self._cmds = []; self._udpControl._socket.on('data', function(cmds) { self._cmds = self._cmds.concat(parseAT(cmds)); }); // start up emitters self.resume(); // emit navdata var seq = 0; setInterval(function() { if (options.udpNavdataStream._initialized === true) { options.udpNavdataStream._socket.emit('message', self._emitNavdata(seq++)); } }, 100); }; util.inherits(Drone, ardrone.Client); module.exports = function(options) { return new Drone(options); }; module.exports.Drone = Drone; // return the drone item to add to game Drone.prototype.item = function(item) { var self = this; if (item) { item.tick = self.createTick(item); self._drone = item; return self._drone; } var group = new self.game.THREE.Object3D(); var drone = new self.game.THREE.Mesh( new self.game.THREE.CubeGeometry(self.size, self.size/6, self.size), self.game.materials.material ); drone.position.set(0, self.size/6, 0); drone.rotation.y = deg2Rad(-90); group.add(drone); self.game.materials.load([[ 'drone-side', 'drone-front', 'drone-top', 'drone-bottom', 'drone-side', 'drone-side' ]], function(textures) { self.game.materials.paint(drone, textures[0]); }); self._leds = self._addLEDs(group); self.leds('standard'); self._drone = self.game.addItem({ mesh: group, size: self.size, velocity: {x: 0, y: 0, z: 0} }); self._drone.tick = self.createTick(self._drone); return self._drone; }; // process AT* commands to control drone Drone.prototype.createTick = function(drone) { var self = this; var dt = 0; var oldTick = drone.tick || function() {}; return function(delta) { dt += 0.01; // drain battery - video on, flying, animating self._batteryLevel -= (self._animating && self.flying) ? 4 : (self.flying) ? 
1.75 : 0.5; // dead battery X| if (self._batteryLevel <= 0) { self.land(); return; } oldTick.call(drone, delta); var didem = []; self._cmds.forEach(function(cmd) { // only process the first unique if (didem.indexOf(cmd.type + cmd.args[0]) !== -1) return; didem.push(cmd.type + cmd.args[0]); self['_handle' + cmd.type](dt, drone, cmd); }); self._cmds = []; // render the camera, follow the drone if (self._cameraControl) { self._cameraControl.render( self._drone, new self.game.THREE.Vector3(-2, 0, 0), new self.game.THREE.Vector3(-100, 0, 0) ); // monitor follows the player self._monitor.position = self.game.controls.yawObject.position.clone(); self._monitor.position.z += 2; self._monitor.position.y += 0.75; } }; }; // display video monitor // todo: integrate more with ar-drone lib // also where is the bottom camera? Drone.prototype.viewCamera = function() { var self = this; if (!self._cameraControl) { self._cameraControl = createCam(self.game); // use the camera's png stream :D self._pngStream = self._cameraControl; // add the camera var camera = self._cameraControl.camera(); self.game.scene.add(camera); self._monitor = new self.game.THREE.Object3D(); var height = 1; var padding = 0.01; var video = new self.game.THREE.Mesh( new self.game.THREE.CubeGeometry(1.77 * height, height, 0), new self.game.THREE.MeshBasicMaterial({ map: self._cameraControl.monitor() }) ); self._monitor.add(video); // border var border = new self.game.THREE.Mesh( new self.game.THREE.CubeGeometry((1.77 * height) + padding, height + padding, padding), new self.game.THREE.MeshBasicMaterial({color: 0x000000}) ); border.position.set(0, 0, padding); self._monitor.add(border); self._monitor.rotation.x = deg2Rad(60); self.game.scene.add(self._monitor); } return self._monitor; }; // turn on/off the leds Drone.prototype.leds = function(leds) { var self = this; if (typeof leds === 'string') { if (leds === 'red') leds = [1, 1, 1, 1]; else if (leds === 'green') leds = [2, 2, 2, 2]; else if (leds === 'standard') leds = [1, 1, 2, 2]; else leds = [0, 0, 0, 0]; } leds.forEach(function(led, i) { var obj = self._leds[i]; obj.material.color = obj.material.emissive = self.LED_COLORS[led][0]; obj.material.opacity = self.LED_COLORS[led][1]; obj.material.transparent = (obj.material.opacity < 1) ? true : false; }); }; Drone.prototype._addLEDs = function(group) { var leds = []; for (var i = 0; i < 4; i++) { var led = new this.game.THREE.Mesh( new this.game.THREE.CubeGeometry(this.size/20, this.size/20, this.size/20), new this.game.THREE.MeshLambertMaterial({color:0x000000,ambient:0xffffff,emissive:0x000000}) ); led.translateX((this.size / 3) * (Math.sin(deg2Rad(i * 90) + deg2Rad(45)))); led.translateZ((this.size / 3) * (Math.cos(deg2Rad(i * 90) + deg2Rad(45)))); leds.push(led); if (group) group.add(led); } return leds; }; Drone.prototype._emitNavdata = function(seq) { var self = this; with (self._navdata) { sequenceNumber = seq; demo.batteryPercentage = Math.floor((self._batteryLevel / self.batteryCapacity) * 100); droneState.flying = self.flying ? 1 : 0; // todo: set this closer to actual states demo.controlState = self.flying ? 
    'CTRL_FLYING' : 'CTRL_LANDED';
  if (self._drone !== false) {
    /*demo.rotation.frontBack = demo.rotation.pitch = demo.rotation.theta = demo.rotation.y = demo.frontBackDegrees = self._drone.avatar.mesh.rotation.x;
    demo.rotation.leftRight = demo.rotation.roll = demo.rotation.phi = demo.rotation.x = demo.leftRightDegrees = self._drone.avatar.mesh.rotation.z;
    demo.rotation.clockwise = demo.rotation.yaw = demo.rotation.psi = demo.rotation.z = demo.clockwiseDegrees = self._drone.avatar.mesh.rotation.y;
    demo.velocity.x = demo.xVelocity = self._drone.velocity.z;
    demo.velocity.y = demo.yVelocity = self._drone.velocity.x;
    demo.velocity.z = demo.zVelocity = self._drone.velocity.y;*/
    // todo: calculate altitude
  }
  return self._navdata;
};

Drone.prototype._handleREF = function(dt, drone, cmd) {
  var self = this;
  if (cmd.args[0] === 512) {
    setxyz(drone.resting, false);
    if (!self.flying) {
      // takeoff!
      drone.removeForce(self.game.gravity);
      drone.velocity.y += 0.002;
      self.flying = true;
      tic.timeout(function() {
        drone.velocity.y = 0;
      }, 500);
    }
  } else {
    if (self.flying) {
      // land!
      self.stop();
      setxyz(drone.velocity, 0);
      setxyz(drone.avatar.children[0].rotation, 0);
      drone.subjectTo(self.game.gravity);
      self.flying = false;
      // TODO: land more realistically
    }
  }
};

Drone.prototype._handlePCMD = function(dt, drone, cmd) {
  if (!this.flying) return;
  setxyz(drone.velocity, 0);
  // args: flags, leftRight, frontBack, upDown, clockWise
  // don't know why leftRight/frontBack are totally switched but they are!
  var frontBack = cmd.args[2] || 0;
  var leftRight = cmd.args[1] || 0;
  var upDown = cmd.args[3] || 0;
  var clockwise = cmd.args[4] || 0;
  // reduce speed
  var tilt = this.tilt / 100;
  var verticalSpeed = this.verticalSpeed / 100;
  var rot = drone.avatar.children[0];
  // todo: figure auto leveling out
  // when it hits 0, it doesn't level for some reason
  rot.rotation.x = anim(dt, rot.rotation.x, -frontBack / 2);
  if (frontBack !== 0) drone.velocity.z = frontBack * tilt;
  else if (!this._animating) rot.rotation.x = 0;
  rot.rotation.z = anim(dt, rot.rotation.z, -leftRight / 2);
  if (leftRight !== 0) drone.velocity.x = -leftRight * tilt;
  else if (!this._animating) rot.rotation.z = 0;
  if (upDown !== 0) drone.velocity.y += upDown * verticalSpeed;
  if (clockwise !== 0) drone.rotation.y += clockwise * this.yawSpeed;
  // tmp fallback level out
  if (frontBack === 0 && leftRight === 0 && !this._animating) {
    rot.rotation.x = 0;
    rot.rotation.z = 0;
  }
  // cap the amount of tilt
  if (Math.abs(rot.rotation.z) >= 1 && !this._animating) {
    rot.rotation.z = rot.rotation.z < 0 ? -1 : 1;
  }
  if (Math.abs(rot.rotation.x) >= 1 && !this._animating) {
    rot.rotation.x = rot.rotation.x < 0 ? -1 : 1;
  }
};

// Handle AT*CONFIG
Drone.prototype._handleCONFIG = function(dt, drone, cmd) {
  switch (cmd.args[0]) {
    case 'control:flight_anim':
      this._handleANIM(dt, drone, cmd);
      break;
    case 'leds:leds_anim':
      this._handleLED(dt, drone, cmd);
      break;
  }
};

// Handle AT*CONFIG=1,control:flight_anim
Drone.prototype._handleANIM = function(dt, drone, cmd) {
  var self = this;
  if (!self.flying || this._animating) return;
  // todo: tweak this closer to actual drone
  var duration = Number(cmd.args[2]) * 10;
  var type = this.ANIMATIONS[parseInt(cmd.args[1])];
  self._animating = true;
  tic.timeout(function() {
    self._animating = false;
  }, duration);
  switch (type) {
    case 'flipLeft':
    case 'flipRight':
    case 'flipAhead':
    case 'flipBehind':
      // todo: for longer durations this gets out of hand. should only happen once.
      drone.velocity.y += 0.0045;
      tic.timeout(function() {
        var amt = (type === 'flipLeft' || type === 'flipAhead') ? deg2Rad(360) : -deg2Rad(360);
        var dir = (type === 'flipLeft' || type === 'flipRight') ? 'x' : 'z';
        drone.avatar.children[0].rotation[dir] = anim(dt, drone.avatar.children[0].rotation[dir], amt, duration);
      }, duration / 5);
      // todo: better adjust above to mimic actual drone
      // where it flies up dramatically flips and comes down
      tic.timeout(function() {
        drone.velocity.y -= 0.002;
      }, duration - (duration / 10));
      break;
    // todo: handle the other animations
  }
};

// Handle AT*CONFIG=1,leds:leds_anim
// todo: this is totally not correct!
Drone.prototype._handleLED = function(dt, drone, cmd) {
  var self = this;
  if (this._ledanimating) return;
  var type = this.LED_ANIMATIONS[parseInt(cmd.args[1])];
  var hz = Number(cmd.args[2]);
  var duration = Number(cmd.args[3]) * 1000;
  var on = 0;
  var i = 0;
  self.leds('blank');
  var clearInterval = tic.interval(function() {
    if (!self._ledanimating) return;
    switch (type) {
      case 'blinkRed':
      case 'blinkGreen':
      case 'blinkOrange':
        var n = type === 'blinkRed' ? 1 : type === 'blinkGreen' ? 2 : 3;
        on = Math.sin(TAU * hz * i) > 0 ? n : 0;
        self.leds([on, on, on, on]);
        break;
      case 'blinkStandard':
        self.leds(Math.sin(TAU * hz * i) > 0 ? 'standard' : 'blank');
        break;
      case 'blinkGreenRed':
        self.leds(Math.sin(TAU * hz * i) > 0 ? 'green' : 'red');
        break;
      default:
        self.leds(type);
        break;
      // todo: handle other leds animations
    }
    i += 0.01;
  }, 100);
  self._ledanimating = true;
  tic.timeout(function() {
    clearInterval();
    self.leds('standard');
    self._ledanimating = false;
  }, duration);
};

function setxyz(item, x, y, z) {
  if (arguments.length < 3) {
    y = x;
    z = x;
  }
  item.x = x;
  item.y = y;
  item.z = z;
}

// animate values to produce smoother results
function anim(t, from, to, d) {
  var should = to > 0 ? from < to : from > to;
  if (!should) return from;
  t /= d || 100;
  return -to * t * (t - 2) + from;
}

var TAU = Math.PI * 2;

function deg2Rad(deg) {
  return deg * (Math.PI / 180);
}
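The easing rule in anim is the interesting bit of the snippet above: a quadratic ease-out that is cut off once the value has reached (or passed) the target. Below is a minimal sketch of that behavior; anim is copied from the code above, while the fixed 16 ms tick and the -0.5 rad target are illustrative values, not taken from the file.

// Minimal sketch of the quadratic ease-out used by anim() above.
// anim() is copied verbatim; the stepping loop is illustrative only.
function anim(t: number, from: number, to: number, d?: number): number {
    const should = to > 0 ? from < to : from > to; // stop once at or past the target
    if (!should) return from;
    t /= d || 100;                                 // normalize elapsed time to the duration
    return -to * t * (t - 2) + from;               // quadratic ease-out in t
}

// Ease a rotation from 0 toward -0.5 rad in fixed 16 ms ticks (hypothetical values).
let rotation = 0;
for (let elapsed = 16; elapsed <= 96; elapsed += 16) {
    rotation = anim(16, rotation, -0.5);
    console.log(elapsed, rotation.toFixed(3));
}

With a constant dt against the default duration of 100, each call advances the value by the same fraction of the target, and the should guard freezes the value once it overshoots instead of oscillating around it.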
function.py
# -*- coding:utf-8 -*-
import os
import sys

projectRoot = '/media/wangchen/newdata1/wangchen/work/Indoor_caffe/'
caffePath = '/home/wangchen/caffe'
os.environ['CAFFE_ROOT'] = caffePath
try:
    caffe_root = os.environ['CAFFE_ROOT'] + '/'
    print caffe_root
except KeyError:
    raise KeyError("Define CAFFE_ROOT in ~/.bashrc")
sys.path.insert(1, caffe_root + 'python/')
sys.path.append(projectRoot + 'code/')

import time
import cv2
import caffe
import numpy as np
from sklearn import svm
from sklearn import metrics
import xlwt
from sklearn import preprocessing
from sklearn.decomposition import PCA
import random
from function_dataBase import *


# ========================= caffe interface ======================
# ================================================================
def initcaffe(protourl, modelurl):
    caffe.set_device(1)
    caffe.set_mode_gpu()
    net = caffe.Net(protourl, modelurl, caffe.TEST)
    return net


def imageTransformer(net, mean_data):
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))  # c h w
    mean = mean_data.mean(1).mean(1)
    transformer.set_mean('data', mean)
    # transformer.set_raw_scale('data', 255)
    return transformer


# ======================= model mean =============================
# readmeanfile:          return mean data (w h c)
# makeplaces205meandata: build the mean data for the Places205 dataset
# ================================================================
def readmeanfile(meanfile):
    mean_blob = caffe.proto.caffe_pb2.BlobProto()
    mean_blob.ParseFromString(open(meanfile, 'rb').read())
    # convert the mean blob to an np.array
    mean_npy = caffe.io.blobproto_to_array(mean_blob)
    return mean_npy[0]
    # return mean_npy[0][:, 16:240, 16:240]


def makeplaces205meandata():
    out = np.zeros([3, 224, 224])
    out[0, :, :] = 105.487823486
    out[1, :, :] = 113.741088867
    out[2, :, :] = 116.060394287
    return out


# ================= feature computation =================================================
# calture_normal_feature: for one image, compute the center-crop feature at scale 256
#                         (the "1+2" baseline from the paper)
# calture_pool_feature:   for one image, crop patches at a given scale with a fixed
#                         stride, extract their features, then max-pool
# get_feature_scala_256:  compute normal_feature for the whole database
# get_pool_feature:       compute pool_feature for the whole database
# get_cam_feature:
# =======================================================================================
def calture_normal_feature(pic, caffenet, mean_data):
    transformer = imageTransformer(caffenet, mean_data)
    feature = np.zeros((1, 4096))
    # cv2.imshow('0', pic)
    sp = pic.shape
    y = sp[0]
    x = sp[1]
    # scale the short side to 256, keeping the aspect ratio
    if x < y:
        y = int((y * 256) / x)
        x = 256
    else:
        x = int((x * 256) / y)
        y = 256
    im_256 = cv2.resize(pic, (x, y))
    # cv2.imshow('1', im_256)
    # cv2.waitKey(0)
    im_224 = im_256[(y // 2 - 112):(y // 2 + 112), (x // 2 - 112):(x // 2 + 112)]
    im = transformer.preprocess('data', im_224)
    '''
    im = np.transpose(im_224, (2, 0, 1))
    im = im - mean_data
    '''
    im = np.resize(im, (1, 3, 224, 224))
    caffenet.blobs['data'].data[...] = im
    caffenet.forward()
    feature[0] = caffenet.blobs['fc7'].data[0]
    return feature


def calture_pool_feature(pic, picsize, cropsize, steplength, caffenet, mean_data, parallelnum=1, dropLabel=False):
    im = np.zeros((parallelnum, 3, cropsize, cropsize))
    transformer = imageTransformer(caffenet, mean_data)
    feature_max = np.zeros((parallelnum, 4096))
    feature_max = feature_max - 9999
    # slide a crop window over the image and extract a feature per window
    step = (picsize - cropsize) // steplength
    for m in range(step + 1):
        for n in range(step + 1):
            x = m * steplength
            y = n * steplength
            if x > picsize - cropsize:
                x = picsize - cropsize
            if y > picsize - cropsize:
                y = picsize - cropsize
            crop = pic[:, y:y + cropsize, x:x + cropsize, :]  # crop is a 4-d array: n h w c
            if dropLabel:
                dropInt = random.randint(1, 100)
                if dropInt > 75:
                    continue
            for i in range(parallelnum):
                im[i] = transformer.preprocess('data', crop[i])
            '''
            im = np.transpose(crop, (0, 3, 1, 2))
            im = im - mean_data
            '''
            caffenet.blobs['data'].data[...] = im
            caffenet.forward()
            tmp = caffenet.blobs['fc7'].data
            for i in range(parallelnum):
                for j in range(4096):
                    if tmp[i][j] >= feature_max[i][j]:
                        feature_max[i][j] = tmp[i][j]
                    # tmp[i][j] = tmp[i][j]/(step+1)*(step+1)
                    # feature_mean[i][j] = feature_mean[i][j] + tmp[i][j]
    return feature_max


def get_feature_scala_256(db, cursor, caffenet, tbname, rownum, datafloder, mean_data, featurename):
    feature_all = []
    for i in range(rownum):
        print '============current id is :%d ==============' % (i + 1)
        sql = "SELECT URL FROM " + tbname + " WHERE ID = '%d'" % (i + 1)
        cursor.execute(sql)
        result = cursor.fetchall()
        url = datafloder + result[0][0]
        im_ori = cv2.imread(url)
        cur_feature = calture_normal_feature(im_ori, caffenet, mean_data)
        feature_all.extend(cur_feature)
    feature_all = np.asarray(feature_all, dtype='float32')
    print feature_all.shape
    # write to the database
    write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_all)


def get_pool_feature(db, cursor, tbname, rownum, picsize, cropsize, steplength, caffenet, datafloder, mean_data,
                     featurename, parallelnum=1):
    feature_max = []
    for i in range(int(rownum / parallelnum)):
        print '============current id is :%d ==============' % (i * parallelnum + 1)
        sql = "SELECT URL FROM " + tbname + " WHERE ID >= '%d' and ID <= '%d'" % (
            i * parallelnum + 1, (i + 1) * parallelnum)
        cursor.execute(sql)
        result = cursor.fetchall()
        im = np.zeros((parallelnum, picsize, picsize, 3))
        for j in range(parallelnum):
            url = datafloder + result[j][0]
            im_ori = cv2.imread(url)
            im[j, :, :, :] = cv2.resize(im_ori, (picsize, picsize))
        current_max = calture_pool_feature(im, picsize, cropsize, steplength, caffenet, mean_data, parallelnum)
        feature_max.extend(current_max)
    feature_max = np.array(feature_max, dtype='float32')
    print feature_max.shape
    write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_max)


def get_cam_feature(db, cursor, tbname, file_url, caffenet, datafloder, mean_data, featurename):
    transformer = imageTransformer(caffenet, mean_data)
    feature_max = []
    file = open(file_url, 'r')
    count = 0
    while True:
        current_max = np.zeros((1, 4096))
        current_max -= 9999
        line = file.readline()
        line = line.strip()
        if not line:
            break
        count += 1
        print '----------------------current ID is: {}---------------------'.format(count)
        url = datafloder + line
        img = cv2.imread(url)
        x = float(img.shape[0])
        y = float(img.shape[1])
        # upscale images smaller than the 224x224 crop, keeping the aspect ratio
        if x < 224 or y < 224:
            scale1 = x / y
            scale2 = y / x
            if scale1 < scale2:
                img = cv2.resize(img, (int(scale2 * 224), 224))
            else:
                img = cv2.resize(img, (224, int(scale1 * 224)))
        x = img.shape[0]
        y = img.shape[1]
        if x > 451 and y > 451:
            steplength = 70
        else:
            steplength = 35
        step_x = (x - 224) / steplength + 1
        step_y = (y - 224) / steplength + 1
        for i in range(step_x):
            for j in range(step_y):
                # x and y are reused here as the crop offsets
                x = i * steplength
                y = j * steplength
                crop = img[x:x + 224, y:y + 224, :]
                im = transformer.preprocess('data', crop)
                '''
                im = np.transpose(crop, (2, 0, 1))
                im = im - mean_data
                '''
                im = np.resize(im, (1, 3, 224, 224))
                caffenet.blobs['data'].data[...] = im
                caffenet.forward()
                tmp = caffenet.blobs['fc7'].data
                for k in range(4096):
                    if tmp[0][k] >= current_max[0][k]:
                        current_max[0][k] = tmp[0][k]
        feature_max.extend(current_max)
    feature_max = np.array(feature_max, dtype='float32')
    print feature_max.shape
    file.close()
    write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_max)


# ====================== feature extraction ================
# ==========================================================
# feature6: PCA over the "1+2" features
def get_feature6(db, cursor):
    FEATURE3_train_data, train_label = read_feature(db, cursor, table_name=traintable, featurename="FEATURE3",
                                                    num=train_num)
    FEATURE3_test_data, test_label = read_feature(db, cursor, table_name=testtable, featurename="FEATURE3",
                                                  num=test_num)
    FEATURE4_train_data, train_label = read_feature(db, cursor, table_name=traintable, featurename="FEATURE4",
                                                    num=train_num)
    FEATURE4_test_data, test_label = read_feature(db, cursor, table_name=testtable, featurename="FEATURE4",
                                                  num=test_num)
    FEATURE5_train_data, train_label = read_feature(db, cursor, table_name=traintable, featurename="FEATURE5",
                                                    num=train_num)
    FEATURE5_test_data, test_label = read_feature(db, cursor, table_name=testtable, featurename="FEATURE5",
                                                  num=test_num)
    FEATURE6_train_data = np.concatenate((FEATURE3_train_data, FEATURE4_train_data, FEATURE5_train_data), 1)
    FEATURE6_test_data = np.concatenate((FEATURE3_test_data, FEATURE4_test_data, FEATURE5_test_data), 1)
    # FEATURE6_train_data, FEATURE6_test_data = myPCA(pre_6_train_data, pre_6_test_data)
    write_feature_to_db(db, cursor, table_name=traintable, featurename='FEATURE6', feature=FEATURE6_train_data)
    write_feature_to_db(db, cursor, table_name=testtable, featurename='FEATURE6', feature=FEATURE6_test_data)


# ============================ SVM and PCA ===========================
# ====================================================================
def myPCA(train_feature, test_feature, component=0.99):
    pca = PCA(n_components=component)
    scaler = preprocessing.StandardScaler().fit(train_feature)
    train_feature_scale = scaler.transform(train_feature)
    test_feature_scale = scaler.transform(test_feature)
    pca.fit(train_feature_scale)
    train_feature_pca = pca.transform(train_feature_scale)
    test_feature_pca = pca.transform(test_feature_scale)
    return train_feature_pca, test_feature_pca


def mySVM(train_data, test_data, train_label, test_label):
    clf = svm.SVC(kernel='linear')
    clf.fit(train_data, train_label)
    y_hat = clf.predict(test_data)
    # m_accuracy = metrics.accuracy_score(test_label, y_hat)
    # m_f1score = metrics.f1_score(y_true=test_label, y_pred=y_hat, average='macro')
    m_precision = metrics.precision_score(test_label, y_hat, average='macro')
    return m_precision  # , m_accuracy


def fsvm(db, cursor, featurename):
    print 'fetching ' + featurename + ' data from the database...'
    train_data, train_label = read_feature(db, cursor, traintable, featurename, train_num)
    test_data, test_label = read_feature(db, cursor, testtable, featurename, test_num)
    print 'training and testing the SVM...'
    result = mySVM(train_data, test_data, train_label, test_label)
    print 'the result is : {}'.format(result)


# =============== result computation and output ===================
# detail_result    compute accuracy, recall and so on
# confusionMatrix  collect all classifications and misclassifications
# detailToFile     dump ID, real label and prediction of each sample to a file
# matrixToExcel    dump the confusion matrix of misclassifications to Excel
# =================================================================
def detail_result(test_label, y_hat):
    m_accuracy = metrics.accuracy_score(test_label, y_hat)
    m_recall = metrics.recall_score(test_label, y_hat, average='macro')
    m_f1score = metrics.f1_score(test_label, y_hat, average='macro')
    return m_accuracy, m_recall, m_f1score


def confusionMatrix(test_label, y_hat):
    detailList = []
    matrixTable = np.zeros([67, 67])
    for i in range(len(test_label)):
        # if y_hat[i] != test_label[i]:
        tmp = [i + 1, test_label[i], y_hat[i]]
        detailList.append(tmp)
        matrixTable[test_label[i], y_hat[i]] += 1
    detailList = np.asarray(detailList, dtype='int32')
    return matrixTable, detailList


def detailToFile(numpy_data, outfileurl):
    # np.savetxt(outfileurl, numpy_data, fmt='%2.4f')
    np.savetxt(outfileurl, numpy_data, fmt='%d')


def matrixToExcel(matrixTable, xlsurl):
    wbk = xlwt.Workbook()
    sheet = wbk.add_sheet('sheet 1')
    for i in range(67):
        sheet.write(0, i + 1, labeldata[i])
        sheet.write(i + 1, 0, labeldata[i])
    for i in range(len(matrixTable)):
        for j in range(len(matrixTable)):
            if matrixTable[i, j] != 0:
                sheet.write(j + 1, i + 1, matrixTable[i, j])
    wbk.save(xlsurl)


def writeInfo(fileURL, info):
    with open(fileURL, 'a') as f:
        f.write(info)


def validationImage(db, cursor, tbname, rownum):
    for i in range(rownum):
        print '============current id is :%d ==============\r' % (i + 1),
        sql = "SELECT URL FROM " + tbname + " WHERE ID = '%d'" % (i + 1)
        cursor.execute(sql)
        result = cursor.fetchall()
        url = result[0][0]
        url = file_root + url
        im_ori = cv2.imread(url)
        sp = im_ori.shape  # raises if the image failed to load


if __name__ == '__main__':
    print 'begin validation...'
    db, cursor = connectdb()
    validationImage(db, cursor, "indoor67train", 5360)
    validationImage(db, cursor, "indoor67test", 1340)
    print 'finish!'
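Stripped of the caffe plumbing, calture_pool_feature is a sliding-window max-pool: crop a cropsize window every steplength pixels, extract a 4096-dim fc7 vector per window, and keep the element-wise maximum. A minimal sketch of just that pooling logic, with a hypothetical extractFeature callback standing in for the network forward pass (the 4096 length and the -9999 sentinel mirror the Python above):

// Sliding-window max-pooling, as in calture_pool_feature;
// extractFeature is a stand-in for the caffe fc7 forward pass.
type FeatureFn = (crop: { x: number; y: number; size: number }) => number[];

function maxPoolFeatures(picsize: number, cropsize: number, step: number, extractFeature: FeatureFn): number[] {
    const featureMax = new Array<number>(4096).fill(-9999); // same sentinel as the Python code
    const n = Math.floor((picsize - cropsize) / step);
    for (let m = 0; m <= n; m++) {
        for (let k = 0; k <= n; k++) {
            // clamp the window so it never runs off the image, as the Python loop does
            const x = Math.min(m * step, picsize - cropsize);
            const y = Math.min(k * step, picsize - cropsize);
            const f = extractFeature({ x, y, size: cropsize });
            for (let j = 0; j < f.length; j++) {
                if (f[j] > featureMax[j]) featureMax[j] = f[j]; // element-wise max across windows
            }
        }
    }
    return featureMax;
}

The clamp to picsize - cropsize makes the last window hug the image border instead of running past it, so every pixel is covered even when steplength does not divide the image size evenly.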
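The other geometric building block is the "1+2" baseline crop in calture_normal_feature: scale the short side to 256, then take the centered 224x224 window. The same arithmetic as a self-contained sketch; Size, shortSideTo256 and centerCrop224 are illustrative names, not part of the file.

// Short-side-to-256 scaling followed by a centered 224x224 window,
// mirroring calture_normal_feature; names here are illustrative only.
interface Size { w: number; h: number; }

function shortSideTo256(s: Size): Size {
    return s.w < s.h
        ? { w: 256, h: Math.floor((s.h * 256) / s.w) }
        : { w: Math.floor((s.w * 256) / s.h), h: 256 };
}

function centerCrop224(s: Size): { x0: number; y0: number; x1: number; y1: number } {
    const cx = Math.floor(s.w / 2), cy = Math.floor(s.h / 2);
    return { x0: cx - 112, y0: cy - 112, x1: cx + 112, y1: cy + 112 };
}

console.log(centerCrop224(shortSideTo256({ w: 640, h: 480 }))); // { x0: 58, y0: 16, x1: 282, y1: 240 }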
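On the evaluation side, confusionMatrix accumulates a 67x67 table indexed [true label][predicted label] (67 matching the indoor67 tables used in the script) plus a per-sample detail list. A compact sketch of the same bookkeeping, assuming integer labels in 0..66:

// 67x67 confusion matrix indexed [true][predicted], as in confusionMatrix();
// labels are assumed to be integers in 0..66.
function confusion(testLabels: number[], predictions: number[]): number[][] {
    const m = Array.from({ length: 67 }, () => new Array<number>(67).fill(0));
    for (let i = 0; i < testLabels.length; i++) {
        m[testLabels[i]][predictions[i]] += 1; // rows: ground truth, columns: prediction
    }
    return m;
}

// The diagonal sums give the number of correct predictions:
const m = confusion([0, 1, 1], [0, 1, 0]);
console.log(m[0][0] + m[1][1]); // 2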
vue2jsx.ts
import ts from 'typescript';

type Dictionary<T> = { [key: string]: T };
type Nullable<T> = T | null;

class ParsedNode {
    constructor(public tagName: string = "", public parentNode: Nullable<ParsedNode> = null) { }

    localVariables: string[] = [];
    childNodes: ParsedNode[] = [];
    startText: string = "";
    endText: string = "";
    startIf: boolean = false;
    condition: string = "";
    postProcessor: { (text: string): string } = t => t;

    appendChild(tagName: string) {
        var newNode = new ParsedNode(tagName, this);
        this.childNodes.push(newNode);
        return newNode;
    }

    render() {
        let jsx;
        if (this.startText == "<template>") {
            jsx = '[ ';
            for (let i = 0; i < this.childNodes.length; i++) {
                const child = this.childNodes[i];
                jsx += child.render();
                if (child.tagName != "#text") jsx += ", ";
            }
            jsx = jsx.replace(/,(\s*)$/, '$1') + ' ]';
        } else {
            jsx = this.startText;
            for (let i = 0; i < this.childNodes.length; i++) {
                const child = this.childNodes[i];
                jsx += child.render();
            }
            jsx += this.endText;
        }
        return this.postProcessor(jsx);
    }
}

function vue2jsx(html: string) {
    var startTagRegex = /^<(!?[-A-Za-z0-9_]+)((?:\s+[\w\-\:\.]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?)*)\s*(\/?)>/,
        endTagRegex = /^<\/([-A-Za-z0-9_]+)[^>]*>/;
    var attrRegex = /\s*[\w\-\:\.]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?/g;
    var special: Dictionary<number> = { script: 1, style: 1 };
    var index, chars, match, stack: any[] & { last?: Function } = [], last = html;
    stack.last = function () { return this[this.length - 1]; };

    var currentNode = new ParsedNode();
    var rootNode = currentNode;

    while (html) {
        chars = true;
        // Make sure we're not in a script or style element
        if (!stack.last() || !special[stack.last()]) {
            // Comment
            if (html.indexOf("<!--") == 0) {
                index = html.indexOf("-->");
                if (index >= 0) {
                    html = html.substring(index + 3);
                    chars = false;
                }
            // end tag
            } else if (html.indexOf("</") == 0) {
                match = html.match(endTagRegex);
                if (match) {
                    html = html.substring(match[0].length);
                    currentNode.endText = match[0];
                    currentNode = currentNode.parentNode!;
                    chars = false;
                }
            // start tag
            } else if (html.indexOf("<") == 0) {
                match = html.match(startTagRegex);
                if (match) {
                    html = html.substring(match[0].length);
                    currentNode = currentNode.appendChild(match[1]);
                    let startTagJsx = "";
                    let attrsMatch = match[2].match(attrRegex);
                    let attrsJsx = "";
                    if (attrsMatch) {
                        for (var i = 0; i < attrsMatch.length; i++) {
                            if (attrsMatch[i].replace(/^\s+/, '') == '') continue;
                            let tagName = match[1];
                            let name = attrsMatch[i].replace(/=.*/, '').replace(/^\s+/, '');
                            let value: string | true = attrsMatch[i].replace(/^[^=]+=/, '');
                            if (attrsMatch[i].indexOf('=') === -1) value = true;
                            let attrJsx = processAttr(tagName, name, value, currentNode);
                            if (attrJsx) attrsJsx += " " + attrJsx;
                        }
                    }
                    startTagJsx += "<" + match[1] + attrsJsx + match[match.length - 1] + ">";
                    currentNode.startText = startTagJsx;
                    if (match[match.length - 1] == "/") currentNode = currentNode.parentNode!;
                    chars = false;
                }
            }

            if (chars) {
                index = html.indexOf("<");
                var text = index < 0 ? html : html.substring(0, index);
                html = index < 0 ? "" : html.substring(index);
                let textNode = currentNode.appendChild("#text");
                textNode.startText = text.replace(/{{\s*([^}]+)\s*}}/g, "{ $1 }");
            }
        } else {
            html = html.substring(html.indexOf("</" + stack.last() + ">"));
        }

        if (html == last) {
            throw new Error("Parse Error at: " + html);
        }
        last = html;
    }

    return rootNode;
}

function processAttr(tagName: string, name: string, value: string | true, currentNode: ParsedNode) {
    let jsxAttr = name + "=" + value;
    if (value === true) {
        jsxAttr = name;
    } else if (name.indexOf("v-on:") == 0) {
        name = "on" + name.substr(5);
        value = processJs(value.slice(1, -1).replace(/^\s+/, ''), currentNode);
        let param = "()";
        let condition = "";
        if (name.endsWith(".enter")) {
            name = name.slice(0, -6);
            param = "e";
            condition = "e.keyCode == 13";
        }
        if (value.indexOf(';') === -1) value = `${param} => ${condition ? condition + " && " : ""}${value}`;
        else if (condition) value = `${param} => { if (${condition}) { ${value} } }`;
        else value = `${param} => { ${value} }`;
        jsxAttr = name + "={ " + value + " }";
    } else if (name.indexOf("v-bind:") == 0) {
        name = name.substr(7);
        jsxAttr = name + "={ " + processJs(value.slice(1, -1), currentNode) + " }";
    } else if (name == "v-for") {
        let [elem, elems] = value.slice(1, -1).split(' in ');
        if (elem.indexOf(',') > -1 && elem.indexOf('(') == -1) elem = "(" + elem + ")";
        jsxAttr = "";
        currentNode.localVariables = elem.replace(/^\(|\)$|\s+/g, '').split(',');
        currentNode.postProcessor = t => `{ this.${elems}.map(${elem} => ${t}) }`;
    } else if (name == "v-if") {
        jsxAttr = "";
        const condition = processJs(value.slice(1, -1), currentNode);
        currentNode.startIf = true;
        currentNode.condition = condition;
        currentNode.postProcessor = t => `{ ${condition} && ${t} }`;
    } else if (name == "v-else-if") {
        jsxAttr = "";
        const children = currentNode.parentNode!.childNodes.filter(n => n.tagName != "#text");
        const prevNode = children[children.length - 2];
        const condition = processJs(value.slice(1, -1), currentNode);
        currentNode.condition = condition;
        if (prevNode.startIf) prevNode.postProcessor = t => `{ ${prevNode.condition} ? ${t}`;
        else prevNode.postProcessor = t => ` : ${prevNode.condition} ? ${t}`;
        currentNode.postProcessor = t => ` : ${condition} ? ${t} : null }`;
    } else if (name == "v-else") {
        jsxAttr = "";
        const children = currentNode.parentNode!.childNodes.filter(n => n.tagName != "#text");
        const prevNode = children[children.length - 2];
        if (prevNode.startIf) prevNode.postProcessor = t => `{ ${prevNode.condition} ? ${t}`;
        else prevNode.postProcessor = t => ` : ${prevNode.condition} ? ${t}`;
        currentNode.postProcessor = t => ` : ${t} }`;
    } else if (name == "v-model") {
        const oninput = tagName == 'input' || tagName == 'textarea' ? "oninput" : "onchange";
        const model = processJs(value.slice(1, -1), currentNode);
        jsxAttr = `value={ ${model} } ${oninput}={ e => ${model} = e.target.value }`;
    }
    return jsxAttr;
}

function processJs(jsCode: string, currentNode: ParsedNode) {
    let fileNode = ts.createSourceFile("test.ts", "(" + jsCode + ")", ts.ScriptTarget.ES5);
    let localVariables: string[] = [];
    while (currentNode.parentNode) {
        currentNode = currentNode.parentNode;
        localVariables = localVariables.concat(currentNode.localVariables);
    }
    let positions: number[] = [];
    analyse(fileNode);
    positions
        .map(p => fixPos(--p))
        .filter(p => /[a-z$_]/.test(jsCode.substr(p, 1)))
        .filter(p => localVariables.indexOf(jsCode.substr(p).match(/^[a-zA-Z$_]+/)![0]) == -1)
        .sort((a, b) => b - a)
        .forEach(p => jsCode = jsCode.substr(0, p) + "this." + jsCode.substr(p));
    return jsCode;

    function analyse(node: ts.Node) {
        if (node.kind == ts.SyntaxKind.ParenthesizedExpression) {
            const expr = <ts.ParenthesizedExpression>node;
            if (expr.expression.kind == ts.SyntaxKind.Identifier) positions.push(expr.expression.pos);
        }
        if (node.kind == ts.SyntaxKind.ElementAccessExpression || node.kind == ts.SyntaxKind.PropertyAccessExpression) {
            positions.push(node.pos);
            return;
        }
        if (node.kind == ts.SyntaxKind.CallExpression && (<ts.CallExpression>node).expression.kind == ts.SyntaxKind.Identifier)
            positions.push(node.pos);
        if (node.kind == ts.SyntaxKind.BinaryExpression) {
            const binExpr = <ts.BinaryExpression>node;
            if (binExpr.right.kind == ts.SyntaxKind.Identifier) positions.push(binExpr.right.pos);
            if (binExpr.left.kind == ts.SyntaxKind.Identifier) positions.push(binExpr.left.pos);
        }
        ts.forEachChild(node, analyse);
    }

    function fixPos(pos: number) {
        while (/\s/.test(jsCode.substr(pos, 1)) && pos < jsCode.length) pos++;
        return pos;
    }
}

export = vue2jsx;
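Usage, for reference: vue2jsx takes a template string and returns the root ParsedNode, whose render() yields the JSX text. A small example; the relative import path is an assumption.

import vue2jsx = require('./vue2jsx'); // assumes the module sits next to this file

const root = vue2jsx('<template><div v-if="show">{{msg}}</div></template>');
// v-if becomes a guarded expression, {{msg}} becomes { msg }, and free
// identifiers in directive expressions are prefixed with `this.`:
//   [ { this.show && <div>{ msg }</div> } ]
console.log(root.render());

Note that only directive values (v-on, v-bind, v-if, v-for, v-model) pass through processJs and pick up the this. prefix; plain {{ }} interpolations are rewritten to { } without it.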
} export = vue2jsx;
pos++; return pos; }
random_line_split
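The ParsedNode class that these vue2jsx records revolve around renders a node as startText + children + endText and then feeds the result through its postProcessor hook, which is how v-for and v-if wrap the emitted JSX. A minimal Python sketch of the same shape, with illustrative names only.

class Node:
    """Start/end text around rendered children, plus a post-processing hook."""
    def __init__(self, start="", end=""):
        self.start, self.end = start, end
        self.children = []
        self.post = lambda text: text  # identity unless a directive replaces it

    def render(self):
        jsx = self.start + "".join(c.render() for c in self.children) + self.end
        return self.post(jsx)

root = Node("<ul>", "</ul>")
item = Node("<li>{ x }</li>")
item.post = lambda t: "{ this.items.map(x => %s) }" % t  # v-for-style wrapper
root.children.append(item)
print(root.render())  # <ul>{ this.items.map(x => <li>{ x }</li>) }</ul>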
vue2jsx.ts
import ts from 'typescript'; type Dictionary<T> = { [key: string]: T }; type Nullable<T> = T | null; class ParsedNode { constructor(public tagName: string = "", public parentNode: Nullable<ParsedNode> = null) { } localVariables: string[] = []; childNodes: ParsedNode[] = []; startText: string = ""; endText: string = ""; startIf: boolean = false; condition: string = ""; postProcessor: { (text: string): string } = t => t; appendChild(tagName: string) { var newNode = new ParsedNode(tagName, this); this.childNodes.push(newNode); return newNode; } render() { let jsx; if (this.startText == "<template>") { jsx = '[ '; for(let i = 0; i < this.childNodes.length; i++) { const child = this.childNodes[i]; jsx += child.render(); if (child.tagName != "#text") jsx += ", "; } jsx = jsx.replace(/,(\s*)$/, '$1') + ' ]'; } else { jsx = this.startText; for(let i = 0; i < this.childNodes.length; i++) { const child = this.childNodes[i]; jsx += child.render(); } jsx += this.endText; } return this.postProcessor(jsx); } } function vue2jsx(html: string) { var startTagRegex = /^<(!?[-A-Za-z0-9_]+)((?:\s+[\w\-\:\.]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?)*)\s*(\/?)>/, endTagRegex = /^<\/([-A-Za-z0-9_]+)[^>]*>/; var attrRegex = /\s*[\w\-\:\.]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?/g; var special: Dictionary<number> = { script: 1, style: 1 }; var index, chars, match, stack: any[] & { last?: Function } = [], last = html; stack.last = function () { return this[this.length - 1]; }; var currentNode = new ParsedNode(); var rootNode = currentNode; while (html) { chars = true; // Make sure we're not in a script or style element if (!stack.last() || !special[stack.last()]) { // Comment if (html.indexOf("<!--") == 0) { index = html.indexOf("-->"); if (index >= 0) { html = html.substring(index + 3); chars = false; } // end tag } else if (html.indexOf("</") == 0) { match = html.match(endTagRegex); if (match) { html = html.substring(match[0].length); currentNode.endText = match[0]; currentNode = currentNode.parentNode!; chars = false; } // start tag } else if (html.indexOf("<") == 0) { match = html.match(startTagRegex); if (match) { html = html.substring(match[0].length); currentNode = currentNode.appendChild(match[1]); let startTagJsx = ""; let attrsMatch = match[2].match(attrRegex); let attrsJsx = ""; if (attrsMatch) { for (var i = 0; i < attrsMatch.length; i++) { if (attrsMatch[i].replace(/^\s+/, '') == '') continue; let tagName = match[1]; let name = attrsMatch[i].replace(/=.*/, '').replace(/^\s+/, ''); let value = attrsMatch[i].replace(/^[^=]+=/, ''); if (attrsMatch[i].indexOf('=') === -1) value = true; let attrJsx = processAttr(tagName, name, value, currentNode); if (attrJsx) attrsJsx += " " + attrJsx; } } startTagJsx += "<" + match[1] + attrsJsx + match[match.length - 1] + ">"; currentNode.startText = startTagJsx; if (match[match.length - 1] == "/") currentNode = currentNode.parentNode!; chars = false; } } if (chars) { index = html.indexOf("<"); var text = index < 0 ? html : html.substring(0, index); html = index < 0 ? 
"" : html.substring(index); let textNode = currentNode.appendChild("#text"); textNode.startText = text.replace(/{{\s*([^}]+)\s*}}/g, "{ $1 }"); } } else { html = html.substring(html.indexOf("</" + stack.last() + ">")); } if (html == last) { throw new Error("Parse Error at: " + html) } last = html; } return rootNode; } function processAttr(tagName: string, name: string, value: string | true, currentNode: ParsedNode) { let jsxAttr = name + "=" + value; if (value === true) { jsxAttr = name; } else if (name.indexOf("v-on:") == 0) { name = "on" + name.substr(5); value = processJs(value.slice(1, -1).replace(/^\s+/, ''), currentNode); let param = "()"; let condition = ""; if (name.endsWith(".enter")) { name = name.slice(0, -6); param = "e"; condition = "e.keyCode == 13"; } if (value.indexOf(';') === -1) value = `${param} => ${condition ? condition + " && " : ""}${value}`; else if (condition) value = `${param} => { if (${condition}) { ${value} } }`; else value = `${param} => { ${value} }`; jsxAttr = name + "={ " + value + " }"; } else if (name.indexOf("v-bind:") == 0) { name = name.substr(7); jsxAttr = name + "={ " + processJs(value.slice(1, -1), currentNode) + " }"; } else if (name == "v-for") { let [elem, elems] = value.slice(1, -1).split(' in '); if (elem.indexOf(',') > -1 && elem.indexOf('(') == -1) elem = "(" + elem + ")"; jsxAttr = ""; currentNode.localVariables = elem.replace(/^\(|\)$|\s+/g, '').split(','); currentNode.postProcessor = t => `{ this.${elems}.map(${elem} => ${t}) }`; } else if (name == "v-if") { jsxAttr = ""; const condition = processJs(value.slice(1, -1), currentNode); currentNode.startIf = true; currentNode.condition = condition; currentNode.postProcessor = t => `{ ${condition} && ${t} }`; } else if (name == "v-else-if") { jsxAttr = ""; const children = currentNode.parentNode.childNodes.filter(n => n.tagName != "#text"); const prevNode = children[children.length - 2]; const condition = processJs(value.slice(1, -1), currentNode); currentNode.condition = condition; if (prevNode.startIf) prevNode.postProcessor = t => `{ ${prevNode.condition} ? ${t}`; else prevNode.postProcessor = t => ` : ${prevNode.condition} ? ${t}`; currentNode.postProcessor = t => ` : ${condition} ? ${t} : null }`; } else if (name == "v-else") { jsxAttr = ""; const children = currentNode.parentNode.childNodes.filter(n => n.tagName != "#text"); const prevNode = children[children.length - 2]; if (prevNode.startIf) prevNode.postProcessor = t => `{ ${prevNode.condition} ? ${t}`; else prevNode.postProcessor = t => ` : ${prevNode.condition} ? ${t}`; currentNode.postProcessor = t => ` : ${t} }`; } else if (name == "v-model") { const oninput = tagName == 'input' || tagName == 'textarea' ? "oninput" : "onchange"; const model = processJs(value.slice(1, -1), currentNode); jsxAttr = `value={ ${model} } ${oninput}={ e => ${model} = e.target.value }`; } return jsxAttr; } function processJs(jsCode: string, currentNode: ParsedNode) { let fileNode = ts.createSourceFile("test.ts", "(" + jsCode + ")", ts.ScriptTarget.ES5); let localVariables = []; while (currentNode.parentNode) { currentNode = currentNode.parentNode; localVariables = localVariables.concat(currentNode.localVariables); } let positions: number[] = []; analyse(fileNode); positions .map(p => fixPos(--p)) .filter(p => /[a-z$_]/.test(jsCode.substr(p, 1))) .filter(p => localVariables.indexOf(jsCode.substr(p).match(/^[a-zA-Z$_]+/)[0]) == -1) .sort((a, b) => b - a) .forEach(p => jsCode = jsCode.substr(0, p) + "this." 
+ jsCode.substr(p)); return jsCode; function analyse(node: ts.Node) { if (node.kind == ts.SyntaxKind.ParenthesizedExpression) { const expr = <ts.ParenthesizedExpression>node; if (expr.expression.kind == ts.SyntaxKind.Identifier) positions.push(expr.expression.pos); } if (node.kind == ts.SyntaxKind.ElementAccessExpression || node.kind == ts.SyntaxKind.PropertyAccessExpression) { positions.push(node.pos); return; } if (node.kind == ts.SyntaxKind.CallExpression && (<ts.CallExpression>node).expression.kind == ts.SyntaxKind.Identifier) positions.push(node.pos); if (node.kind == ts.SyntaxKind.BinaryExpression) { const binExpr = <ts.BinaryExpression>node; if (binExpr.right.kind == ts.SyntaxKind.Identifier) positions.push(binExpr.right.pos); if (binExpr.left.kind == ts.SyntaxKind.Identifier) positions.push(binExpr.left.pos); } ts.forEachChild(node, analyse); } function fixPos(pos)
} export = vue2jsx;
{ while(/\s/.test(jsCode.substr(pos, 1)) && pos < jsCode.length) pos++; return pos; }
identifier_body
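The tokenizer loop in vue2jsx guards against malformed markup by throwing a parse error whenever a pass over the input makes no progress (html == last). The same guard in a stripped-down Python tokenizer; the regex is simplified and the token tuples are invented for the example.

import re

TAG = re.compile(r"^<(/?)([-A-Za-z0-9_]+)[^>]*>")

def tokenize(html: str):
    tokens, last = [], None
    while html:
        if html == last:  # no progress on this pass: malformed input
            raise ValueError("Parse Error at: " + html)
        last = html
        m = TAG.match(html)
        if m:
            tokens.append(("endtag" if m.group(1) else "starttag", m.group(2)))
            html = html[m.end():]
        else:
            cut = html.find("<")
            cut = len(html) if cut < 0 else cut
            tokens.append(("text", html[:cut]))
            html = html[cut:]
    return tokens

print(tokenize("<ul><li>hi</li></ul>"))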
cme_stats.py
""" Generate stats for correct (true pos), missed (false neg), extraneous (false pos) using the top-n datasets returned Creates a json/csv files. Look in stats_and_csv folder to see what the output look like """ import json import re from collections import defaultdict from enum import Enum import os # When I ran CMR queries, I used two methods. Method 1: Use the parameters the CMR api exposes. Method 2: Just enter # my terms as a free text search into CMR class CMRSearchType(Enum): SCIENCE_KEYWORD = 0, # used the CMR parameters KEYWORD = 1, # used a free text search inside CMR BOTH = 2 # Merge the results from science keyword and plain text search # format a comma separated list into a semi-colon separated list def format_lot(lot): lot_str = str(lot) lot_str = re.sub(r'[\[\]\(\)]', '', lot_str) lot_str = re.sub(r', (\d+)', '(\\1)', lot_str) lot_str = re.sub(r',', ';', lot_str) return lot_str # Given the true datasets and the predicted datasets, determine the true positives (correct), false negatives (missed), # and false positivies (extraneous) def correct_missed_extraneous(ground_truths, predictions): ground_truths = set(ground_truths) correct = predictions & ground_truths missed = ground_truths - predictions extraneous = predictions - ground_truths return correct, missed, extraneous # csv is a string which will be written to a csv file at the end # running_cme_stats is a dictionary which gets modified in place and will be written to a json file at the end def dump_data(key, features, csv, manually_reviewed=None, title='', running_cme_stats=None, n=1, dataset_search_type=None, include_singles=False): # extract the platform/ins couples and models from the features summary_stats = features['summary_stats'] couples = sorted(list(summary_stats['valid_couples'].items()), key=lambda x: x[1], reverse=True) models = sorted(list(summary_stats['models'].items()), key=lambda x: x[1], reverse=True) title = re.sub(',', '', title) # write key, title, platform/ins couples, and models to csv string csv += f'{key},{title},{format_lot(couples)}, {format_lot(models)},' # add a column with the manually reviewed datasets if the paper was manually reviewed if manually_reviewed: manual_ground_truths = ';'.join(manually_reviewed['manually_reviewed']) csv += f'{manual_ground_truths}' # get TOP-N CMR results from pairs cmr_results = set() for inner_key, inner_value in features['cmr_results']['pairs'].items(): # the features dict contains both science keyword (using cmr parameters) and keyword (free text) searches. 
# get the predicted datasets from the appropriate search if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD: datasets = inner_value['science_keyword_search']['dataset'] elif dataset_search_type == CMRSearchType.KEYWORD: datasets = inner_value['keyword_search']['dataset'] elif dataset_search_type == CMRSearchType.BOTH: # merge the two lists together, alternating order l1 = inner_value['science_keyword_search']['dataset'] l2 = inner_value['keyword_search']['dataset'] i, j, datasets_temp = 0, 0, [] while i < len(l1) and j < len(l2): datasets_temp.append(l1[i]) datasets_temp.append(l2[j]) i += 1 j += 1 if i < len(l1): datasets_temp += l1[i:] elif j < len(l2): datasets_temp += l2[j:] # remove duplicates seen = set() datasets = [] for i in range(len(datasets_temp)): if datasets_temp[i] in seen: continue seen.add(datasets_temp[i]) datasets.append(datasets_temp[i]) if len(datasets) >= 1: for predic in datasets[:n]: cmr_results.add(predic) # cmr queries based on the single instruments and not just the couples if include_singles: for inner_key, inner_value in features['cmr_results']['singles'].items(): if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD: single_datasets = inner_value['science_keyword_search']['dataset'] elif dataset_search_type == CMRSearchType.KEYWORD: single_datasets = inner_value['keyword_search']['dataset'] else: single_datasets = None if single_datasets: for predic in single_datasets[:n]: if predic not in cmr_results: cmr_results.add(predic) # create semi-colon delineated string with the predicted datasets from CMR and add to csv string cmr_list = ';'.join(list(cmr_results)) csv += f',{cmr_list}' # If the paper was manually reviewed update the dictionary containing overall stats about how many datasets were # correct, missed, and extraneous. if manually_reviewed: correct, missed, extraneous = correct_missed_extraneous(manually_reviewed['manually_reviewed'], cmr_results) running_cme_stats['correct_count'] += len(correct) running_cme_stats['missed_count'] += len(missed) running_cme_stats['extraneous_count'] += len(extraneous) # keep counts of how often each dataset was correct. (ie: at the end we'll have something like, we predicted # ML2O3 correctly 54 times) for corr in correct: running_cme_stats['correct_dict'][corr] += 1 for miss in missed: running_cme_stats['missed_dict'][miss] += 1 for extra in extraneous: running_cme_stats['extraneous_dict'][extra] += 1 csv += f',,,{len(correct)}, {len(missed)}, {len(extraneous)}' return csv + "\n" if __name__ == '__main__': # User Parameters features_location = 'cmr_results/giovanni/giovanni_papers_features.json' # the extracted features key_title_ground_truth_location = 'cmr_results/giovanni/giovanni_papers_key_title_ground_truth.json' # includes the ground truth if applicables n = 1 # range of Top-n results to search. Ie n=1, max_n=9 means analyze results for top-1, top-2, top-3, ..., top-9 max_n = 9 cmr_search_type = CMRSearchType.SCIENCE_KEYWORD # use cmr parameters in search of use free text. 
See enum definition include_singles = False # include results from NoPlatform/Instrument science keyword CMR searches # Declare the name of the output file output_title = 'giovanni_' # change this include_singles_string = 'with_singles_' if include_singles else '' sub_folder = f'{output_title}{include_singles_string}{cmr_search_type.name.lower()}/' base_location = 'stats_and_csv/giovanni/' + sub_folder # change this with open(features_location, encoding='utf-8') as f: features = json.load(f) with open(key_title_ground_truth_location, encoding='utf-8') as f: key_title_ground_truth = json.load(f) correct, missed, extraneous = [], [], [] # make a folder if one doesn't exist if not os.path.exists(base_location): os.makedirs(base_location) # run the top-n results for all values of n while n <= max_n: filename = base_location + f'{output_title}top_{n}_{cmr_search_type.name.lower()}' added_pdfs = set() running_cme_stats = { "correct_count": 0, "missed_count": 0, "extraneous_count": 0, "correct_dict": defaultdict(int), "missed_dict": defaultdict(int), "extraneous_dict": defaultdict(int) }
for parent_key, value in key_title_ground_truth.items(): pdf_key = value['pdf'] added_pdfs.add(pdf_key) if pdf_key in features: # update both csv file and json file csv = dump_data(pdf_key, features[pdf_key], csv, manually_reviewed=value, title=value['title'], running_cme_stats=running_cme_stats, n=n, dataset_search_type=cmr_search_type) # loop through the papers that were not manually reviewed for key, value in features.items(): if key not in added_pdfs: # update only csv file csv = dump_data(key, value, csv, dataset_search_type=cmr_search_type) # sort the individual counts of number of times that a dataset was correct, missed, or extraneous running_cme_stats['correct_dict'] = dict(sorted(running_cme_stats['correct_dict'].items(), key=lambda x: x[1], reverse=True)) running_cme_stats['missed_dict'] = dict(sorted(running_cme_stats['missed_dict'].items(), key=lambda x: x[1], reverse=True)) running_cme_stats['extraneous_dict'] = dict(sorted(running_cme_stats['extraneous_dict'].items(), key=lambda x: x[1], reverse=True)) # DON'T overwrite an existing file. Exit out in this case if os.path.exists(filename + '.json'): print("\n\nFile with name already exists\n\n") exit() # save the json and csv files for the top-n with open(filename + '.json', 'w', encoding='utf-8') as f: json.dump(running_cme_stats, f, indent=4) with open(filename + '.csv', 'w', encoding='utf-8') as f: f.write(csv) # save the counts for correct, missed, extraneous into the local arrays correct.append(running_cme_stats['correct_count']) missed.append(running_cme_stats['missed_count']) extraneous.append(running_cme_stats['extraneous_count']) # run the loop again with a larger value of n n += 1 # save a file with the three lists for correct missed and extraneous and how the values change as a function of n summary_dict = { "cmr_mode": cmr_search_type.name.lower(), "correct_counts": correct, "missed_counts": missed, "extraneous_counts": extraneous, } # save the summary stats with open(base_location + f'{cmr_search_type.name.lower()}_summary_counts.json', 'w', encoding='utf-8') as f: json.dump(summary_dict, f)
csv = "paper, title, mission/instruments, models, manually reviewed, CMR datasets,,,correct, missed, extraneous\n" # iterate through the manually reviewed papers. Add data into csv and json files via dump_data method
random_line_split
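correct_missed_extraneous above reduces each paper's evaluation to three set operations. A self-contained example; every dataset ID here other than ML2O3 (which appears in the script's comments) is invented for illustration.

ground_truth = {"ML2O3", "MOD08_M3", "OMNO2d"}
predicted = {"ML2O3", "AIRS3STD"}

correct = predicted & ground_truth     # true positives:  {'ML2O3'}
missed = ground_truth - predicted      # false negatives: {'MOD08_M3', 'OMNO2d'}
extraneous = predicted - ground_truth  # false positives: {'AIRS3STD'}
print(len(correct), len(missed), len(extraneous))  # 1 2 1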
cme_stats.py
""" Generate stats for correct (true pos), missed (false neg), extraneous (false pos) using the top-n datasets returned Creates a json/csv files. Look in stats_and_csv folder to see what the output look like """ import json import re from collections import defaultdict from enum import Enum import os # When I ran CMR queries, I used two methods. Method 1: Use the parameters the CMR api exposes. Method 2: Just enter # my terms as a free text search into CMR class CMRSearchType(Enum): SCIENCE_KEYWORD = 0, # used the CMR parameters KEYWORD = 1, # used a free text search inside CMR BOTH = 2 # Merge the results from science keyword and plain text search # format a comma separated list into a semi-colon separated list def format_lot(lot): lot_str = str(lot) lot_str = re.sub(r'[\[\]\(\)]', '', lot_str) lot_str = re.sub(r', (\d+)', '(\\1)', lot_str) lot_str = re.sub(r',', ';', lot_str) return lot_str # Given the true datasets and the predicted datasets, determine the true positives (correct), false negatives (missed), # and false positivies (extraneous) def correct_missed_extraneous(ground_truths, predictions): ground_truths = set(ground_truths) correct = predictions & ground_truths missed = ground_truths - predictions extraneous = predictions - ground_truths return correct, missed, extraneous # csv is a string which will be written to a csv file at the end # running_cme_stats is a dictionary which gets modified in place and will be written to a json file at the end def
(key, features, csv, manually_reviewed=None, title='', running_cme_stats=None, n=1, dataset_search_type=None, include_singles=False): # extract the platform/ins couples and models from the features summary_stats = features['summary_stats'] couples = sorted(list(summary_stats['valid_couples'].items()), key=lambda x: x[1], reverse=True) models = sorted(list(summary_stats['models'].items()), key=lambda x: x[1], reverse=True) title = re.sub(',', '', title) # write key, title, platform/ins couples, and models to csv string csv += f'{key},{title},{format_lot(couples)}, {format_lot(models)},' # add a column with the manually reviewed datasets if the paper was manually reviewed if manually_reviewed: manual_ground_truths = ';'.join(manually_reviewed['manually_reviewed']) csv += f'{manual_ground_truths}' # get TOP-N CMR results from pairs cmr_results = set() for inner_key, inner_value in features['cmr_results']['pairs'].items(): # the features dict contains both science keyword (using cmr parameters) and keyword (free text) searches. # get the predicted datasets from the appropriate search if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD: datasets = inner_value['science_keyword_search']['dataset'] elif dataset_search_type == CMRSearchType.KEYWORD: datasets = inner_value['keyword_search']['dataset'] elif dataset_search_type == CMRSearchType.BOTH: # merge the two lists together, alternating order l1 = inner_value['science_keyword_search']['dataset'] l2 = inner_value['keyword_search']['dataset'] i, j, datasets_temp = 0, 0, [] while i < len(l1) and j < len(l2): datasets_temp.append(l1[i]) datasets_temp.append(l2[j]) i += 1 j += 1 if i < len(l1): datasets_temp += l1[i:] elif j < len(l2): datasets_temp += l2[j:] # remove duplicates seen = set() datasets = [] for i in range(len(datasets_temp)): if datasets_temp[i] in seen: continue seen.add(datasets_temp[i]) datasets.append(datasets_temp[i]) if len(datasets) >= 1: for predic in datasets[:n]: cmr_results.add(predic) # cmr queries based on the single instruments and not just the couples if include_singles: for inner_key, inner_value in features['cmr_results']['singles'].items(): if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD: single_datasets = inner_value['science_keyword_search']['dataset'] elif dataset_search_type == CMRSearchType.KEYWORD: single_datasets = inner_value['keyword_search']['dataset'] else: single_datasets = None if single_datasets: for predic in single_datasets[:n]: if predic not in cmr_results: cmr_results.add(predic) # create semi-colon delineated string with the predicted datasets from CMR and add to csv string cmr_list = ';'.join(list(cmr_results)) csv += f',{cmr_list}' # If the paper was manually reviewed update the dictionary containing overall stats about how many datasets were # correct, missed, and extraneous. if manually_reviewed: correct, missed, extraneous = correct_missed_extraneous(manually_reviewed['manually_reviewed'], cmr_results) running_cme_stats['correct_count'] += len(correct) running_cme_stats['missed_count'] += len(missed) running_cme_stats['extraneous_count'] += len(extraneous) # keep counts of how often each dataset was correct. 
(ie: at the end we'll have something like, we predicted # ML2O3 correctly 54 times) for corr in correct: running_cme_stats['correct_dict'][corr] += 1 for miss in missed: running_cme_stats['missed_dict'][miss] += 1 for extra in extraneous: running_cme_stats['extraneous_dict'][extra] += 1 csv += f',,,{len(correct)}, {len(missed)}, {len(extraneous)}' return csv + "\n" if __name__ == '__main__': # User Parameters features_location = 'cmr_results/giovanni/giovanni_papers_features.json' # the extracted features key_title_ground_truth_location = 'cmr_results/giovanni/giovanni_papers_key_title_ground_truth.json' # includes the ground truth if applicables n = 1 # range of Top-n results to search. Ie n=1, max_n=9 means analyze results for top-1, top-2, top-3, ..., top-9 max_n = 9 cmr_search_type = CMRSearchType.SCIENCE_KEYWORD # use cmr parameters in search of use free text. See enum definition include_singles = False # include results from NoPlatform/Instrument science keyword CMR searches # Declare the name of the output file output_title = 'giovanni_' # change this include_singles_string = 'with_singles_' if include_singles else '' sub_folder = f'{output_title}{include_singles_string}{cmr_search_type.name.lower()}/' base_location = 'stats_and_csv/giovanni/' + sub_folder # change this with open(features_location, encoding='utf-8') as f: features = json.load(f) with open(key_title_ground_truth_location, encoding='utf-8') as f: key_title_ground_truth = json.load(f) correct, missed, extraneous = [], [], [] # make a folder if one doesn't exist if not os.path.exists(base_location): os.makedirs(base_location) # run the top-n results for all values of n while n <= max_n: filename = base_location + f'{output_title}top_{n}_{cmr_search_type.name.lower()}' added_pdfs = set() running_cme_stats = { "correct_count": 0, "missed_count": 0, "extraneous_count": 0, "correct_dict": defaultdict(int), "missed_dict": defaultdict(int), "extraneous_dict": defaultdict(int) } csv = "paper, title, mission/instruments, models, manually reviewed, CMR datasets,,,correct, missed, extraneous\n" # iterate through the manually reviewed papers. Add data into csv and json files via dump_data method for parent_key, value in key_title_ground_truth.items(): pdf_key = value['pdf'] added_pdfs.add(pdf_key) if pdf_key in features: # update both csv file and json file csv = dump_data(pdf_key, features[pdf_key], csv, manually_reviewed=value, title=value['title'], running_cme_stats=running_cme_stats, n=n, dataset_search_type=cmr_search_type) # loop through the papers that were not manually reviewed for key, value in features.items(): if key not in added_pdfs: # update only csv file csv = dump_data(key, value, csv, dataset_search_type=cmr_search_type) # sort the individual counts of number of times that a dataset was correct, missed, or extraneous running_cme_stats['correct_dict'] = dict(sorted(running_cme_stats['correct_dict'].items(), key=lambda x: x[1], reverse=True)) running_cme_stats['missed_dict'] = dict(sorted(running_cme_stats['missed_dict'].items(), key=lambda x: x[1], reverse=True)) running_cme_stats['extraneous_dict'] = dict(sorted(running_cme_stats['extraneous_dict'].items(), key=lambda x: x[1], reverse=True)) # DON'T overwrite an existing file. 
Exit out in this case if os.path.exists(filename + '.json'): print("\n\nFile with name already exists\n\n") exit() # save the json and csv files for the top-n with open(filename + '.json', 'w', encoding='utf-8') as f: json.dump(running_cme_stats, f, indent=4) with open(filename + '.csv', 'w', encoding='utf-8') as f: f.write(csv) # save the counts for correct, missed, extraneous into the local arrays correct.append(running_cme_stats['correct_count']) missed.append(running_cme_stats['missed_count']) extraneous.append(running_cme_stats['extraneous_count']) # run the loop again with a larger value of n n += 1 # save a file with the three lists for correct missed and extraneous and how the values change as a function of n summary_dict = { "cmr_mode": cmr_search_type.name.lower(), "correct_counts": correct, "missed_counts": missed, "extraneous_counts": extraneous, } # save the summary stats with open(base_location + f'{cmr_search_type.name.lower()}_summary_counts.json', 'w', encoding='utf-8') as f: json.dump(summary_dict, f)
dump_data
identifier_name
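For CMRSearchType.BOTH, dump_data interleaves the science-keyword and free-text result lists and then drops duplicates while preserving order. The same merge written with itertools; interleave_dedupe is an illustrative name, and the output matches the hand-rolled loop in the record, including the tail of the longer list.

from itertools import zip_longest

def interleave_dedupe(l1, l2):
    merged = [x for pair in zip_longest(l1, l2) for x in pair if x is not None]
    seen, out = set(), []
    for item in merged:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out

print(interleave_dedupe(["a", "b", "c"], ["b", "d"]))  # ['a', 'b', 'd', 'c']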
cme_stats.py
""" Generate stats for correct (true pos), missed (false neg), extraneous (false pos) using the top-n datasets returned Creates a json/csv files. Look in stats_and_csv folder to see what the output look like """ import json import re from collections import defaultdict from enum import Enum import os # When I ran CMR queries, I used two methods. Method 1: Use the parameters the CMR api exposes. Method 2: Just enter # my terms as a free text search into CMR class CMRSearchType(Enum): SCIENCE_KEYWORD = 0, # used the CMR parameters KEYWORD = 1, # used a free text search inside CMR BOTH = 2 # Merge the results from science keyword and plain text search # format a comma separated list into a semi-colon separated list def format_lot(lot): lot_str = str(lot) lot_str = re.sub(r'[\[\]\(\)]', '', lot_str) lot_str = re.sub(r', (\d+)', '(\\1)', lot_str) lot_str = re.sub(r',', ';', lot_str) return lot_str # Given the true datasets and the predicted datasets, determine the true positives (correct), false negatives (missed), # and false positivies (extraneous) def correct_missed_extraneous(ground_truths, predictions): ground_truths = set(ground_truths) correct = predictions & ground_truths missed = ground_truths - predictions extraneous = predictions - ground_truths return correct, missed, extraneous # csv is a string which will be written to a csv file at the end # running_cme_stats is a dictionary which gets modified in place and will be written to a json file at the end def dump_data(key, features, csv, manually_reviewed=None, title='', running_cme_stats=None, n=1, dataset_search_type=None, include_singles=False): # extract the platform/ins couples and models from the features
if __name__ == '__main__': # User Parameters features_location = 'cmr_results/giovanni/giovanni_papers_features.json' # the extracted features key_title_ground_truth_location = 'cmr_results/giovanni/giovanni_papers_key_title_ground_truth.json' # includes the ground truth if applicables n = 1 # range of Top-n results to search. Ie n=1, max_n=9 means analyze results for top-1, top-2, top-3, ..., top-9 max_n = 9 cmr_search_type = CMRSearchType.SCIENCE_KEYWORD # use cmr parameters in search of use free text. See enum definition include_singles = False # include results from NoPlatform/Instrument science keyword CMR searches # Declare the name of the output file output_title = 'giovanni_' # change this include_singles_string = 'with_singles_' if include_singles else '' sub_folder = f'{output_title}{include_singles_string}{cmr_search_type.name.lower()}/' base_location = 'stats_and_csv/giovanni/' + sub_folder # change this with open(features_location, encoding='utf-8') as f: features = json.load(f) with open(key_title_ground_truth_location, encoding='utf-8') as f: key_title_ground_truth = json.load(f) correct, missed, extraneous = [], [], [] # make a folder if one doesn't exist if not os.path.exists(base_location): os.makedirs(base_location) # run the top-n results for all values of n while n <= max_n: filename = base_location + f'{output_title}top_{n}_{cmr_search_type.name.lower()}' added_pdfs = set() running_cme_stats = { "correct_count": 0, "missed_count": 0, "extraneous_count": 0, "correct_dict": defaultdict(int), "missed_dict": defaultdict(int), "extraneous_dict": defaultdict(int) } csv = "paper, title, mission/instruments, models, manually reviewed, CMR datasets,,,correct, missed, extraneous\n" # iterate through the manually reviewed papers. Add data into csv and json files via dump_data method for parent_key, value in key_title_ground_truth.items(): pdf_key = value['pdf'] added_pdfs.add(pdf_key) if pdf_key in features: # update both csv file and json file csv = dump_data(pdf_key, features[pdf_key], csv, manually_reviewed=value, title=value['title'], running_cme_stats=running_cme_stats, n=n, dataset_search_type=cmr_search_type) # loop through the papers that were not manually reviewed for key, value in features.items(): if key not in added_pdfs: # update only csv file csv = dump_data(key, value, csv, dataset_search_type=cmr_search_type) # sort the individual counts of number of times that a dataset was correct, missed, or extraneous running_cme_stats['correct_dict'] = dict(sorted(running_cme_stats['correct_dict'].items(), key=lambda x: x[1], reverse=True)) running_cme_stats['missed_dict'] = dict(sorted(running_cme_stats['missed_dict'].items(), key=lambda x: x[1], reverse=True)) running_cme_stats['extraneous_dict'] = dict(sorted(running_cme_stats['extraneous_dict'].items(), key=lambda x: x[1], reverse=True)) # DON'T overwrite an existing file. 
Exit out in this case if os.path.exists(filename + '.json'): print("\n\nFile with name already exists\n\n") exit() # save the json and csv files for the top-n with open(filename + '.json', 'w', encoding='utf-8') as f: json.dump(running_cme_stats, f, indent=4) with open(filename + '.csv', 'w', encoding='utf-8') as f: f.write(csv) # save the counts for correct, missed, extraneous into the local arrays correct.append(running_cme_stats['correct_count']) missed.append(running_cme_stats['missed_count']) extraneous.append(running_cme_stats['extraneous_count']) # run the loop again with a larger value of n n += 1 # save a file with the three lists for correct missed and extraneous and how the values change as a function of n summary_dict = { "cmr_mode": cmr_search_type.name.lower(), "correct_counts": correct, "missed_counts": missed, "extraneous_counts": extraneous, } # save the summary stats with open(base_location + f'{cmr_search_type.name.lower()}_summary_counts.json', 'w', encoding='utf-8') as f: json.dump(summary_dict, f)
summary_stats = features['summary_stats'] couples = sorted(list(summary_stats['valid_couples'].items()), key=lambda x: x[1], reverse=True) models = sorted(list(summary_stats['models'].items()), key=lambda x: x[1], reverse=True) title = re.sub(',', '', title) # write key, title, platform/ins couples, and models to csv string csv += f'{key},{title},{format_lot(couples)}, {format_lot(models)},' # add a column with the manually reviewed datasets if the paper was manually reviewed if manually_reviewed: manual_ground_truths = ';'.join(manually_reviewed['manually_reviewed']) csv += f'{manual_ground_truths}' # get TOP-N CMR results from pairs cmr_results = set() for inner_key, inner_value in features['cmr_results']['pairs'].items(): # the features dict contains both science keyword (using cmr parameters) and keyword (free text) searches. # get the predicted datasets from the appropriate search if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD: datasets = inner_value['science_keyword_search']['dataset'] elif dataset_search_type == CMRSearchType.KEYWORD: datasets = inner_value['keyword_search']['dataset'] elif dataset_search_type == CMRSearchType.BOTH: # merge the two lists together, alternating order l1 = inner_value['science_keyword_search']['dataset'] l2 = inner_value['keyword_search']['dataset'] i, j, datasets_temp = 0, 0, [] while i < len(l1) and j < len(l2): datasets_temp.append(l1[i]) datasets_temp.append(l2[j]) i += 1 j += 1 if i < len(l1): datasets_temp += l1[i:] elif j < len(l2): datasets_temp += l2[j:] # remove duplicates seen = set() datasets = [] for i in range(len(datasets_temp)): if datasets_temp[i] in seen: continue seen.add(datasets_temp[i]) datasets.append(datasets_temp[i]) if len(datasets) >= 1: for predic in datasets[:n]: cmr_results.add(predic) # cmr queries based on the single instruments and not just the couples if include_singles: for inner_key, inner_value in features['cmr_results']['singles'].items(): if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD: single_datasets = inner_value['science_keyword_search']['dataset'] elif dataset_search_type == CMRSearchType.KEYWORD: single_datasets = inner_value['keyword_search']['dataset'] else: single_datasets = None if single_datasets: for predic in single_datasets[:n]: if predic not in cmr_results: cmr_results.add(predic) # create semi-colon delineated string with the predicted datasets from CMR and add to csv string cmr_list = ';'.join(list(cmr_results)) csv += f',{cmr_list}' # If the paper was manually reviewed update the dictionary containing overall stats about how many datasets were # correct, missed, and extraneous. if manually_reviewed: correct, missed, extraneous = correct_missed_extraneous(manually_reviewed['manually_reviewed'], cmr_results) running_cme_stats['correct_count'] += len(correct) running_cme_stats['missed_count'] += len(missed) running_cme_stats['extraneous_count'] += len(extraneous) # keep counts of how often each dataset was correct. (ie: at the end we'll have something like, we predicted # ML2O3 correctly 54 times) for corr in correct: running_cme_stats['correct_dict'][corr] += 1 for miss in missed: running_cme_stats['missed_dict'][miss] += 1 for extra in extraneous: running_cme_stats['extraneous_dict'][extra] += 1 csv += f',,,{len(correct)}, {len(missed)}, {len(extraneous)}' return csv + "\n"
identifier_body
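After each top-n pass the script converts its defaultdict tallies into plain dicts sorted by count, descending, so the JSON output lists the most frequently correct, missed, or extraneous datasets first. A standalone rendering of that step (dataset IDs invented):

from collections import defaultdict

counts = defaultdict(int)
for dataset in ["ML2O3", "OMNO2d", "ML2O3", "ML2O3", "OMNO2d", "AIRS3STD"]:
    counts[dataset] += 1

ranked = dict(sorted(counts.items(), key=lambda kv: kv[1], reverse=True))
print(ranked)  # {'ML2O3': 3, 'OMNO2d': 2, 'AIRS3STD': 1}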
cme_stats.py
""" Generate stats for correct (true pos), missed (false neg), extraneous (false pos) using the top-n datasets returned Creates a json/csv files. Look in stats_and_csv folder to see what the output look like """ import json import re from collections import defaultdict from enum import Enum import os # When I ran CMR queries, I used two methods. Method 1: Use the parameters the CMR api exposes. Method 2: Just enter # my terms as a free text search into CMR class CMRSearchType(Enum): SCIENCE_KEYWORD = 0, # used the CMR parameters KEYWORD = 1, # used a free text search inside CMR BOTH = 2 # Merge the results from science keyword and plain text search # format a comma separated list into a semi-colon separated list def format_lot(lot): lot_str = str(lot) lot_str = re.sub(r'[\[\]\(\)]', '', lot_str) lot_str = re.sub(r', (\d+)', '(\\1)', lot_str) lot_str = re.sub(r',', ';', lot_str) return lot_str # Given the true datasets and the predicted datasets, determine the true positives (correct), false negatives (missed), # and false positivies (extraneous) def correct_missed_extraneous(ground_truths, predictions): ground_truths = set(ground_truths) correct = predictions & ground_truths missed = ground_truths - predictions extraneous = predictions - ground_truths return correct, missed, extraneous # csv is a string which will be written to a csv file at the end # running_cme_stats is a dictionary which gets modified in place and will be written to a json file at the end def dump_data(key, features, csv, manually_reviewed=None, title='', running_cme_stats=None, n=1, dataset_search_type=None, include_singles=False): # extract the platform/ins couples and models from the features summary_stats = features['summary_stats'] couples = sorted(list(summary_stats['valid_couples'].items()), key=lambda x: x[1], reverse=True) models = sorted(list(summary_stats['models'].items()), key=lambda x: x[1], reverse=True) title = re.sub(',', '', title) # write key, title, platform/ins couples, and models to csv string csv += f'{key},{title},{format_lot(couples)}, {format_lot(models)},' # add a column with the manually reviewed datasets if the paper was manually reviewed if manually_reviewed: manual_ground_truths = ';'.join(manually_reviewed['manually_reviewed']) csv += f'{manual_ground_truths}' # get TOP-N CMR results from pairs cmr_results = set() for inner_key, inner_value in features['cmr_results']['pairs'].items(): # the features dict contains both science keyword (using cmr parameters) and keyword (free text) searches. 
# get the predicted datasets from the appropriate search if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD: datasets = inner_value['science_keyword_search']['dataset'] elif dataset_search_type == CMRSearchType.KEYWORD: datasets = inner_value['keyword_search']['dataset'] elif dataset_search_type == CMRSearchType.BOTH: # merge the two lists together, alternating order l1 = inner_value['science_keyword_search']['dataset'] l2 = inner_value['keyword_search']['dataset'] i, j, datasets_temp = 0, 0, [] while i < len(l1) and j < len(l2): datasets_temp.append(l1[i]) datasets_temp.append(l2[j]) i += 1 j += 1 if i < len(l1): datasets_temp += l1[i:] elif j < len(l2): datasets_temp += l2[j:] # remove duplicates seen = set() datasets = [] for i in range(len(datasets_temp)): if datasets_temp[i] in seen: continue seen.add(datasets_temp[i]) datasets.append(datasets_temp[i]) if len(datasets) >= 1: for predic in datasets[:n]: cmr_results.add(predic) # cmr queries based on the single instruments and not just the couples if include_singles: for inner_key, inner_value in features['cmr_results']['singles'].items(): if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD: single_datasets = inner_value['science_keyword_search']['dataset'] elif dataset_search_type == CMRSearchType.KEYWORD: single_datasets = inner_value['keyword_search']['dataset'] else: single_datasets = None if single_datasets:
# create semi-colon delineated string with the predicted datasets from CMR and add to csv string cmr_list = ';'.join(list(cmr_results)) csv += f',{cmr_list}' # If the paper was manually reviewed update the dictionary containing overall stats about how many datasets were # correct, missed, and extraneous. if manually_reviewed: correct, missed, extraneous = correct_missed_extraneous(manually_reviewed['manually_reviewed'], cmr_results) running_cme_stats['correct_count'] += len(correct) running_cme_stats['missed_count'] += len(missed) running_cme_stats['extraneous_count'] += len(extraneous) # keep counts of how often each dataset was correct. (ie: at the end we'll have something like, we predicted # ML2O3 correctly 54 times) for corr in correct: running_cme_stats['correct_dict'][corr] += 1 for miss in missed: running_cme_stats['missed_dict'][miss] += 1 for extra in extraneous: running_cme_stats['extraneous_dict'][extra] += 1 csv += f',,,{len(correct)}, {len(missed)}, {len(extraneous)}' return csv + "\n" if __name__ == '__main__': # User Parameters features_location = 'cmr_results/giovanni/giovanni_papers_features.json' # the extracted features key_title_ground_truth_location = 'cmr_results/giovanni/giovanni_papers_key_title_ground_truth.json' # includes the ground truth if applicables n = 1 # range of Top-n results to search. Ie n=1, max_n=9 means analyze results for top-1, top-2, top-3, ..., top-9 max_n = 9 cmr_search_type = CMRSearchType.SCIENCE_KEYWORD # use cmr parameters in search of use free text. See enum definition include_singles = False # include results from NoPlatform/Instrument science keyword CMR searches # Declare the name of the output file output_title = 'giovanni_' # change this include_singles_string = 'with_singles_' if include_singles else '' sub_folder = f'{output_title}{include_singles_string}{cmr_search_type.name.lower()}/' base_location = 'stats_and_csv/giovanni/' + sub_folder # change this with open(features_location, encoding='utf-8') as f: features = json.load(f) with open(key_title_ground_truth_location, encoding='utf-8') as f: key_title_ground_truth = json.load(f) correct, missed, extraneous = [], [], [] # make a folder if one doesn't exist if not os.path.exists(base_location): os.makedirs(base_location) # run the top-n results for all values of n while n <= max_n: filename = base_location + f'{output_title}top_{n}_{cmr_search_type.name.lower()}' added_pdfs = set() running_cme_stats = { "correct_count": 0, "missed_count": 0, "extraneous_count": 0, "correct_dict": defaultdict(int), "missed_dict": defaultdict(int), "extraneous_dict": defaultdict(int) } csv = "paper, title, mission/instruments, models, manually reviewed, CMR datasets,,,correct, missed, extraneous\n" # iterate through the manually reviewed papers. 
Add data into csv and json files via dump_data method for parent_key, value in key_title_ground_truth.items(): pdf_key = value['pdf'] added_pdfs.add(pdf_key) if pdf_key in features: # update both csv file and json file csv = dump_data(pdf_key, features[pdf_key], csv, manually_reviewed=value, title=value['title'], running_cme_stats=running_cme_stats, n=n, dataset_search_type=cmr_search_type) # loop through the papers that were not manually reviewed for key, value in features.items(): if key not in added_pdfs: # update only csv file csv = dump_data(key, value, csv, dataset_search_type=cmr_search_type) # sort the individual counts of number of times that a dataset was correct, missed, or extraneous running_cme_stats['correct_dict'] = dict(sorted(running_cme_stats['correct_dict'].items(), key=lambda x: x[1], reverse=True)) running_cme_stats['missed_dict'] = dict(sorted(running_cme_stats['missed_dict'].items(), key=lambda x: x[1], reverse=True)) running_cme_stats['extraneous_dict'] = dict(sorted(running_cme_stats['extraneous_dict'].items(), key=lambda x: x[1], reverse=True)) # DON'T overwrite an existing file. Exit out in this case if os.path.exists(filename + '.json'): print("\n\nFile with name already exists\n\n") exit() # save the json and csv files for the top-n with open(filename + '.json', 'w', encoding='utf-8') as f: json.dump(running_cme_stats, f, indent=4) with open(filename + '.csv', 'w', encoding='utf-8') as f: f.write(csv) # save the counts for correct, missed, extraneous into the local arrays correct.append(running_cme_stats['correct_count']) missed.append(running_cme_stats['missed_count']) extraneous.append(running_cme_stats['extraneous_count']) # run the loop again with a larger value of n n += 1 # save a file with the three lists for correct missed and extraneous and how the values change as a function of n summary_dict = { "cmr_mode": cmr_search_type.name.lower(), "correct_counts": correct, "missed_counts": missed, "extraneous_counts": extraneous, } # save the summary stats with open(base_location + f'{cmr_search_type.name.lower()}_summary_counts.json', 'w', encoding='utf-8') as f: json.dump(summary_dict, f)
for predic in single_datasets[:n]: if predic not in cmr_results: cmr_results.add(predic)
conditional_block
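The main loop refuses to overwrite an existing top-n JSON file and calls exit(). A small variant of that guard that raises instead of exiting, which keeps the behavior testable; save_json_once is a hypothetical helper, not part of the script.

import json
import os

def save_json_once(path: str, payload: dict) -> None:
    # Same don't-overwrite check as the script, but raise rather than exit().
    if os.path.exists(path):
        raise FileExistsError(f"refusing to overwrite {path}")
    with open(path, "w", encoding="utf-8") as f:
        json.dump(payload, f, indent=4)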
mod.rs
// Copyright 2022 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use aes::cipher::generic_array::GenericArray; use aes::{Aes128, BlockDecrypt, BlockEncrypt, NewBlockCipher}; use fuchsia_inspect::{self as inspect, Property}; use fuchsia_inspect_derive::{AttachError, Inspect}; use lru_cache::LruCache; use rand::Rng; use serde::{Deserialize, Serialize}; use std::convert::{TryFrom, TryInto}; use std::{fs, io, path}; use tracing::{debug, warn}; use crate::advertisement::bloom_filter; mod error; pub mod keys; pub mod packets; pub use error::Error; /// Represents the 24-bit Model ID assigned to a Fast Pair device upon registration. #[derive(Debug, Copy, Clone, PartialEq)] pub struct ModelId(u32); impl TryFrom<u32> for ModelId { type Error = Error; fn try_from(src: u32) -> Result<Self, Self::Error> { // u24::MAX if src > 0xffffff { return Err(Error::InvalidModelId(src)); } Ok(Self(src)) } } impl From<ModelId> for [u8; 3] { fn from(src: ModelId) -> [u8; 3] { let mut bytes = [0; 3]; bytes[..3].copy_from_slice(&src.0.to_be_bytes()[1..]); bytes } } /// A key used during the Fast Pair Pairing Procedure. /// This key is a temporary value that lives for the lifetime of a procedure. #[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] pub struct SharedSecret([u8; 16]); impl SharedSecret { pub fn new(bytes: [u8; 16]) -> Self { Self(bytes) } pub fn as_bytes(&self) -> &[u8; 16] { &self.0 } /// Decrypts the provided `message` buffer with the AccountKey using AES-128. /// Returns the decrypted payload. pub fn decrypt(&self, message: &[u8; 16]) -> [u8; 16] { let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes())); let mut block = GenericArray::clone_from_slice(message); cipher.decrypt_block(&mut block); block.into() } /// Encrypts the provided `message` buffer with the AccountKey using AES-128. /// Returns the encrypted payload. pub fn encrypt(&self, message: &[u8; 16]) -> [u8; 16] { let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes())); let mut block = GenericArray::clone_from_slice(message); cipher.encrypt_block(&mut block); block.into() } } /// A long-lived key that allows the Provider to be recognized as belonging to a certain user /// account. #[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] pub struct AccountKey(SharedSecret); impl AccountKey { pub fn new(bytes: [u8; 16]) -> Self { Self(SharedSecret::new(bytes)) } pub fn as_bytes(&self) -> &[u8; 16] { &self.0.as_bytes() } pub fn shared_secret(&self) -> &SharedSecret { &self.0 } } impl From<&SharedSecret> for AccountKey { fn from(src: &SharedSecret) -> AccountKey { AccountKey(src.clone()) } } /// The maximum number of Account Keys that can be managed by the Fast Pair server. Account Keys /// will be evicted in an LRU manner as described in the GFPS specification. /// This limit is chosen as the minimum required by any implementation and provides ample space /// in the LE advertisement packet. /// See https://developers.google.com/nearby/fast-pair/specifications/configuration#AccountKeyList /// for more details. const MAX_ACCOUNT_KEYS: usize = 5; /// Manages the set of saved Account Keys. /// /// By default, the maximum number of keys that will be saved is `MAX_ACCOUNT_KEYS`. When full, the /// `AccountKeyList` will evict the least recently used Account Key. /// /// Account Keys are written to isolated persistent storage and are maintained across reboots. 
The /// set of saved keys will only be erased on device factory resets. /// To avoid writing to persistent storage too often, only new Account Keys are written to storage. /// Writes for existing keys will result in cache "hits" (e.g LRU ordering updated) but will not be /// updated in the backing storage file. pub struct AccountKeyList { /// The set of saved Account Keys. Keys are evicted in an LRU manner. There is no cache value /// as we only care about maintaining the keys. keys: LruCache<AccountKey, ()>, /// The file path pointing to the isolated persistent storage which saves the Account Keys. path: path::PathBuf, /// The number of keys currently saved in the AccountKeyList. account_key_count: inspect::UintProperty, } impl Inspect for &mut AccountKeyList { fn iattach(self, parent: &inspect::Node, _name: impl AsRef<str>) -> Result<(), AttachError> { self.account_key_count = parent.create_uint("account_key_count", self.keys.len() as u64); Ok(()) } } impl AccountKeyList { /// Attempts to load the current set of saved Account Keys from isolated persistent storage. /// Returns the updated AccountKeyList of keys on success, Error otherwise. pub fn load() -> Result<Self, Error> { Self::load_from_path(Self::PERSISTED_ACCOUNT_KEYS_FILEPATH) } /// Builds an AccountKey list with the provided `keys`. /// A random test file path is used to avoid concurrently running tests from reading/writing /// from/to the same file. #[cfg(test)] pub fn with_capacity_and_keys(capacity: usize, keys: Vec<AccountKey>) -> Self { let mut cache = LruCache::new(capacity); keys.into_iter().for_each(|k| { let _ = cache.insert(k, ()); }); let val = rand::thread_rng().gen::<u64>(); let path = format!("data/test_account_keys{}.json", val); Self { keys: cache, path: path::PathBuf::from(path), account_key_count: Default::default() } } #[cfg(test)] pub fn path(&self) -> String { self.path.clone().into_os_string().into_string().expect("valid path string") } fn update_inspect(&self) { self.account_key_count.set(self.keys.len() as u64); } /// Returns an Iterator over the saved Account Keys. /// Note: Access via Iterator does not modify LRU state. pub fn keys(&self) -> impl Iterator<Item = &AccountKey> + ExactSizeIterator { self.keys.iter().map(|(k, _)| k) } /// Marks the provided `key` as used in the LRU cache. /// Returns Error if the key does not exist in the cache. pub fn mark_used(&mut self, key: &AccountKey) -> Result<(), Error> { self.keys.get_mut(&key).map(|_| ()).ok_or(Error::internal("no key to mark as used")) } /// Save an Account Key to the persisted set of keys. pub fn save(&mut self, key: AccountKey) { // If the `key` already exists, it will be updated in the LRU cache. If the cache is // full, the least-recently used (LRU) key will be evicted. if self.keys.insert(key, ()).is_some() { debug!("Account Key already saved"); } // Store the updated set of keys in persistent storage. if let Err(e) = self.store() { warn!("Couldn't update key list in isolated persistent storage: {:?}", e); } self.update_inspect(); } /// Returns the service data payload associated with the current set of Account Keys. 
pub fn service_data(&self) -> Result<Vec<u8>, Error> { if self.keys.is_empty() { return Ok(vec![0x0]); } let salt = rand::thread_rng().gen::<u8>(); self.service_data_internal(salt) } fn service_data_internal(&self, salt: u8) -> Result<Vec<u8>, Error> { let account_keys_bytes = bloom_filter(self.keys(), salt)?; let mut result = Vec::new(); // First byte is 0bLLLLTTTT, where L = length of the account key list, T = Type (0b0000 to // show UI notification, 0b0010 to hide it). The maximum amount of account key data that can // be represented is 15 bytes (u4::MAX). let length: u8 = match account_keys_bytes.len().try_into() { Ok(len) if len <= 15 => len, _ => return Err(Error::internal("Account key data too large")), }; // For now, we will always request to show the UI notification (TTTT = 0b0000). result.push(length << 4); // Next n bytes are the Bloom-filtered Account Key list. result.extend(account_keys_bytes); // The descriptor value associated with the Salt section of the LE advertisement payload. // Formatted as 0bLLLLTTTT, where L (Length) = 0b0001 and T (Type) = 0b0001. Both are fixed. const SALT_DESCRIPTOR: u8 = 0x11; result.push(SALT_DESCRIPTOR); // Final byte is the Salt value. result.push(salt); Ok(result) } // Default file path for Account Keys written to isolated persistent storage. const PERSISTED_ACCOUNT_KEYS_FILEPATH: &'static str = "/data/account_keys.json"; /// Attempts to read and parse the contents of the persistent storage at the provided `path`. /// Returns an `AccountKeyList` on success, Error otherwise. fn load_from_path<P: AsRef<path::Path>>(path: P) -> Result<Self, Error> { let mut this = Self { keys: LruCache::new(MAX_ACCOUNT_KEYS), path: path::PathBuf::from(path.as_ref()), account_key_count: Default::default(), }; this.load_internal()?; Ok(this) } /// Attempts to update the locally-saved set of keys from persistent storage. /// Returns Error if the storage file is unable to be opened. fn load_internal(&mut self) -> Result<(), Error> { match fs::File::open(&self.path) { Ok(file) => { // Build the LRU cache from the contents of the file. Because keys are stored in // LRU order, we build the cache in the same order to preserve LRU status. debug!("Reading Account Keys from existing file"); let key_list = KeyList::load(file)?; key_list.0.into_iter().for_each(|k| { let _ = self.keys.insert(k, ()); }); Ok(()) } Err(error) if error.kind() == io::ErrorKind::NotFound => { debug!("Persistent storage file not found"); Ok(()) } Err(e) => Err(Error::key_storage(e, "couldn't load key storage file")), } } /// Commits the current set of Account Keys to isolated persistent storage. /// Keys are stored in LRU order. fn store(&self) -> Result<(), Error> { let path = path::Path::new(&self.path); let file_name = path.file_name().ok_or(Error::key_storage( io::ErrorKind::InvalidInput.into(), "couldn't build file name from path", ))?; let file_path = path.with_file_name(file_name.to_os_string()); let file = fs::File::create(&file_path) .map_err(|e| Error::key_storage(e, "couldn't create file"))?; let values = KeyList(self.keys().cloned().collect()); serde_json::to_writer(file, &values)?; Ok(()) } } /// Convenience type for the serialization and deserialization of Account Keys. 
#[derive(Serialize, Deserialize)] struct KeyList(Vec<AccountKey>); impl KeyList { fn load<R: io::Read>(reader: R) -> Result<Self, Error> { serde_json::from_reader(reader).map_err(Into::into) } } #[cfg(test)] pub(crate) mod tests { use super::*; use assert_matches::assert_matches; /// Loads the set of saved Account Keys from storage and verifies that it's equal to the /// provided `expected_keys`. #[track_caller] pub(crate) fn expect_keys_at_path<P: AsRef<path::Path>>( path: P, expected_keys: Vec<AccountKey>, ) { let read_keys = AccountKeyList::load_from_path(path).expect("can read from file"); assert_eq!(read_keys.keys().cloned().collect::<Vec<_>>(), expected_keys); } #[test] fn model_id_from_u32() { let normal_id = 0x1234; let id = ModelId::try_from(normal_id).expect("valid id"); let id_bytes: [u8; 3] = id.into(); assert_eq!(id_bytes, [0x00, 0x12, 0x34]); let zero_id = 0; let id = ModelId::try_from(zero_id).expect("valid id"); let id_bytes: [u8; 3] = id.into(); assert_eq!(id_bytes, [0x00, 0x00, 0x00]); let max_id = 0xffffff; let id = ModelId::try_from(max_id).expect("valid id"); let id_bytes: [u8; 3] = id.into(); assert_eq!(id_bytes, [0xff, 0xff, 0xff]); } #[test] fn invalid_model_id_conversion_is_error() { let invalid_id = 0x1ffabcd; assert_matches!(ModelId::try_from(invalid_id), Err(_)); } #[test] fn empty_account_key_list_service_data() { let empty = AccountKeyList::with_capacity_and_keys(1, vec![]); let service_data = empty.service_data().expect("can build service data"); let expected = [0x00]; assert_eq!(service_data, expected); } #[test] fn oversized_service_data_is_error() { // Building an AccountKeyList of 11 elements will result in an oversized service data. // In the future, this test will be obsolete as the AccountKeyList will be bounded in its // construction. let keys = (0..11_u8).map(|i| AccountKey::new([i; 16])).collect(); let oversized = AccountKeyList::with_capacity_and_keys(15, keys); let result = oversized.service_data(); assert_matches!(result, Err(Error::InternalError(_))); } #[test] fn account_key_list_service_data() { let example_key = AccountKey::new([1; 16]); let keys = AccountKeyList::with_capacity_and_keys(10, vec![example_key]); let salt = 0x14; // Because the service data is generated with a random salt value, we test the internal // method with a controlled salt value so that the test is deterministic. let service_data = keys.service_data_internal(salt).expect("can build service_data"); let expected = [ 0x40, // Length = 4, Show UI indication 0x04, 0x33, 0x00, 0x88, // Bloom filter applied to the Account key list 0x11, 0x14, // Salt descriptor (0x11), Fixed salt value (0x14) ]; assert_eq!(service_data, expected); } /// Tests AES-128 encryption & decryption using an Account Key as the Secret Key. /// The contents of this test case are pulled from the GFPS specification. 
/// See https://developers.google.com/nearby/fast-pair/specifications/appendix/testcases#aes_encryption #[test] fn aes_128_encryption_roundtrip() { let message = [ 0xF3, 0x0F, 0x4E, 0x78, 0x6C, 0x59, 0xA7, 0xBB, 0xF3, 0x87, 0x3B, 0x5A, 0x49, 0xBA, 0x97, 0xEA, ]; let account_key = AccountKey::new([ 0xA0, 0xBA, 0xF0, 0xBB, 0x95, 0x1F, 0xF7, 0xB6, 0xCF, 0x5E, 0x3F, 0x45, 0x61, 0xC3, 0x32, 0x1D, ]); let encrypted = account_key.shared_secret().encrypt(&message); let expected = [ 0xAC, 0x9A, 0x16, 0xF0, 0x95, 0x3A, 0x3F, 0x22, 0x3D, 0xD1, 0x0C, 0xF5, 0x36, 0xE0, 0x9E, 0x9C, ]; assert_eq!(encrypted, expected); let decrypted = account_key.shared_secret().decrypt(&encrypted); assert_eq!(decrypted, message); } #[test] fn account_key_lru_eviction()
#[test] fn mark_used_nonexistent_key_is_error() { let mut list = AccountKeyList::with_capacity_and_keys(1, vec![]); let key = AccountKey::new([1; 16]); assert_matches!(list.mark_used(&key), Err(_)); } #[fuchsia::test] fn load_keys_from_nonexistent_file() { const EXAMPLE_FILEPATH: &str = "/data/test_account_keys0.json"; expect_keys_at_path(EXAMPLE_FILEPATH, vec![]); } #[fuchsia::test] fn commit_and_load_keys_to_and_from_a_file() { let key1 = AccountKey::new([1; 16]); let key2 = AccountKey::new([2; 16]); let key3 = AccountKey::new([3; 16]); let example_keys = vec![key1, key2, key3]; let keys = AccountKeyList::with_capacity_and_keys(5, example_keys.clone()); keys.store().expect("can store Account Keys"); expect_keys_at_path(keys.path(), example_keys); } #[fuchsia::test] fn lru_eviction_from_storage() { let key1 = AccountKey::new([1; 16]); let key2 = AccountKey::new([2; 16]); let key3 = AccountKey::new([3; 16]); // New collection with maximum capacity of 2 keys. let mut keys = AccountKeyList::with_capacity_and_keys(2, vec![]); // Because this key has never been written before, it should be saved to persistent storage. keys.save(key1.clone()); expect_keys_at_path(keys.path(), vec![key1.clone()]); // Because this key has never been written before, it should be saved to persistent storage. keys.save(key2.clone()); expect_keys_at_path(keys.path(), vec![key1.clone(), key2.clone()]); // Because `key1` already exists in the collection, we expect a cache "refresh" so the key // ordering in storage should change. keys.save(key1.clone()); // e.g The LRU order should change whereby `key2` is now the LRU. expect_keys_at_path(keys.path(), vec![key2, key1.clone()]); // The collection is at max capacity so `key2` (LRU) should be evicted. Local storage // should be updated. keys.save(key3.clone()); expect_keys_at_path(keys.path(), vec![key1, key3]); } }
{ let mut list = AccountKeyList::with_capacity_and_keys(MAX_ACCOUNT_KEYS, vec![]); let max: u8 = MAX_ACCOUNT_KEYS as u8; for i in 1..max + 1 { let key = AccountKey::new([i; 16]); list.save(key.clone()); assert_eq!(list.keys().len(), i as usize); assert!(list.keys.contains_key(&key)); } // Adding a new key results in the eviction of the LRU key. assert_eq!(list.keys().len(), max as usize); let new_key = AccountKey::new([max + 1; 16]); list.save(new_key.clone()); assert_eq!(list.keys().len(), max as usize); assert!(list.keys.contains_key(&new_key)); // LRU Key is no longer stored. let first_key = AccountKey::new([1; 16]); assert!(!list.keys.contains_key(&first_key)); // Marking a key as used should "refresh" the key's position. It is no longer the LRU key // that will be evicted. let account_key2 = AccountKey::new([2; 16]); assert_matches!(list.mark_used(&account_key2), Ok(_)); // Inserting a new key at capacity will evict the LRU key (not `account_key2` anymore). let next_key = AccountKey::new([max + 2; 16]); list.save(next_key.clone()); assert_eq!(list.keys().len(), max as usize); assert!(list.keys.contains_key(&next_key)); assert!(list.keys.contains_key(&account_key2)); }
identifier_body
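The service data built in `service_data_internal` above packs its header into a single 0bLLLLTTTT byte: the upper nibble carries the length of the bloom-filtered key data (at most 15, u4::MAX) and the lower nibble carries the type (0b0000 to show the UI notification, 0b0010 to hide it). A minimal standalone sketch of that packing — the function name and `hide_ui` flag are illustrative, not part of the crate:

// Sketch of the 0bLLLLTTTT header byte described above (names are hypothetical).
fn pack_header(length: u8, hide_ui: bool) -> Option<u8> {
    // The length must fit in the upper nibble (u4::MAX = 15).
    if length > 15 {
        return None;
    }
    // T = 0b0000 shows the UI notification, 0b0010 hides it.
    let type_bits = if hide_ui { 0b0010 } else { 0b0000 };
    Some((length << 4) | type_bits)
}

fn main() {
    // Four bytes of bloom-filtered key data with the UI shown packs to 0x40,
    // matching the expected first byte in `account_key_list_service_data`.
    assert_eq!(pack_header(4, false), Some(0x40));
    assert_eq!(pack_header(16, false), None);
}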
mod.rs
// Copyright 2022 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use aes::cipher::generic_array::GenericArray; use aes::{Aes128, BlockDecrypt, BlockEncrypt, NewBlockCipher}; use fuchsia_inspect::{self as inspect, Property}; use fuchsia_inspect_derive::{AttachError, Inspect}; use lru_cache::LruCache; use rand::Rng; use serde::{Deserialize, Serialize}; use std::convert::{TryFrom, TryInto}; use std::{fs, io, path}; use tracing::{debug, warn}; use crate::advertisement::bloom_filter; mod error; pub mod keys; pub mod packets; pub use error::Error; /// Represents the 24-bit Model ID assigned to a Fast Pair device upon registration. #[derive(Debug, Copy, Clone, PartialEq)] pub struct ModelId(u32); impl TryFrom<u32> for ModelId { type Error = Error; fn try_from(src: u32) -> Result<Self, Self::Error> { // u24::MAX if src > 0xffffff { return Err(Error::InvalidModelId(src)); } Ok(Self(src)) } } impl From<ModelId> for [u8; 3] { fn from(src: ModelId) -> [u8; 3] { let mut bytes = [0; 3]; bytes[..3].copy_from_slice(&src.0.to_be_bytes()[1..]); bytes } } /// A key used during the Fast Pair Pairing Procedure. /// This key is a temporary value that lives for the lifetime of a procedure. #[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] pub struct SharedSecret([u8; 16]); impl SharedSecret { pub fn new(bytes: [u8; 16]) -> Self { Self(bytes) } pub fn as_bytes(&self) -> &[u8; 16] { &self.0 } /// Decrypts the provided `message` buffer with the AccountKey using AES-128. /// Returns the decrypted payload. pub fn decrypt(&self, message: &[u8; 16]) -> [u8; 16] { let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes())); let mut block = GenericArray::clone_from_slice(message); cipher.decrypt_block(&mut block); block.into() } /// Encrypts the provided `message` buffer with the AccountKey using AES-128. /// Returns the encrypted payload. pub fn encrypt(&self, message: &[u8; 16]) -> [u8; 16] { let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes())); let mut block = GenericArray::clone_from_slice(message); cipher.encrypt_block(&mut block); block.into() }
/// A long-lived key that allows the Provider to be recognized as belonging to a certain user /// account. #[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] pub struct AccountKey(SharedSecret); impl AccountKey { pub fn new(bytes: [u8; 16]) -> Self { Self(SharedSecret::new(bytes)) } pub fn as_bytes(&self) -> &[u8; 16] { &self.0.as_bytes() } pub fn shared_secret(&self) -> &SharedSecret { &self.0 } } impl From<&SharedSecret> for AccountKey { fn from(src: &SharedSecret) -> AccountKey { AccountKey(src.clone()) } } /// The maximum number of Account Keys that can be managed by the Fast Pair server. Account Keys /// will be evicted in an LRU manner as described in the GFPS specification. /// This limit is chosen as the minimum required by any implementation and provides ample space /// in the LE advertisement packet. /// See https://developers.google.com/nearby/fast-pair/specifications/configuration#AccountKeyList /// for more details. const MAX_ACCOUNT_KEYS: usize = 5; /// Manages the set of saved Account Keys. /// /// By default, the maximum number of keys that will be saved is `MAX_ACCOUNT_KEYS`. When full, the /// `AccountKeyList` will evict the least recently used Account Key. /// /// Account Keys are written to isolated persistent storage and are maintained across reboots. The /// set of saved keys will only be erased on device factory resets. /// To avoid writing to persistent storage too often, only new Account Keys are written to storage. /// Writes for existing keys will result in cache "hits" (e.g LRU ordering updated) but will not be /// updated in the backing storage file. pub struct AccountKeyList { /// The set of saved Account Keys. Keys are evicted in an LRU manner. There is no cache value /// as we only care about maintaining the keys. keys: LruCache<AccountKey, ()>, /// The file path pointing to the isolated persistent storage which saves the Account Keys. path: path::PathBuf, /// The number of keys currently saved in the AccountKeyList. account_key_count: inspect::UintProperty, } impl Inspect for &mut AccountKeyList { fn iattach(self, parent: &inspect::Node, _name: impl AsRef<str>) -> Result<(), AttachError> { self.account_key_count = parent.create_uint("account_key_count", self.keys.len() as u64); Ok(()) } } impl AccountKeyList { /// Attempts to load the current set of saved Account Keys from isolated persistent storage. /// Returns the updated AccountKeyList of keys on success, Error otherwise. pub fn load() -> Result<Self, Error> { Self::load_from_path(Self::PERSISTED_ACCOUNT_KEYS_FILEPATH) } /// Builds an AccountKey list with the provided `keys`. /// A random test file path is used to avoid concurrently running tests from reading/writing /// from/to the same file. #[cfg(test)] pub fn with_capacity_and_keys(capacity: usize, keys: Vec<AccountKey>) -> Self { let mut cache = LruCache::new(capacity); keys.into_iter().for_each(|k| { let _ = cache.insert(k, ()); }); let val = rand::thread_rng().gen::<u64>(); let path = format!("data/test_account_keys{}.json", val); Self { keys: cache, path: path::PathBuf::from(path), account_key_count: Default::default() } } #[cfg(test)] pub fn path(&self) -> String { self.path.clone().into_os_string().into_string().expect("valid path string") } fn update_inspect(&self) { self.account_key_count.set(self.keys.len() as u64); } /// Returns an Iterator over the saved Account Keys. /// Note: Access via Iterator does not modify LRU state. 
pub fn keys(&self) -> impl Iterator<Item = &AccountKey> + ExactSizeIterator { self.keys.iter().map(|(k, _)| k) } /// Marks the provided `key` as used in the LRU cache. /// Returns Error if the key does not exist in the cache. pub fn mark_used(&mut self, key: &AccountKey) -> Result<(), Error> { self.keys.get_mut(&key).map(|_| ()).ok_or(Error::internal("no key to mark as used")) } /// Save an Account Key to the persisted set of keys. pub fn save(&mut self, key: AccountKey) { // If the `key` already exists, it will be updated in the LRU cache. If the cache is // full, the least-recently used (LRU) key will be evicted. if self.keys.insert(key, ()).is_some() { debug!("Account Key already saved"); } // Store the updated set of keys in persistent storage. if let Err(e) = self.store() { warn!("Couldn't update key list in isolated persistent storage: {:?}", e); } self.update_inspect(); } /// Returns the service data payload associated with the current set of Account Keys. pub fn service_data(&self) -> Result<Vec<u8>, Error> { if self.keys.is_empty() { return Ok(vec![0x0]); } let salt = rand::thread_rng().gen::<u8>(); self.service_data_internal(salt) } fn service_data_internal(&self, salt: u8) -> Result<Vec<u8>, Error> { let account_keys_bytes = bloom_filter(self.keys(), salt)?; let mut result = Vec::new(); // First byte is 0bLLLLTTTT, where L = length of the account key list, T = Type (0b0000 to // show UI notification, 0b0010 to hide it). The maximum amount of account key data that can // be represented is 15 bytes (u4::MAX). let length: u8 = match account_keys_bytes.len().try_into() { Ok(len) if len <= 15 => len, _ => return Err(Error::internal("Account key data too large")), }; // For now, we will always request to show the UI notification (TTTT = 0b0000). result.push(length << 4); // Next n bytes are the Bloom-filtered Account Key list. result.extend(account_keys_bytes); // The descriptor value associated with the Salt section of the LE advertisement payload. // Formatted as 0bLLLLTTTT, where L (Length) = 0b0001 and T (Type) = 0b0001. Both are fixed. const SALT_DESCRIPTOR: u8 = 0x11; result.push(SALT_DESCRIPTOR); // Final byte is the Salt value. result.push(salt); Ok(result) } // Default file path for Account Keys written to isolated persistent storage. const PERSISTED_ACCOUNT_KEYS_FILEPATH: &'static str = "/data/account_keys.json"; /// Attempts to read and parse the contents of the persistent storage at the provided `path`. /// Returns an `AccountKeyList` on success, Error otherwise. fn load_from_path<P: AsRef<path::Path>>(path: P) -> Result<Self, Error> { let mut this = Self { keys: LruCache::new(MAX_ACCOUNT_KEYS), path: path::PathBuf::from(path.as_ref()), account_key_count: Default::default(), }; this.load_internal()?; Ok(this) } /// Attempts to update the locally-saved set of keys from persistent storage. /// Returns Error if the storage file is unable to be opened. fn load_internal(&mut self) -> Result<(), Error> { match fs::File::open(&self.path) { Ok(file) => { // Build the LRU cache from the contents of the file. Because keys are stored in // LRU order, we build the cache in the same order to preserve LRU status. 
debug!("Reading Account Keys from existing file"); let key_list = KeyList::load(file)?; key_list.0.into_iter().for_each(|k| { let _ = self.keys.insert(k, ()); }); Ok(()) } Err(error) if error.kind() == io::ErrorKind::NotFound => { debug!("Persistent storage file not found"); Ok(()) } Err(e) => Err(Error::key_storage(e, "couldn't load key storage file")), } } /// Commits the current set of Account Keys to isolated persistent storage. /// Keys are stored in LRU order. fn store(&self) -> Result<(), Error> { let path = path::Path::new(&self.path); let file_name = path.file_name().ok_or(Error::key_storage( io::ErrorKind::InvalidInput.into(), "couldn't build file name from path", ))?; let file_path = path.with_file_name(file_name.to_os_string()); let file = fs::File::create(&file_path) .map_err(|e| Error::key_storage(e, "couldn't create file"))?; let values = KeyList(self.keys().cloned().collect()); serde_json::to_writer(file, &values)?; Ok(()) } } /// Convenience type for the serialization and deserialization of Account Keys. #[derive(Serialize, Deserialize)] struct KeyList(Vec<AccountKey>); impl KeyList { fn load<R: io::Read>(reader: R) -> Result<Self, Error> { serde_json::from_reader(reader).map_err(Into::into) } } #[cfg(test)] pub(crate) mod tests { use super::*; use assert_matches::assert_matches; /// Loads the set of saved Account Keys from storage and verifies that it's equal to the /// provided `expected_keys`. #[track_caller] pub(crate) fn expect_keys_at_path<P: AsRef<path::Path>>( path: P, expected_keys: Vec<AccountKey>, ) { let read_keys = AccountKeyList::load_from_path(path).expect("can read from file"); assert_eq!(read_keys.keys().cloned().collect::<Vec<_>>(), expected_keys); } #[test] fn model_id_from_u32() { let normal_id = 0x1234; let id = ModelId::try_from(normal_id).expect("valid id"); let id_bytes: [u8; 3] = id.into(); assert_eq!(id_bytes, [0x00, 0x12, 0x34]); let zero_id = 0; let id = ModelId::try_from(zero_id).expect("valid id"); let id_bytes: [u8; 3] = id.into(); assert_eq!(id_bytes, [0x00, 0x00, 0x00]); let max_id = 0xffffff; let id = ModelId::try_from(max_id).expect("valid id"); let id_bytes: [u8; 3] = id.into(); assert_eq!(id_bytes, [0xff, 0xff, 0xff]); } #[test] fn invalid_model_id_conversion_is_error() { let invalid_id = 0x1ffabcd; assert_matches!(ModelId::try_from(invalid_id), Err(_)); } #[test] fn empty_account_key_list_service_data() { let empty = AccountKeyList::with_capacity_and_keys(1, vec![]); let service_data = empty.service_data().expect("can build service data"); let expected = [0x00]; assert_eq!(service_data, expected); } #[test] fn oversized_service_data_is_error() { // Building an AccountKeyList of 11 elements will result in an oversized service data. // In the future, this test will be obsolete as the AccountKeyList will be bounded in its // construction. let keys = (0..11_u8).map(|i| AccountKey::new([i; 16])).collect(); let oversized = AccountKeyList::with_capacity_and_keys(15, keys); let result = oversized.service_data(); assert_matches!(result, Err(Error::InternalError(_))); } #[test] fn account_key_list_service_data() { let example_key = AccountKey::new([1; 16]); let keys = AccountKeyList::with_capacity_and_keys(10, vec![example_key]); let salt = 0x14; // Because the service data is generated with a random salt value, we test the internal // method with a controlled salt value so that the test is deterministic. 
let service_data = keys.service_data_internal(salt).expect("can build service_data"); let expected = [ 0x40, // Length = 4, Show UI indication 0x04, 0x33, 0x00, 0x88, // Bloom filter applied to the Account key list 0x11, 0x14, // Salt descriptor (0x11), Fixed salt value (0x14) ]; assert_eq!(service_data, expected); } /// Tests AES-128 encryption & decryption using an Account Key as the Secret Key. /// The contents of this test case are pulled from the GFPS specification. /// See https://developers.google.com/nearby/fast-pair/specifications/appendix/testcases#aes_encryption #[test] fn aes_128_encryption_roundtrip() { let message = [ 0xF3, 0x0F, 0x4E, 0x78, 0x6C, 0x59, 0xA7, 0xBB, 0xF3, 0x87, 0x3B, 0x5A, 0x49, 0xBA, 0x97, 0xEA, ]; let account_key = AccountKey::new([ 0xA0, 0xBA, 0xF0, 0xBB, 0x95, 0x1F, 0xF7, 0xB6, 0xCF, 0x5E, 0x3F, 0x45, 0x61, 0xC3, 0x32, 0x1D, ]); let encrypted = account_key.shared_secret().encrypt(&message); let expected = [ 0xAC, 0x9A, 0x16, 0xF0, 0x95, 0x3A, 0x3F, 0x22, 0x3D, 0xD1, 0x0C, 0xF5, 0x36, 0xE0, 0x9E, 0x9C, ]; assert_eq!(encrypted, expected); let decrypted = account_key.shared_secret().decrypt(&encrypted); assert_eq!(decrypted, message); } #[test] fn account_key_lru_eviction() { let mut list = AccountKeyList::with_capacity_and_keys(MAX_ACCOUNT_KEYS, vec![]); let max: u8 = MAX_ACCOUNT_KEYS as u8; for i in 1..max + 1 { let key = AccountKey::new([i; 16]); list.save(key.clone()); assert_eq!(list.keys().len(), i as usize); assert!(list.keys.contains_key(&key)); } // Adding a new key results in the eviction of the LRU key. assert_eq!(list.keys().len(), max as usize); let new_key = AccountKey::new([max + 1; 16]); list.save(new_key.clone()); assert_eq!(list.keys().len(), max as usize); assert!(list.keys.contains_key(&new_key)); // LRU Key is no longer stored. let first_key = AccountKey::new([1; 16]); assert!(!list.keys.contains_key(&first_key)); // Marking a key as used should "refresh" the key's position. It is no longer the LRU key // that will be evicted. let account_key2 = AccountKey::new([2; 16]); assert_matches!(list.mark_used(&account_key2), Ok(_)); // Inserting a new key at capacity will evict the LRU key (not `account_key2` anymore). let next_key = AccountKey::new([max + 2; 16]); list.save(next_key.clone()); assert_eq!(list.keys().len(), max as usize); assert!(list.keys.contains_key(&next_key)); assert!(list.keys.contains_key(&account_key2)); } #[test] fn mark_used_nonexistent_key_is_error() { let mut list = AccountKeyList::with_capacity_and_keys(1, vec![]); let key = AccountKey::new([1; 16]); assert_matches!(list.mark_used(&key), Err(_)); } #[fuchsia::test] fn load_keys_from_nonexistent_file() { const EXAMPLE_FILEPATH: &str = "/data/test_account_keys0.json"; expect_keys_at_path(EXAMPLE_FILEPATH, vec![]); } #[fuchsia::test] fn commit_and_load_keys_to_and_from_a_file() { let key1 = AccountKey::new([1; 16]); let key2 = AccountKey::new([2; 16]); let key3 = AccountKey::new([3; 16]); let example_keys = vec![key1, key2, key3]; let keys = AccountKeyList::with_capacity_and_keys(5, example_keys.clone()); keys.store().expect("can store Account Keys"); expect_keys_at_path(keys.path(), example_keys); } #[fuchsia::test] fn lru_eviction_from_storage() { let key1 = AccountKey::new([1; 16]); let key2 = AccountKey::new([2; 16]); let key3 = AccountKey::new([3; 16]); // New collection with maximum capacity of 2 keys. let mut keys = AccountKeyList::with_capacity_and_keys(2, vec![]); // Because this key has never been written before, it should be saved to persistent storage. 
keys.save(key1.clone()); expect_keys_at_path(keys.path(), vec![key1.clone()]); // Because this key has never been written before, it should be saved to persistent storage. keys.save(key2.clone()); expect_keys_at_path(keys.path(), vec![key1.clone(), key2.clone()]); // Because `key1` already exists in the collection, we expect a cache "refresh" so the key // ordering in storage should change. keys.save(key1.clone()); // e.g The LRU order should change whereby `key2` is now the LRU. expect_keys_at_path(keys.path(), vec![key2, key1.clone()]); // The collection is at max capacity so `key2` (LRU) should be evicted. Local storage // should be updated. keys.save(key3.clone()); expect_keys_at_path(keys.path(), vec![key1, key3]); } }
}
random_line_split
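The `From<ModelId> for [u8; 3]` impl above leans on the 24-bit bound enforced by `TryFrom<u32>`: once `src <= 0xffffff`, the high byte of the big-endian encoding is always zero and can be dropped. A self-contained sketch of the same conversion, as a hypothetical free function rather than the module's trait impls:

// Sketch of the 24-bit Model ID conversion: validate against u24::MAX, then
// keep the three low-order big-endian bytes.
fn model_id_bytes(src: u32) -> Option<[u8; 3]> {
    if src > 0xffffff {
        return None; // would overflow 24 bits
    }
    let be = src.to_be_bytes(); // [hi, b2, b1, b0]
    let mut bytes = [0u8; 3];
    bytes.copy_from_slice(&be[1..]); // drop the always-zero high byte
    Some(bytes)
}

fn main() {
    // Mirrors the `model_id_from_u32` and invalid-conversion tests above.
    assert_eq!(model_id_bytes(0x1234), Some([0x00, 0x12, 0x34]));
    assert_eq!(model_id_bytes(0x1ffabcd), None);
}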
mod.rs
// Copyright 2022 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use aes::cipher::generic_array::GenericArray; use aes::{Aes128, BlockDecrypt, BlockEncrypt, NewBlockCipher}; use fuchsia_inspect::{self as inspect, Property}; use fuchsia_inspect_derive::{AttachError, Inspect}; use lru_cache::LruCache; use rand::Rng; use serde::{Deserialize, Serialize}; use std::convert::{TryFrom, TryInto}; use std::{fs, io, path}; use tracing::{debug, warn}; use crate::advertisement::bloom_filter; mod error; pub mod keys; pub mod packets; pub use error::Error; /// Represents the 24-bit Model ID assigned to a Fast Pair device upon registration. #[derive(Debug, Copy, Clone, PartialEq)] pub struct ModelId(u32); impl TryFrom<u32> for ModelId { type Error = Error; fn try_from(src: u32) -> Result<Self, Self::Error> { // u24::MAX if src > 0xffffff
Ok(Self(src)) } } impl From<ModelId> for [u8; 3] { fn from(src: ModelId) -> [u8; 3] { let mut bytes = [0; 3]; bytes[..3].copy_from_slice(&src.0.to_be_bytes()[1..]); bytes } } /// A key used during the Fast Pair Pairing Procedure. /// This key is a temporary value that lives for the lifetime of a procedure. #[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] pub struct SharedSecret([u8; 16]); impl SharedSecret { pub fn new(bytes: [u8; 16]) -> Self { Self(bytes) } pub fn as_bytes(&self) -> &[u8; 16] { &self.0 } /// Decrypts the provided `message` buffer with the AccountKey using AES-128. /// Returns the decrypted payload. pub fn decrypt(&self, message: &[u8; 16]) -> [u8; 16] { let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes())); let mut block = GenericArray::clone_from_slice(message); cipher.decrypt_block(&mut block); block.into() } /// Encrypts the provided `message` buffer with the AccountKey using AES-128. /// Returns the encrypted payload. pub fn encrypt(&self, message: &[u8; 16]) -> [u8; 16] { let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes())); let mut block = GenericArray::clone_from_slice(message); cipher.encrypt_block(&mut block); block.into() } } /// A long-lived key that allows the Provider to be recognized as belonging to a certain user /// account. #[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] pub struct AccountKey(SharedSecret); impl AccountKey { pub fn new(bytes: [u8; 16]) -> Self { Self(SharedSecret::new(bytes)) } pub fn as_bytes(&self) -> &[u8; 16] { &self.0.as_bytes() } pub fn shared_secret(&self) -> &SharedSecret { &self.0 } } impl From<&SharedSecret> for AccountKey { fn from(src: &SharedSecret) -> AccountKey { AccountKey(src.clone()) } } /// The maximum number of Account Keys that can be managed by the Fast Pair server. Account Keys /// will be evicted in an LRU manner as described in the GFPS specification. /// This limit is chosen as the minimum required by any implementation and provides ample space /// in the LE advertisement packet. /// See https://developers.google.com/nearby/fast-pair/specifications/configuration#AccountKeyList /// for more details. const MAX_ACCOUNT_KEYS: usize = 5; /// Manages the set of saved Account Keys. /// /// By default, the maximum number of keys that will be saved is `MAX_ACCOUNT_KEYS`. When full, the /// `AccountKeyList` will evict the least recently used Account Key. /// /// Account Keys are written to isolated persistent storage and are maintained across reboots. The /// set of saved keys will only be erased on device factory resets. /// To avoid writing to persistent storage too often, only new Account Keys are written to storage. /// Writes for existing keys will result in cache "hits" (e.g LRU ordering updated) but will not be /// updated in the backing storage file. pub struct AccountKeyList { /// The set of saved Account Keys. Keys are evicted in an LRU manner. There is no cache value /// as we only care about maintaining the keys. keys: LruCache<AccountKey, ()>, /// The file path pointing to the isolated persistent storage which saves the Account Keys. path: path::PathBuf, /// The number of keys currently saved in the AccountKeyList. 
account_key_count: inspect::UintProperty, } impl Inspect for &mut AccountKeyList { fn iattach(self, parent: &inspect::Node, _name: impl AsRef<str>) -> Result<(), AttachError> { self.account_key_count = parent.create_uint("account_key_count", self.keys.len() as u64); Ok(()) } } impl AccountKeyList { /// Attempts to load the current set of saved Account Keys from isolated persistent storage. /// Returns the updated AccountKeyList of keys on success, Error otherwise. pub fn load() -> Result<Self, Error> { Self::load_from_path(Self::PERSISTED_ACCOUNT_KEYS_FILEPATH) } /// Builds an AccountKey list with the provided `keys`. /// A random test file path is used to avoid concurrently running tests from reading/writing /// from/to the same file. #[cfg(test)] pub fn with_capacity_and_keys(capacity: usize, keys: Vec<AccountKey>) -> Self { let mut cache = LruCache::new(capacity); keys.into_iter().for_each(|k| { let _ = cache.insert(k, ()); }); let val = rand::thread_rng().gen::<u64>(); let path = format!("data/test_account_keys{}.json", val); Self { keys: cache, path: path::PathBuf::from(path), account_key_count: Default::default() } } #[cfg(test)] pub fn path(&self) -> String { self.path.clone().into_os_string().into_string().expect("valid path string") } fn update_inspect(&self) { self.account_key_count.set(self.keys.len() as u64); } /// Returns an Iterator over the saved Account Keys. /// Note: Access via Iterator does not modify LRU state. pub fn keys(&self) -> impl Iterator<Item = &AccountKey> + ExactSizeIterator { self.keys.iter().map(|(k, _)| k) } /// Marks the provided `key` as used in the LRU cache. /// Returns Error if the key does not exist in the cache. pub fn mark_used(&mut self, key: &AccountKey) -> Result<(), Error> { self.keys.get_mut(&key).map(|_| ()).ok_or(Error::internal("no key to mark as used")) } /// Save an Account Key to the persisted set of keys. pub fn save(&mut self, key: AccountKey) { // If the `key` already exists, it will be updated in the LRU cache. If the cache is // full, the least-recently used (LRU) key will be evicted. if self.keys.insert(key, ()).is_some() { debug!("Account Key already saved"); } // Store the updated set of keys in persistent storage. if let Err(e) = self.store() { warn!("Couldn't update key list in isolated persistent storage: {:?}", e); } self.update_inspect(); } /// Returns the service data payload associated with the current set of Account Keys. pub fn service_data(&self) -> Result<Vec<u8>, Error> { if self.keys.is_empty() { return Ok(vec![0x0]); } let salt = rand::thread_rng().gen::<u8>(); self.service_data_internal(salt) } fn service_data_internal(&self, salt: u8) -> Result<Vec<u8>, Error> { let account_keys_bytes = bloom_filter(self.keys(), salt)?; let mut result = Vec::new(); // First byte is 0bLLLLTTTT, where L = length of the account key list, T = Type (0b0000 to // show UI notification, 0b0010 to hide it). The maximum amount of account key data that can // be represented is 15 bytes (u4::MAX). let length: u8 = match account_keys_bytes.len().try_into() { Ok(len) if len <= 15 => len, _ => return Err(Error::internal("Account key data too large")), }; // For now, we will always request to show the UI notification (TTTT = 0b0000). result.push(length << 4); // Next n bytes are the Bloom-filtered Account Key list. result.extend(account_keys_bytes); // The descriptor value associated with the Salt section of the LE advertisement payload. // Formatted as 0bLLLLTTTT, where L (Length) = 0b0001 and T (Type) = 0b0001. Both are fixed. 
const SALT_DESCRIPTOR: u8 = 0x11; result.push(SALT_DESCRIPTOR); // Final byte is the Salt value. result.push(salt); Ok(result) } // Default file path for Account Keys written to isolated persistent storage. const PERSISTED_ACCOUNT_KEYS_FILEPATH: &'static str = "/data/account_keys.json"; /// Attempts to read and parse the contents of the persistent storage at the provided `path`. /// Returns an `AccountKeyList` on success, Error otherwise. fn load_from_path<P: AsRef<path::Path>>(path: P) -> Result<Self, Error> { let mut this = Self { keys: LruCache::new(MAX_ACCOUNT_KEYS), path: path::PathBuf::from(path.as_ref()), account_key_count: Default::default(), }; this.load_internal()?; Ok(this) } /// Attempts to update the locally-saved set of keys from persistent storage. /// Returns Error if the storage file is unable to be opened. fn load_internal(&mut self) -> Result<(), Error> { match fs::File::open(&self.path) { Ok(file) => { // Build the LRU cache from the contents of the file. Because keys are stored in // LRU order, we build the cache in the same order to preserve LRU status. debug!("Reading Account Keys from existing file"); let key_list = KeyList::load(file)?; key_list.0.into_iter().for_each(|k| { let _ = self.keys.insert(k, ()); }); Ok(()) } Err(error) if error.kind() == io::ErrorKind::NotFound => { debug!("Persistent storage file not found"); Ok(()) } Err(e) => Err(Error::key_storage(e, "couldn't load key storage file")), } } /// Commits the current set of Account Keys to isolated persistent storage. /// Keys are stored in LRU order. fn store(&self) -> Result<(), Error> { let path = path::Path::new(&self.path); let file_name = path.file_name().ok_or(Error::key_storage( io::ErrorKind::InvalidInput.into(), "couldn't build file name from path", ))?; let file_path = path.with_file_name(file_name.to_os_string()); let file = fs::File::create(&file_path) .map_err(|e| Error::key_storage(e, "couldn't create file"))?; let values = KeyList(self.keys().cloned().collect()); serde_json::to_writer(file, &values)?; Ok(()) } } /// Convenience type for the serialization and deserialization of Account Keys. #[derive(Serialize, Deserialize)] struct KeyList(Vec<AccountKey>); impl KeyList { fn load<R: io::Read>(reader: R) -> Result<Self, Error> { serde_json::from_reader(reader).map_err(Into::into) } } #[cfg(test)] pub(crate) mod tests { use super::*; use assert_matches::assert_matches; /// Loads the set of saved Account Keys from storage and verifies that it's equal to the /// provided `expected_keys`. 
#[track_caller] pub(crate) fn expect_keys_at_path<P: AsRef<path::Path>>( path: P, expected_keys: Vec<AccountKey>, ) { let read_keys = AccountKeyList::load_from_path(path).expect("can read from file"); assert_eq!(read_keys.keys().cloned().collect::<Vec<_>>(), expected_keys); } #[test] fn model_id_from_u32() { let normal_id = 0x1234; let id = ModelId::try_from(normal_id).expect("valid id"); let id_bytes: [u8; 3] = id.into(); assert_eq!(id_bytes, [0x00, 0x12, 0x34]); let zero_id = 0; let id = ModelId::try_from(zero_id).expect("valid id"); let id_bytes: [u8; 3] = id.into(); assert_eq!(id_bytes, [0x00, 0x00, 0x00]); let max_id = 0xffffff; let id = ModelId::try_from(max_id).expect("valid id"); let id_bytes: [u8; 3] = id.into(); assert_eq!(id_bytes, [0xff, 0xff, 0xff]); } #[test] fn invalid_model_id_conversion_is_error() { let invalid_id = 0x1ffabcd; assert_matches!(ModelId::try_from(invalid_id), Err(_)); } #[test] fn empty_account_key_list_service_data() { let empty = AccountKeyList::with_capacity_and_keys(1, vec![]); let service_data = empty.service_data().expect("can build service data"); let expected = [0x00]; assert_eq!(service_data, expected); } #[test] fn oversized_service_data_is_error() { // Building an AccountKeyList of 11 elements will result in an oversized service data. // In the future, this test will be obsolete as the AccountKeyList will be bounded in its // construction. let keys = (0..11_u8).map(|i| AccountKey::new([i; 16])).collect(); let oversized = AccountKeyList::with_capacity_and_keys(15, keys); let result = oversized.service_data(); assert_matches!(result, Err(Error::InternalError(_))); } #[test] fn account_key_list_service_data() { let example_key = AccountKey::new([1; 16]); let keys = AccountKeyList::with_capacity_and_keys(10, vec![example_key]); let salt = 0x14; // Because the service data is generated with a random salt value, we test the internal // method with a controlled salt value so that the test is deterministic. let service_data = keys.service_data_internal(salt).expect("can build service_data"); let expected = [ 0x40, // Length = 4, Show UI indication 0x04, 0x33, 0x00, 0x88, // Bloom filter applied to the Account key list 0x11, 0x14, // Salt descriptor (0x11), Fixed salt value (0x14) ]; assert_eq!(service_data, expected); } /// Tests AES-128 encryption & decryption using an Account Key as the Secret Key. /// The contents of this test case are pulled from the GFPS specification. /// See https://developers.google.com/nearby/fast-pair/specifications/appendix/testcases#aes_encryption #[test] fn aes_128_encryption_roundtrip() { let message = [ 0xF3, 0x0F, 0x4E, 0x78, 0x6C, 0x59, 0xA7, 0xBB, 0xF3, 0x87, 0x3B, 0x5A, 0x49, 0xBA, 0x97, 0xEA, ]; let account_key = AccountKey::new([ 0xA0, 0xBA, 0xF0, 0xBB, 0x95, 0x1F, 0xF7, 0xB6, 0xCF, 0x5E, 0x3F, 0x45, 0x61, 0xC3, 0x32, 0x1D, ]); let encrypted = account_key.shared_secret().encrypt(&message); let expected = [ 0xAC, 0x9A, 0x16, 0xF0, 0x95, 0x3A, 0x3F, 0x22, 0x3D, 0xD1, 0x0C, 0xF5, 0x36, 0xE0, 0x9E, 0x9C, ]; assert_eq!(encrypted, expected); let decrypted = account_key.shared_secret().decrypt(&encrypted); assert_eq!(decrypted, message); } #[test] fn account_key_lru_eviction() { let mut list = AccountKeyList::with_capacity_and_keys(MAX_ACCOUNT_KEYS, vec![]); let max: u8 = MAX_ACCOUNT_KEYS as u8; for i in 1..max + 1 { let key = AccountKey::new([i; 16]); list.save(key.clone()); assert_eq!(list.keys().len(), i as usize); assert!(list.keys.contains_key(&key)); } // Adding a new key results in the eviction of the LRU key. 
assert_eq!(list.keys().len(), max as usize); let new_key = AccountKey::new([max + 1; 16]); list.save(new_key.clone()); assert_eq!(list.keys().len(), max as usize); assert!(list.keys.contains_key(&new_key)); // LRU Key is no longer stored. let first_key = AccountKey::new([1; 16]); assert!(!list.keys.contains_key(&first_key)); // Marking a key as used should "refresh" the key's position. It is no longer the LRU key // that will be evicted. let account_key2 = AccountKey::new([2; 16]); assert_matches!(list.mark_used(&account_key2), Ok(_)); // Inserting a new key at capacity will evict the LRU key (not `account_key2` anymore). let next_key = AccountKey::new([max + 2; 16]); list.save(next_key.clone()); assert_eq!(list.keys().len(), max as usize); assert!(list.keys.contains_key(&next_key)); assert!(list.keys.contains_key(&account_key2)); } #[test] fn mark_used_nonexistent_key_is_error() { let mut list = AccountKeyList::with_capacity_and_keys(1, vec![]); let key = AccountKey::new([1; 16]); assert_matches!(list.mark_used(&key), Err(_)); } #[fuchsia::test] fn load_keys_from_nonexistent_file() { const EXAMPLE_FILEPATH: &str = "/data/test_account_keys0.json"; expect_keys_at_path(EXAMPLE_FILEPATH, vec![]); } #[fuchsia::test] fn commit_and_load_keys_to_and_from_a_file() { let key1 = AccountKey::new([1; 16]); let key2 = AccountKey::new([2; 16]); let key3 = AccountKey::new([3; 16]); let example_keys = vec![key1, key2, key3]; let keys = AccountKeyList::with_capacity_and_keys(5, example_keys.clone()); keys.store().expect("can store Account Keys"); expect_keys_at_path(keys.path(), example_keys); } #[fuchsia::test] fn lru_eviction_from_storage() { let key1 = AccountKey::new([1; 16]); let key2 = AccountKey::new([2; 16]); let key3 = AccountKey::new([3; 16]); // New collection with maximum capacity of 2 keys. let mut keys = AccountKeyList::with_capacity_and_keys(2, vec![]); // Because this key has never been written before, it should be saved to persistent storage. keys.save(key1.clone()); expect_keys_at_path(keys.path(), vec![key1.clone()]); // Because this key has never been written before, it should be saved to persistent storage. keys.save(key2.clone()); expect_keys_at_path(keys.path(), vec![key1.clone(), key2.clone()]); // Because `key1` already exists in the collection, we expect a cache "refresh" so the key // ordering in storage should change. keys.save(key1.clone()); // e.g The LRU order should change whereby `key2` is now the LRU. expect_keys_at_path(keys.path(), vec![key2, key1.clone()]); // The collection is at max capacity so `key2` (LRU) should be evicted. Local storage // should be updated. keys.save(key3.clone()); expect_keys_at_path(keys.path(), vec![key1, key3]); } }
{ return Err(Error::InvalidModelId(src)); }
conditional_block
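`SharedSecret::encrypt` and `decrypt` above are plain single-block AES-128 operations. Assuming the same `aes`/`generic_array` API the module already imports, the roundtrip can be reproduced standalone with the GFPS test vectors quoted in `aes_128_encryption_roundtrip`:

// Standalone sketch of the AES-128 single-block roundtrip from `SharedSecret`,
// using the imports this file declares and the GFPS appendix test vectors.
use aes::cipher::generic_array::GenericArray;
use aes::{Aes128, BlockDecrypt, BlockEncrypt, NewBlockCipher};

fn main() {
    let key = [
        0xA0, 0xBA, 0xF0, 0xBB, 0x95, 0x1F, 0xF7, 0xB6, 0xCF, 0x5E, 0x3F, 0x45, 0x61, 0xC3,
        0x32, 0x1D,
    ];
    let message = [
        0xF3, 0x0F, 0x4E, 0x78, 0x6C, 0x59, 0xA7, 0xBB, 0xF3, 0x87, 0x3B, 0x5A, 0x49, 0xBA,
        0x97, 0xEA,
    ];
    let cipher = Aes128::new(GenericArray::from_slice(&key));
    let mut block = GenericArray::clone_from_slice(&message);
    cipher.encrypt_block(&mut block);
    // First byte of the expected ciphertext from the GFPS test case.
    assert_eq!(block[0], 0xAC);
    cipher.decrypt_block(&mut block);
    assert_eq!(<[u8; 16]>::from(block), message);
}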
mod.rs
// Copyright 2022 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use aes::cipher::generic_array::GenericArray; use aes::{Aes128, BlockDecrypt, BlockEncrypt, NewBlockCipher}; use fuchsia_inspect::{self as inspect, Property}; use fuchsia_inspect_derive::{AttachError, Inspect}; use lru_cache::LruCache; use rand::Rng; use serde::{Deserialize, Serialize}; use std::convert::{TryFrom, TryInto}; use std::{fs, io, path}; use tracing::{debug, warn}; use crate::advertisement::bloom_filter; mod error; pub mod keys; pub mod packets; pub use error::Error; /// Represents the 24-bit Model ID assigned to a Fast Pair device upon registration. #[derive(Debug, Copy, Clone, PartialEq)] pub struct ModelId(u32); impl TryFrom<u32> for ModelId { type Error = Error; fn try_from(src: u32) -> Result<Self, Self::Error> { // u24::MAX if src > 0xffffff { return Err(Error::InvalidModelId(src)); } Ok(Self(src)) } } impl From<ModelId> for [u8; 3] { fn from(src: ModelId) -> [u8; 3] { let mut bytes = [0; 3]; bytes[..3].copy_from_slice(&src.0.to_be_bytes()[1..]); bytes } } /// A key used during the Fast Pair Pairing Procedure. /// This key is a temporary value that lives for the lifetime of a procedure. #[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] pub struct SharedSecret([u8; 16]); impl SharedSecret { pub fn new(bytes: [u8; 16]) -> Self { Self(bytes) } pub fn as_bytes(&self) -> &[u8; 16] { &self.0 } /// Decrypts the provided `message` buffer with the AccountKey using AES-128. /// Returns the decrypted payload. pub fn decrypt(&self, message: &[u8; 16]) -> [u8; 16] { let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes())); let mut block = GenericArray::clone_from_slice(message); cipher.decrypt_block(&mut block); block.into() } /// Encrypts the provided `message` buffer with the AccountKey using AES-128. /// Returns the encrypted payload. pub fn encrypt(&self, message: &[u8; 16]) -> [u8; 16] { let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes())); let mut block = GenericArray::clone_from_slice(message); cipher.encrypt_block(&mut block); block.into() } } /// A long-lived key that allows the Provider to be recognized as belonging to a certain user /// account. #[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] pub struct AccountKey(SharedSecret); impl AccountKey { pub fn new(bytes: [u8; 16]) -> Self { Self(SharedSecret::new(bytes)) } pub fn as_bytes(&self) -> &[u8; 16] { &self.0.as_bytes() } pub fn shared_secret(&self) -> &SharedSecret { &self.0 } } impl From<&SharedSecret> for AccountKey { fn from(src: &SharedSecret) -> AccountKey { AccountKey(src.clone()) } } /// The maximum number of Account Keys that can be managed by the Fast Pair server. Account Keys /// will be evicted in an LRU manner as described in the GFPS specification. /// This limit is chosen as the minimum required by any implementation and provides ample space /// in the LE advertisement packet. /// See https://developers.google.com/nearby/fast-pair/specifications/configuration#AccountKeyList /// for more details. const MAX_ACCOUNT_KEYS: usize = 5; /// Manages the set of saved Account Keys. /// /// By default, the maximum number of keys that will be saved is `MAX_ACCOUNT_KEYS`. When full, the /// `AccountKeyList` will evict the least recently used Account Key. /// /// Account Keys are written to isolated persistent storage and are maintained across reboots. 
The /// set of saved keys will only be erased on device factory resets. /// To avoid writing to persistent storage too often, only new Account Keys are written to storage. /// Writes for existing keys will result in cache "hits" (e.g LRU ordering updated) but will not be /// updated in the backing storage file. pub struct AccountKeyList { /// The set of saved Account Keys. Keys are evicted in an LRU manner. There is no cache value /// as we only care about maintaining the keys. keys: LruCache<AccountKey, ()>, /// The file path pointing to the isolated persistent storage which saves the Account Keys. path: path::PathBuf, /// The number of keys currently saved in the AccountKeyList. account_key_count: inspect::UintProperty, } impl Inspect for &mut AccountKeyList { fn iattach(self, parent: &inspect::Node, _name: impl AsRef<str>) -> Result<(), AttachError> { self.account_key_count = parent.create_uint("account_key_count", self.keys.len() as u64); Ok(()) } } impl AccountKeyList { /// Attempts to load the current set of saved Account Keys from isolated persistent storage. /// Returns the updated AccountKeyList of keys on success, Error otherwise. pub fn load() -> Result<Self, Error> { Self::load_from_path(Self::PERSISTED_ACCOUNT_KEYS_FILEPATH) } /// Builds an AccountKey list with the provided `keys`. /// A random test file path is used to avoid concurrently running tests from reading/writing /// from/to the same file. #[cfg(test)] pub fn with_capacity_and_keys(capacity: usize, keys: Vec<AccountKey>) -> Self { let mut cache = LruCache::new(capacity); keys.into_iter().for_each(|k| { let _ = cache.insert(k, ()); }); let val = rand::thread_rng().gen::<u64>(); let path = format!("data/test_account_keys{}.json", val); Self { keys: cache, path: path::PathBuf::from(path), account_key_count: Default::default() } } #[cfg(test)] pub fn path(&self) -> String { self.path.clone().into_os_string().into_string().expect("valid path string") } fn update_inspect(&self) { self.account_key_count.set(self.keys.len() as u64); } /// Returns an Iterator over the saved Account Keys. /// Note: Access via Iterator does not modify LRU state. pub fn keys(&self) -> impl Iterator<Item = &AccountKey> + ExactSizeIterator { self.keys.iter().map(|(k, _)| k) } /// Marks the provided `key` as used in the LRU cache. /// Returns Error if the key does not exist in the cache. pub fn mark_used(&mut self, key: &AccountKey) -> Result<(), Error> { self.keys.get_mut(&key).map(|_| ()).ok_or(Error::internal("no key to mark as used")) } /// Save an Account Key to the persisted set of keys. pub fn save(&mut self, key: AccountKey) { // If the `key` already exists, it will be updated in the LRU cache. If the cache is // full, the least-recently used (LRU) key will be evicted. if self.keys.insert(key, ()).is_some() { debug!("Account Key already saved"); } // Store the updated set of keys in persistent storage. if let Err(e) = self.store() { warn!("Couldn't update key list in isolated persistent storage: {:?}", e); } self.update_inspect(); } /// Returns the service data payload associated with the current set of Account Keys. 
pub fn service_data(&self) -> Result<Vec<u8>, Error> { if self.keys.is_empty() { return Ok(vec![0x0]); } let salt = rand::thread_rng().gen::<u8>(); self.service_data_internal(salt) } fn service_data_internal(&self, salt: u8) -> Result<Vec<u8>, Error> { let account_keys_bytes = bloom_filter(self.keys(), salt)?; let mut result = Vec::new(); // First byte is 0bLLLLTTTT, where L = length of the account key list, T = Type (0b0000 to // show UI notification, 0b0010 to hide it). The maximum amount of account key data that can // be represented is 15 bytes (u4::MAX). let length: u8 = match account_keys_bytes.len().try_into() { Ok(len) if len <= 15 => len, _ => return Err(Error::internal("Account key data too large")), }; // For now, we will always request to show the UI notification (TTTT = 0b0000). result.push(length << 4); // Next n bytes are the Bloom-filtered Account Key list. result.extend(account_keys_bytes); // The descriptor value associated with the Salt section of the LE advertisement payload. // Formatted as 0bLLLLTTTT, where L (Length) = 0b0001 and T (Type) = 0b0001. Both are fixed. const SALT_DESCRIPTOR: u8 = 0x11; result.push(SALT_DESCRIPTOR); // Final byte is the Salt value. result.push(salt); Ok(result) } // Default file path for Account Keys written to isolated persistent storage. const PERSISTED_ACCOUNT_KEYS_FILEPATH: &'static str = "/data/account_keys.json"; /// Attempts to read and parse the contents of the persistent storage at the provided `path`. /// Returns an `AccountKeyList` on success, Error otherwise. fn load_from_path<P: AsRef<path::Path>>(path: P) -> Result<Self, Error> { let mut this = Self { keys: LruCache::new(MAX_ACCOUNT_KEYS), path: path::PathBuf::from(path.as_ref()), account_key_count: Default::default(), }; this.load_internal()?; Ok(this) } /// Attempts to update the locally-saved set of keys from persistent storage. /// Returns Error if the storage file is unable to be opened. fn load_internal(&mut self) -> Result<(), Error> { match fs::File::open(&self.path) { Ok(file) => { // Build the LRU cache from the contents of the file. Because keys are stored in // LRU order, we build the cache in the same order to preserve LRU status. debug!("Reading Account Keys from existing file"); let key_list = KeyList::load(file)?; key_list.0.into_iter().for_each(|k| { let _ = self.keys.insert(k, ()); }); Ok(()) } Err(error) if error.kind() == io::ErrorKind::NotFound => { debug!("Persistent storage file not found"); Ok(()) } Err(e) => Err(Error::key_storage(e, "couldn't load key storage file")), } } /// Commits the current set of Account Keys to isolated persistent storage. /// Keys are stored in LRU order. fn store(&self) -> Result<(), Error> { let path = path::Path::new(&self.path); let file_name = path.file_name().ok_or(Error::key_storage( io::ErrorKind::InvalidInput.into(), "couldn't build file name from path", ))?; let file_path = path.with_file_name(file_name.to_os_string()); let file = fs::File::create(&file_path) .map_err(|e| Error::key_storage(e, "couldn't create file"))?; let values = KeyList(self.keys().cloned().collect()); serde_json::to_writer(file, &values)?; Ok(()) } } /// Convenience type for the serialization and deserialization of Account Keys. 
#[derive(Serialize, Deserialize)] struct KeyList(Vec<AccountKey>); impl KeyList { fn load<R: io::Read>(reader: R) -> Result<Self, Error> { serde_json::from_reader(reader).map_err(Into::into) } } #[cfg(test)] pub(crate) mod tests { use super::*; use assert_matches::assert_matches; /// Loads the set of saved Account Keys from storage and verifies that it's equal to the /// provided `expected_keys`. #[track_caller] pub(crate) fn expect_keys_at_path<P: AsRef<path::Path>>( path: P, expected_keys: Vec<AccountKey>, ) { let read_keys = AccountKeyList::load_from_path(path).expect("can read from file"); assert_eq!(read_keys.keys().cloned().collect::<Vec<_>>(), expected_keys); } #[test] fn model_id_from_u32() { let normal_id = 0x1234; let id = ModelId::try_from(normal_id).expect("valid id"); let id_bytes: [u8; 3] = id.into(); assert_eq!(id_bytes, [0x00, 0x12, 0x34]); let zero_id = 0; let id = ModelId::try_from(zero_id).expect("valid id"); let id_bytes: [u8; 3] = id.into(); assert_eq!(id_bytes, [0x00, 0x00, 0x00]); let max_id = 0xffffff; let id = ModelId::try_from(max_id).expect("valid id"); let id_bytes: [u8; 3] = id.into(); assert_eq!(id_bytes, [0xff, 0xff, 0xff]); } #[test] fn
() { let invalid_id = 0x1ffabcd; assert_matches!(ModelId::try_from(invalid_id), Err(_)); } #[test] fn empty_account_key_list_service_data() { let empty = AccountKeyList::with_capacity_and_keys(1, vec![]); let service_data = empty.service_data().expect("can build service data"); let expected = [0x00]; assert_eq!(service_data, expected); } #[test] fn oversized_service_data_is_error() { // Building an AccountKeyList of 11 elements will result in an oversized service data. // In the future, this test will be obsolete as the AccountKeyList will be bounded in its // construction. let keys = (0..11_u8).map(|i| AccountKey::new([i; 16])).collect(); let oversized = AccountKeyList::with_capacity_and_keys(15, keys); let result = oversized.service_data(); assert_matches!(result, Err(Error::InternalError(_))); } #[test] fn account_key_list_service_data() { let example_key = AccountKey::new([1; 16]); let keys = AccountKeyList::with_capacity_and_keys(10, vec![example_key]); let salt = 0x14; // Because the service data is generated with a random salt value, we test the internal // method with a controlled salt value so that the test is deterministic. let service_data = keys.service_data_internal(salt).expect("can build service_data"); let expected = [ 0x40, // Length = 4, Show UI indication 0x04, 0x33, 0x00, 0x88, // Bloom filter applied to the Account key list 0x11, 0x14, // Salt descriptor (0x11), Fixed salt value (0x14) ]; assert_eq!(service_data, expected); } /// Tests AES-128 encryption & decryption using an Account Key as the Secret Key. /// The contents of this test case are pulled from the GFPS specification. /// See https://developers.google.com/nearby/fast-pair/specifications/appendix/testcases#aes_encryption #[test] fn aes_128_encryption_roundtrip() { let message = [ 0xF3, 0x0F, 0x4E, 0x78, 0x6C, 0x59, 0xA7, 0xBB, 0xF3, 0x87, 0x3B, 0x5A, 0x49, 0xBA, 0x97, 0xEA, ]; let account_key = AccountKey::new([ 0xA0, 0xBA, 0xF0, 0xBB, 0x95, 0x1F, 0xF7, 0xB6, 0xCF, 0x5E, 0x3F, 0x45, 0x61, 0xC3, 0x32, 0x1D, ]); let encrypted = account_key.shared_secret().encrypt(&message); let expected = [ 0xAC, 0x9A, 0x16, 0xF0, 0x95, 0x3A, 0x3F, 0x22, 0x3D, 0xD1, 0x0C, 0xF5, 0x36, 0xE0, 0x9E, 0x9C, ]; assert_eq!(encrypted, expected); let decrypted = account_key.shared_secret().decrypt(&encrypted); assert_eq!(decrypted, message); } #[test] fn account_key_lru_eviction() { let mut list = AccountKeyList::with_capacity_and_keys(MAX_ACCOUNT_KEYS, vec![]); let max: u8 = MAX_ACCOUNT_KEYS as u8; for i in 1..max + 1 { let key = AccountKey::new([i; 16]); list.save(key.clone()); assert_eq!(list.keys().len(), i as usize); assert!(list.keys.contains_key(&key)); } // Adding a new key results in the eviction of the LRU key. assert_eq!(list.keys().len(), max as usize); let new_key = AccountKey::new([max + 1; 16]); list.save(new_key.clone()); assert_eq!(list.keys().len(), max as usize); assert!(list.keys.contains_key(&new_key)); // LRU Key is no longer stored. let first_key = AccountKey::new([1; 16]); assert!(!list.keys.contains_key(&first_key)); // Marking a key as used should "refresh" the key's position. It is no longer the LRU key // that will be evicted. let account_key2 = AccountKey::new([2; 16]); assert_matches!(list.mark_used(&account_key2), Ok(_)); // Inserting a new key at capacity will evict the LRU key (not `account_key2` anymore). 
let next_key = AccountKey::new([max + 2; 16]); list.save(next_key.clone()); assert_eq!(list.keys().len(), max as usize); assert!(list.keys.contains_key(&next_key)); assert!(list.keys.contains_key(&account_key2)); } #[test] fn mark_used_nonexistent_key_is_error() { let mut list = AccountKeyList::with_capacity_and_keys(1, vec![]); let key = AccountKey::new([1; 16]); assert_matches!(list.mark_used(&key), Err(_)); } #[fuchsia::test] fn load_keys_from_nonexistent_file() { const EXAMPLE_FILEPATH: &str = "/data/test_account_keys0.json"; expect_keys_at_path(EXAMPLE_FILEPATH, vec![]); } #[fuchsia::test] fn commit_and_load_keys_to_and_from_a_file() { let key1 = AccountKey::new([1; 16]); let key2 = AccountKey::new([2; 16]); let key3 = AccountKey::new([3; 16]); let example_keys = vec![key1, key2, key3]; let keys = AccountKeyList::with_capacity_and_keys(5, example_keys.clone()); keys.store().expect("can store Account Keys"); expect_keys_at_path(keys.path(), example_keys); } #[fuchsia::test] fn lru_eviction_from_storage() { let key1 = AccountKey::new([1; 16]); let key2 = AccountKey::new([2; 16]); let key3 = AccountKey::new([3; 16]); // New collection with maximum capacity of 2 keys. let mut keys = AccountKeyList::with_capacity_and_keys(2, vec![]); // Because this key has never been written before, it should be saved to persistent storage. keys.save(key1.clone()); expect_keys_at_path(keys.path(), vec![key1.clone()]); // Because this key has never been written before, it should be saved to persistent storage. keys.save(key2.clone()); expect_keys_at_path(keys.path(), vec![key1.clone(), key2.clone()]); // Because `key1` already exists in the collection, we expect a cache "refresh" so the key // ordering in storage should change. keys.save(key1.clone()); // e.g The LRU order should change whereby `key2` is now the LRU. expect_keys_at_path(keys.path(), vec![key2, key1.clone()]); // The collection is at max capacity so `key2` (LRU) should be evicted. Local storage // should be updated. keys.save(key3.clone()); expect_keys_at_path(keys.path(), vec![key1, key3]); } }
invalid_model_id_conversion_is_error
identifier_name
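The eviction and `mark_used` tests above rely on two `lru_cache::LruCache` behaviors: `insert` at capacity evicts the least recently used entry, and `get_mut` refreshes an entry's position. A minimal sketch of just that contract, with a `u8` standing in for `AccountKey`:

// Sketch of the LRU behavior `AccountKeyList` depends on, using the same
// `lru_cache::LruCache<K, ()>` shape as the module.
use lru_cache::LruCache;

fn main() {
    let mut keys: LruCache<u8, ()> = LruCache::new(2);
    let _ = keys.insert(1, ());
    let _ = keys.insert(2, ());
    // Touching key 1 refreshes it, making key 2 the least recently used...
    let _ = keys.get_mut(&1);
    // ...so inserting at capacity evicts key 2, not key 1.
    let _ = keys.insert(3, ());
    assert!(keys.contains_key(&1));
    assert!(!keys.contains_key(&2));
    assert!(keys.contains_key(&3));
}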
utils.rs
use cairo;
use cairo::enums::{FontSlant, FontWeight};
use cairo::prelude::SurfaceExt;
use clap::{
    crate_authors, crate_description, crate_name, crate_version, value_t, App, AppSettings, Arg,
};
use css_color_parser::Color as CssColor;
use font_loader::system_fonts;
use itertools::Itertools;
use log::debug;
use regex::Regex;
use std::error::Error;
use std::iter;
use std::str::FromStr;
use std::thread::sleep;
use std::time::{Duration, Instant};
use xcb;
use xcb::ffi::xcb_visualid_t;

use crate::{AppConfig, DesktopWindow, RenderWindow};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum HorizontalAlign {
    Left,
    Center,
    Right,
}

impl FromStr for HorizontalAlign {
    type Err = ();

    fn from_str(s: &str) -> Result<HorizontalAlign, ()> {
        match s {
            "left" => Ok(HorizontalAlign::Left),
            "center" => Ok(HorizontalAlign::Center),
            "right" => Ok(HorizontalAlign::Right),
            _ => Err(()),
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum VerticalAlign {
    Top,
    Center,
    Bottom,
}

impl FromStr for VerticalAlign {
    type Err = ();

    fn from_str(s: &str) -> Result<VerticalAlign, ()> {
        match s {
            "top" => Ok(VerticalAlign::Top),
            "center" => Ok(VerticalAlign::Center),
            "bottom" => Ok(VerticalAlign::Bottom),
            _ => Err(()),
        }
    }
}

/// Checks whether the provided fontconfig font `f` is valid.
fn is_truetype_font(f: String) -> Result<(), String> {
    let v: Vec<_> = f.split(':').collect();
    let (family, size) = (v.get(0), v.get(1));
    if family.is_none() || size.is_none() {
        return Err("Invalid font format".to_string());
    }
    if let Err(e) = size.unwrap().parse::<f32>() {
        return Err(e.description().to_string());
    }
    Ok(())
}

/// Validate a color.
fn is_valid_color(c: String) -> Result<(), String> {
    c.parse::<CssColor>().map_err(|_| "Invalid color format")?;
    Ok(())
}

/// Load a system font.
fn load_font(font_family: &str) -> Vec<u8> {
    let font_family_property = system_fonts::FontPropertyBuilder::new()
        .family(font_family)
        .build();
    let (loaded_font, _) =
        if let Some((loaded_font, index)) = system_fonts::get(&font_family_property) {
            (loaded_font, index)
        } else {
            eprintln!("Family not found, falling back to first Monospace font");
            let mut font_monospace_property =
                system_fonts::FontPropertyBuilder::new().monospace().build();
            let sysfonts = system_fonts::query_specific(&mut font_monospace_property);
            eprintln!("Falling back to font '{font}'", font = sysfonts[0]);
            let (loaded_font, index) =
                system_fonts::get(&font_monospace_property).expect("Couldn't find suitable font");
            (loaded_font, index)
        };
    loaded_font
}

/// Parse a color into a tuple of floats.
fn parse_color(color_str: CssColor) -> (f64, f64, f64, f64) {
    (
        f64::from(color_str.r) / 255.0,
        f64::from(color_str.g) / 255.0,
        f64::from(color_str.b) / 255.0,
        f64::from(color_str.a),
    )
}

/// Parse app arguments.
pub fn parse_args() -> AppConfig { let matches = App::new(crate_name!()) .version(crate_version!()) .author(crate_authors!()) .about(crate_description!()) .global_setting(AppSettings::ColoredHelp) .arg( Arg::with_name("font") .short("f") .long("font") .takes_value(true) .validator(is_truetype_font) .default_value("Mono:72") .help("Use a specific TrueType font with this format: family:size")) .arg( Arg::with_name("hint_chars") .short("c") .long("chars") .takes_value(true) .default_value("sadfjklewcmpgh") .help("Define a set of possbile values to use as hint characters")) .arg( Arg::with_name("margin") .short("m") .long("margin") .takes_value(true) .default_value("0.2") .help("Add an additional margin around the text box (value is a factor of the box size)")) .arg( Arg::with_name("text_color") .long("textcolor") .takes_value(true) .validator(is_valid_color) .default_value("#dddddd") .display_order(49) .help("Text color (CSS notation)")) .arg( Arg::with_name("text_color_alt") .long("textcoloralt") .takes_value(true) .validator(is_valid_color) .default_value("#666666") .display_order(50) .help("Text color alternate (CSS notation)")) .arg( Arg::with_name("bg_color") .long("bgcolor") .takes_value(true) .validator(is_valid_color) .default_value("rgba(30, 30, 30, 0.9)") .display_order(51) .help("Background color (CSS notation)")) .arg( Arg::with_name("horizontal_align") .long("halign") .takes_value(true) .possible_values(&["left", "center", "right"]) .default_value("left") .display_order(100) .help("Horizontal alignment of the box inside the window")) .arg( Arg::with_name("vertical_align") .long("valign") .takes_value(true) .possible_values(&["top", "center", "bottom"]) .default_value("top") .display_order(101) .help("Vertical alignment of the box inside the window")) .arg( Arg::with_name("fill") .long("fill") .conflicts_with_all(&["horizontal_align", "vertical_align", "margin"]) .display_order(102) .help("Completely fill out windows")) .arg( Arg::with_name("print_only") .short("p") .long("printonly") .help("Print the window id only but don't change focus")) .get_matches(); let font = value_t!(matches, "font", String).unwrap(); let v: Vec<_> = font.split(':').collect(); let (font_family, font_size) = (v[0].to_string(), v[1].parse::<f64>().unwrap()); let hint_chars = value_t!(matches, "hint_chars", String).unwrap(); let margin = value_t!(matches, "margin", f32).unwrap(); let text_color_unparsed = value_t!(matches, "text_color", CssColor).unwrap(); let text_color = parse_color(text_color_unparsed); let text_color_alt_unparsed = value_t!(matches, "text_color_alt", CssColor).unwrap(); let text_color_alt = parse_color(text_color_alt_unparsed); let bg_color_unparsed = value_t!(matches, "bg_color", CssColor).unwrap(); let bg_color = parse_color(bg_color_unparsed); let fill = matches.is_present("fill"); let print_only = matches.is_present("print_only"); let (horizontal_align, vertical_align) = if fill { (HorizontalAlign::Center, VerticalAlign::Center) } else
; let loaded_font = load_font(&font_family); AppConfig { font_family, font_size, loaded_font, hint_chars, margin, text_color, text_color_alt, bg_color, fill, print_only, horizontal_align, vertical_align, } } /// Given a list of `current_hints` and a bunch of `hint_chars`, this finds a unique combination /// of characters that doesn't yet exist in `current_hints`. `max_count` is the maximum possible /// number of hints we need. pub fn get_next_hint(current_hints: Vec<&String>, hint_chars: &str, max_count: usize) -> String { // Figure out which size we need. let mut size_required = 1; while hint_chars.len().pow(size_required) < max_count { size_required += 1; } let mut ret = hint_chars .chars() .next() .expect("No hint_chars found") .to_string(); let it = iter::repeat(hint_chars.chars().rev()) .take(size_required as usize) .multi_cartesian_product(); for c in it { let folded = c.into_iter().collect(); if !current_hints.contains(&&folded) { ret = folded; } } debug!("Returning next hint: {}", ret); ret } pub fn find_visual(conn: &xcb::Connection, visual: xcb_visualid_t) -> Option<xcb::Visualtype> { for screen in conn.get_setup().roots() { for depth in screen.allowed_depths() { for vis in depth.visuals() { if visual == vis.visual_id() { return Some(vis); } } } } None } pub fn extents_for_text(text: &str, family: &str, size: f64) -> cairo::TextExtents { // Create a buffer image that should be large enough. // TODO: Figure out the maximum size from the largest window on the desktop. // For now we'll use made-up maximum values. let surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 1024, 1024) .expect("Couldn't create ImageSurface"); let cr = cairo::Context::new(&surface); cr.select_font_face(family, cairo::FontSlant::Normal, cairo::FontWeight::Normal); cr.set_font_size(size); cr.text_extents(text) } /// Draw a `text` onto `rw`. In case any `current_hints` are already typed, it will draw those in a /// different color to show that they were in fact typed. pub fn draw_hint_text(rw: &RenderWindow, app_config: &AppConfig, text: &str, current_hints: &str) { // Paint background. rw.cairo_context.set_operator(cairo::Operator::Source); rw.cairo_context.set_source_rgb( app_config.bg_color.0, app_config.bg_color.1, app_config.bg_color.2, ); rw.cairo_context.paint(); rw.cairo_context.set_operator(cairo::Operator::Over); rw.cairo_context.select_font_face( &app_config.font_family, FontSlant::Normal, FontWeight::Normal, ); rw.cairo_context.set_font_size(app_config.font_size); rw.cairo_context.move_to(rw.draw_pos.0, rw.draw_pos.1); if text.starts_with(current_hints) { // Paint already selected chars. rw.cairo_context.set_source_rgba( app_config.text_color_alt.0, app_config.text_color_alt.1, app_config.text_color_alt.2, app_config.text_color_alt.3, ); for c in current_hints.chars() { rw.cairo_context.show_text(&c.to_string()); } } // Paint unselected chars. rw.cairo_context.set_source_rgba( app_config.text_color.0, app_config.text_color.1, app_config.text_color.2, app_config.text_color.3, ); let re = Regex::new(&format!("^{}", current_hints)).unwrap(); for c in re.replace(text, "").chars() { rw.cairo_context.show_text(&c.to_string()); } rw.cairo_context.get_target().flush(); } /// Try to grab the keyboard until `timeout` is reached. /// /// Generally with X, I found that you can't grab global keyboard input without it failing /// sometimes due to other clients grabbing it occasionally. Hence, we'll have to keep retrying /// until we eventually succeed. 
pub fn snatch_keyboard( conn: &xcb::Connection, screen: &xcb::Screen, timeout: Duration, ) -> Result<(), String> { let now = Instant::now(); loop { if now.elapsed() > timeout { return Err(format!( "Couldn't grab keyboard input within {:?}", now.elapsed() )); } let grab_keyboard_cookie = xcb::xproto::grab_keyboard( &conn, true, screen.root(), xcb::CURRENT_TIME, xcb::GRAB_MODE_ASYNC as u8, xcb::GRAB_MODE_ASYNC as u8, ); let grab_keyboard_reply = grab_keyboard_cookie .get_reply() .map_err(|_| "Couldn't communicate with X")?; if grab_keyboard_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 { return Ok(()); } sleep(Duration::from_millis(1)); } } /// Try to grab the mouse until `timeout` is reached. /// /// Generally with X, I found that you can't grab global mouse input without it failing sometimes /// due to other clients grabbing it occasionally. Hence, we'll have to keep retrying until we /// eventually succeed. pub fn snatch_mouse( conn: &xcb::Connection, screen: &xcb::Screen, timeout: Duration, ) -> Result<(), String> { let now = Instant::now(); loop { if now.elapsed() > timeout { return Err(format!( "Couldn't grab keyboard input within {:?}", now.elapsed() )); } let grab_pointer_cookie = xcb::xproto::grab_pointer( &conn, true, screen.root(), xcb::EVENT_MASK_BUTTON_PRESS as u16, xcb::GRAB_MODE_ASYNC as u8, xcb::GRAB_MODE_ASYNC as u8, xcb::NONE, xcb::NONE, xcb::CURRENT_TIME, ); let grab_pointer_reply = grab_pointer_cookie .get_reply() .map_err(|_| "Couldn't communicate with X")?; if grab_pointer_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 { return Ok(()); } sleep(Duration::from_millis(1)); } } /// Sort list of `DesktopWindow`s by position. /// /// This sorts by column first and row second. pub fn sort_by_pos(mut dws: Vec<DesktopWindow>) -> Vec<DesktopWindow> { dws.sort_by_key(|w| w.pos.0); dws.sort_by_key(|w| w.pos.1); dws } /// Returns true if `r1` and `r2` overlap. fn intersects(r1: (i32, i32, i32, i32), r2: (i32, i32, i32, i32)) -> bool { let left_corner_inside = r1.0 < r2.0 + r2.2; let right_corner_inside = r1.0 + r1.2 > r2.0; let top_corner_inside = r1.1 < r2.1 + r2.3; let bottom_corner_inside = r1.1 + r1.3 > r2.1; left_corner_inside && right_corner_inside && top_corner_inside && bottom_corner_inside } /// Finds overlaps and returns a list of those rects in the format (x, y, w, h). pub fn find_overlaps( rws: Vec<&RenderWindow>, rect: (i32, i32, i32, i32), ) -> Vec<(i32, i32, i32, i32)> { let mut overlaps = vec![]; for rw in rws { if intersects(rw.rect, rect) { overlaps.push(rw.rect); } } overlaps } #[cfg(test)] mod tests { use super::*; #[test] fn test_intersects() { assert!(intersects((1905, 705, 31, 82), (1905, 723, 38, 64))); } #[test] fn test_no_intersect() { assert!(!intersects((1905, 705, 31, 82), (2000, 723, 38, 64))); } }
{ ( value_t!(matches, "horizontal_align", HorizontalAlign).unwrap(), value_t!(matches, "vertical_align", VerticalAlign).unwrap(), ) }
conditional_block
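The `get_next_hint` function above sizes hints by growing `size_required` until `hint_chars.len().pow(size_required)` reaches `max_count`: with k hint characters, hints of length n can label k^n windows, so it finds the smallest such n. A standalone sketch of just that calculation (`hint_length` is an illustrative name, not part of the original file):

// Smallest hint length n such that k^n >= max_count.
fn hint_length(hint_chars: &str, max_count: usize) -> u32 {
    let k = hint_chars.len();
    let mut n = 1u32;
    while k.pow(n) < max_count {
        n += 1;
    }
    n
}

fn main() {
    // With the 14 default hint chars, 1 char covers up to 14 windows
    // and 2 chars cover up to 196.
    assert_eq!(hint_length("sadfjklewcmpgh", 10), 1);
    assert_eq!(hint_length("sadfjklewcmpgh", 15), 2);
    assert_eq!(hint_length("sadfjklewcmpgh", 196), 2);
}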
utils.rs
use cairo; use cairo::enums::{FontSlant, FontWeight}; use cairo::prelude::SurfaceExt; use clap::{ crate_authors, crate_description, crate_name, crate_version, value_t, App, AppSettings, Arg, }; use css_color_parser::Color as CssColor; use font_loader::system_fonts; use itertools::Itertools; use log::debug; use regex::Regex; use std::error::Error; use std::iter; use std::str::FromStr; use std::thread::sleep; use std::time::{Duration, Instant}; use xcb; use xcb::ffi::xcb_visualid_t; use crate::{AppConfig, DesktopWindow, RenderWindow}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum HorizontalAlign { Left, Center, Right, } impl FromStr for HorizontalAlign { type Err = (); fn from_str(s: &str) -> Result<HorizontalAlign, ()> { match s { "left" => Ok(HorizontalAlign::Left), "center" => Ok(HorizontalAlign::Center), "right" => Ok(HorizontalAlign::Right), _ => Err(()), } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum VerticalAlign { Top, Center, Bottom, } impl FromStr for VerticalAlign { type Err = (); fn from_str(s: &str) -> Result<VerticalAlign, ()> { match s { "top" => Ok(VerticalAlign::Top), "center" => Ok(VerticalAlign::Center), "bottom" => Ok(VerticalAlign::Bottom), _ => Err(()), } } } /// Checks whether the provided fontconfig font `f` is valid. fn is_truetype_font(f: String) -> Result<(), String> { let v: Vec<_> = f.split(':').collect(); let (family, size) = (v.get(0), v.get(1)); if family.is_none() || size.is_none() { return Err("From font format".to_string()); } if let Err(e) = size.unwrap().parse::<f32>() { return Err(e.description().to_string()); } Ok(()) } /// Validate a color. fn is_valid_color(c: String) -> Result<(), String> { c.parse::<CssColor>().map_err(|_| "Invalid color format")?; Ok(()) } /// Load a system font. fn load_font(font_family: &str) -> Vec<u8> { let font_family_property = system_fonts::FontPropertyBuilder::new() .family(font_family) .build(); let (loaded_font, _) = if let Some((loaded_font, index)) = system_fonts::get(&font_family_property) { (loaded_font, index) } else { eprintln!("Family not found, falling back to first Monospace font"); let mut font_monospace_property = system_fonts::FontPropertyBuilder::new().monospace().build(); let sysfonts = system_fonts::query_specific(&mut font_monospace_property); eprintln!("Falling back to font '{font}'", font = sysfonts[0]); let (loaded_font, index) = system_fonts::get(&font_monospace_property).expect("Couldn't find suitable font"); (loaded_font, index) }; loaded_font } /// Parse a color into a tuple of floats. fn parse_color(color_str: CssColor) -> (f64, f64, f64, f64) { ( f64::from(color_str.r) / 255.0, f64::from(color_str.g) / 255.0, f64::from(color_str.b) / 255.0, f64::from(color_str.a), ) } /// Parse app arguments. 
pub fn parse_args() -> AppConfig { let matches = App::new(crate_name!()) .version(crate_version!()) .author(crate_authors!()) .about(crate_description!()) .global_setting(AppSettings::ColoredHelp) .arg( Arg::with_name("font") .short("f") .long("font") .takes_value(true) .validator(is_truetype_font) .default_value("Mono:72") .help("Use a specific TrueType font with this format: family:size")) .arg( Arg::with_name("hint_chars") .short("c") .long("chars") .takes_value(true) .default_value("sadfjklewcmpgh") .help("Define a set of possbile values to use as hint characters")) .arg( Arg::with_name("margin") .short("m") .long("margin") .takes_value(true) .default_value("0.2") .help("Add an additional margin around the text box (value is a factor of the box size)")) .arg( Arg::with_name("text_color") .long("textcolor") .takes_value(true) .validator(is_valid_color) .default_value("#dddddd") .display_order(49) .help("Text color (CSS notation)")) .arg( Arg::with_name("text_color_alt") .long("textcoloralt") .takes_value(true) .validator(is_valid_color) .default_value("#666666") .display_order(50) .help("Text color alternate (CSS notation)")) .arg( Arg::with_name("bg_color") .long("bgcolor") .takes_value(true) .validator(is_valid_color) .default_value("rgba(30, 30, 30, 0.9)") .display_order(51) .help("Background color (CSS notation)")) .arg( Arg::with_name("horizontal_align") .long("halign") .takes_value(true) .possible_values(&["left", "center", "right"]) .default_value("left") .display_order(100) .help("Horizontal alignment of the box inside the window")) .arg( Arg::with_name("vertical_align") .long("valign") .takes_value(true) .possible_values(&["top", "center", "bottom"]) .default_value("top") .display_order(101) .help("Vertical alignment of the box inside the window")) .arg( Arg::with_name("fill") .long("fill") .conflicts_with_all(&["horizontal_align", "vertical_align", "margin"]) .display_order(102) .help("Completely fill out windows")) .arg( Arg::with_name("print_only") .short("p") .long("printonly") .help("Print the window id only but don't change focus")) .get_matches(); let font = value_t!(matches, "font", String).unwrap(); let v: Vec<_> = font.split(':').collect(); let (font_family, font_size) = (v[0].to_string(), v[1].parse::<f64>().unwrap()); let hint_chars = value_t!(matches, "hint_chars", String).unwrap(); let margin = value_t!(matches, "margin", f32).unwrap(); let text_color_unparsed = value_t!(matches, "text_color", CssColor).unwrap(); let text_color = parse_color(text_color_unparsed); let text_color_alt_unparsed = value_t!(matches, "text_color_alt", CssColor).unwrap(); let text_color_alt = parse_color(text_color_alt_unparsed); let bg_color_unparsed = value_t!(matches, "bg_color", CssColor).unwrap(); let bg_color = parse_color(bg_color_unparsed); let fill = matches.is_present("fill"); let print_only = matches.is_present("print_only"); let (horizontal_align, vertical_align) = if fill { (HorizontalAlign::Center, VerticalAlign::Center) } else { ( value_t!(matches, "horizontal_align", HorizontalAlign).unwrap(), value_t!(matches, "vertical_align", VerticalAlign).unwrap(), ) }; let loaded_font = load_font(&font_family); AppConfig { font_family, font_size, loaded_font, hint_chars, margin, text_color, text_color_alt, bg_color, fill, print_only, horizontal_align, vertical_align, } } /// Given a list of `current_hints` and a bunch of `hint_chars`, this finds a unique combination /// of characters that doesn't yet exist in `current_hints`. 
`max_count` is the maximum possible /// number of hints we need. pub fn get_next_hint(current_hints: Vec<&String>, hint_chars: &str, max_count: usize) -> String { // Figure out which size we need. let mut size_required = 1; while hint_chars.len().pow(size_required) < max_count { size_required += 1; } let mut ret = hint_chars .chars() .next() .expect("No hint_chars found") .to_string(); let it = iter::repeat(hint_chars.chars().rev()) .take(size_required as usize) .multi_cartesian_product(); for c in it { let folded = c.into_iter().collect(); if !current_hints.contains(&&folded) { ret = folded; } } debug!("Returning next hint: {}", ret); ret } pub fn find_visual(conn: &xcb::Connection, visual: xcb_visualid_t) -> Option<xcb::Visualtype> { for screen in conn.get_setup().roots() { for depth in screen.allowed_depths() { for vis in depth.visuals() { if visual == vis.visual_id() { return Some(vis); } } } } None } pub fn extents_for_text(text: &str, family: &str, size: f64) -> cairo::TextExtents { // Create a buffer image that should be large enough. // TODO: Figure out the maximum size from the largest window on the desktop. // For now we'll use made-up maximum values. let surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 1024, 1024) .expect("Couldn't create ImageSurface"); let cr = cairo::Context::new(&surface); cr.select_font_face(family, cairo::FontSlant::Normal, cairo::FontWeight::Normal); cr.set_font_size(size); cr.text_extents(text) } /// Draw a `text` onto `rw`. In case any `current_hints` are already typed, it will draw those in a /// different color to show that they were in fact typed. pub fn draw_hint_text(rw: &RenderWindow, app_config: &AppConfig, text: &str, current_hints: &str) { // Paint background. rw.cairo_context.set_operator(cairo::Operator::Source); rw.cairo_context.set_source_rgb( app_config.bg_color.0, app_config.bg_color.1, app_config.bg_color.2, ); rw.cairo_context.paint(); rw.cairo_context.set_operator(cairo::Operator::Over); rw.cairo_context.select_font_face( &app_config.font_family, FontSlant::Normal, FontWeight::Normal, ); rw.cairo_context.set_font_size(app_config.font_size); rw.cairo_context.move_to(rw.draw_pos.0, rw.draw_pos.1); if text.starts_with(current_hints) { // Paint already selected chars. rw.cairo_context.set_source_rgba( app_config.text_color_alt.0, app_config.text_color_alt.1, app_config.text_color_alt.2, app_config.text_color_alt.3, ); for c in current_hints.chars() { rw.cairo_context.show_text(&c.to_string()); } } // Paint unselected chars. rw.cairo_context.set_source_rgba( app_config.text_color.0, app_config.text_color.1, app_config.text_color.2, app_config.text_color.3, ); let re = Regex::new(&format!("^{}", current_hints)).unwrap(); for c in re.replace(text, "").chars() { rw.cairo_context.show_text(&c.to_string()); } rw.cairo_context.get_target().flush(); } /// Try to grab the keyboard until `timeout` is reached. /// /// Generally with X, I found that you can't grab global keyboard input without it failing /// sometimes due to other clients grabbing it occasionally. Hence, we'll have to keep retrying /// until we eventually succeed. 
pub fn snatch_keyboard( conn: &xcb::Connection, screen: &xcb::Screen, timeout: Duration, ) -> Result<(), String> { let now = Instant::now(); loop { if now.elapsed() > timeout { return Err(format!( "Couldn't grab keyboard input within {:?}", now.elapsed() )); } let grab_keyboard_cookie = xcb::xproto::grab_keyboard( &conn, true, screen.root(), xcb::CURRENT_TIME, xcb::GRAB_MODE_ASYNC as u8, xcb::GRAB_MODE_ASYNC as u8, ); let grab_keyboard_reply = grab_keyboard_cookie .get_reply() .map_err(|_| "Couldn't communicate with X")?; if grab_keyboard_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 { return Ok(()); } sleep(Duration::from_millis(1)); } } /// Try to grab the mouse until `timeout` is reached. /// /// Generally with X, I found that you can't grab global mouse input without it failing sometimes /// due to other clients grabbing it occasionally. Hence, we'll have to keep retrying until we /// eventually succeed. pub fn snatch_mouse( conn: &xcb::Connection, screen: &xcb::Screen, timeout: Duration, ) -> Result<(), String> { let now = Instant::now(); loop { if now.elapsed() > timeout { return Err(format!( "Couldn't grab keyboard input within {:?}", now.elapsed() )); } let grab_pointer_cookie = xcb::xproto::grab_pointer( &conn, true, screen.root(), xcb::EVENT_MASK_BUTTON_PRESS as u16, xcb::GRAB_MODE_ASYNC as u8, xcb::GRAB_MODE_ASYNC as u8, xcb::NONE, xcb::NONE, xcb::CURRENT_TIME, ); let grab_pointer_reply = grab_pointer_cookie .get_reply() .map_err(|_| "Couldn't communicate with X")?; if grab_pointer_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 { return Ok(()); } sleep(Duration::from_millis(1)); } } /// Sort list of `DesktopWindow`s by position. /// /// This sorts by column first and row second. pub fn sort_by_pos(mut dws: Vec<DesktopWindow>) -> Vec<DesktopWindow> { dws.sort_by_key(|w| w.pos.0); dws.sort_by_key(|w| w.pos.1); dws } /// Returns true if `r1` and `r2` overlap. fn intersects(r1: (i32, i32, i32, i32), r2: (i32, i32, i32, i32)) -> bool { let left_corner_inside = r1.0 < r2.0 + r2.2; let right_corner_inside = r1.0 + r1.2 > r2.0; let top_corner_inside = r1.1 < r2.1 + r2.3; let bottom_corner_inside = r1.1 + r1.3 > r2.1; left_corner_inside && right_corner_inside && top_corner_inside && bottom_corner_inside } /// Finds overlaps and returns a list of those rects in the format (x, y, w, h). pub fn find_overlaps( rws: Vec<&RenderWindow>, rect: (i32, i32, i32, i32), ) -> Vec<(i32, i32, i32, i32)> { let mut overlaps = vec![]; for rw in rws { if intersects(rw.rect, rect) { overlaps.push(rw.rect); } } overlaps } #[cfg(test)] mod tests { use super::*; #[test] fn test_intersects() { assert!(intersects((1905, 705, 31, 82), (1905, 723, 38, 64))); } #[test] fn
() { assert!(!intersects((1905, 705, 31, 82), (2000, 723, 38, 64))); } }
test_no_intersect
identifier_name
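The masked test name belongs to the `intersects` helper, which is the standard axis-aligned rectangle overlap test: two rects overlap exactly when they overlap on both the x and y axes. A self-contained sketch with the same `(x, y, w, h)` convention and the two assertions from the tests above:

// Axis-aligned rectangle overlap, mirroring intersects() from utils.rs.
fn intersects(r1: (i32, i32, i32, i32), r2: (i32, i32, i32, i32)) -> bool {
    let left_edge_ok = r1.0 < r2.0 + r2.2; // r1 starts left of r2's right edge
    let right_edge_ok = r1.0 + r1.2 > r2.0; // r1 ends right of r2's left edge
    let top_edge_ok = r1.1 < r2.1 + r2.3; // r1 starts above r2's bottom edge
    let bottom_edge_ok = r1.1 + r1.3 > r2.1; // r1 ends below r2's top edge
    left_edge_ok && right_edge_ok && top_edge_ok && bottom_edge_ok
}

fn main() {
    assert!(intersects((1905, 705, 31, 82), (1905, 723, 38, 64)));
    // Shifting the second rect far enough along x breaks the overlap.
    assert!(!intersects((1905, 705, 31, 82), (2000, 723, 38, 64)));
}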
utils.rs
use cairo; use cairo::enums::{FontSlant, FontWeight}; use cairo::prelude::SurfaceExt; use clap::{ crate_authors, crate_description, crate_name, crate_version, value_t, App, AppSettings, Arg, }; use css_color_parser::Color as CssColor; use font_loader::system_fonts; use itertools::Itertools; use log::debug; use regex::Regex; use std::error::Error; use std::iter; use std::str::FromStr; use std::thread::sleep; use std::time::{Duration, Instant}; use xcb; use xcb::ffi::xcb_visualid_t; use crate::{AppConfig, DesktopWindow, RenderWindow}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum HorizontalAlign { Left, Center, Right, } impl FromStr for HorizontalAlign { type Err = (); fn from_str(s: &str) -> Result<HorizontalAlign, ()> { match s { "left" => Ok(HorizontalAlign::Left), "center" => Ok(HorizontalAlign::Center), "right" => Ok(HorizontalAlign::Right), _ => Err(()), } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum VerticalAlign { Top, Center, Bottom, } impl FromStr for VerticalAlign { type Err = (); fn from_str(s: &str) -> Result<VerticalAlign, ()> { match s { "top" => Ok(VerticalAlign::Top), "center" => Ok(VerticalAlign::Center), "bottom" => Ok(VerticalAlign::Bottom), _ => Err(()), } } } /// Checks whether the provided fontconfig font `f` is valid. fn is_truetype_font(f: String) -> Result<(), String> { let v: Vec<_> = f.split(':').collect(); let (family, size) = (v.get(0), v.get(1)); if family.is_none() || size.is_none() { return Err("From font format".to_string()); } if let Err(e) = size.unwrap().parse::<f32>() { return Err(e.description().to_string()); } Ok(()) } /// Validate a color. fn is_valid_color(c: String) -> Result<(), String> { c.parse::<CssColor>().map_err(|_| "Invalid color format")?; Ok(()) } /// Load a system font. fn load_font(font_family: &str) -> Vec<u8> { let font_family_property = system_fonts::FontPropertyBuilder::new() .family(font_family) .build(); let (loaded_font, _) = if let Some((loaded_font, index)) = system_fonts::get(&font_family_property) { (loaded_font, index) } else { eprintln!("Family not found, falling back to first Monospace font"); let mut font_monospace_property = system_fonts::FontPropertyBuilder::new().monospace().build(); let sysfonts = system_fonts::query_specific(&mut font_monospace_property); eprintln!("Falling back to font '{font}'", font = sysfonts[0]); let (loaded_font, index) = system_fonts::get(&font_monospace_property).expect("Couldn't find suitable font"); (loaded_font, index) }; loaded_font } /// Parse a color into a tuple of floats. fn parse_color(color_str: CssColor) -> (f64, f64, f64, f64) { ( f64::from(color_str.r) / 255.0, f64::from(color_str.g) / 255.0, f64::from(color_str.b) / 255.0, f64::from(color_str.a), ) } /// Parse app arguments. 
pub fn parse_args() -> AppConfig { let matches = App::new(crate_name!()) .version(crate_version!()) .author(crate_authors!()) .about(crate_description!()) .global_setting(AppSettings::ColoredHelp) .arg( Arg::with_name("font") .short("f") .long("font") .takes_value(true) .validator(is_truetype_font) .default_value("Mono:72") .help("Use a specific TrueType font with this format: family:size")) .arg( Arg::with_name("hint_chars") .short("c") .long("chars") .takes_value(true) .default_value("sadfjklewcmpgh") .help("Define a set of possbile values to use as hint characters")) .arg( Arg::with_name("margin") .short("m") .long("margin") .takes_value(true) .default_value("0.2") .help("Add an additional margin around the text box (value is a factor of the box size)")) .arg( Arg::with_name("text_color") .long("textcolor") .takes_value(true) .validator(is_valid_color) .default_value("#dddddd") .display_order(49) .help("Text color (CSS notation)")) .arg( Arg::with_name("text_color_alt") .long("textcoloralt") .takes_value(true) .validator(is_valid_color) .default_value("#666666") .display_order(50) .help("Text color alternate (CSS notation)")) .arg( Arg::with_name("bg_color") .long("bgcolor") .takes_value(true) .validator(is_valid_color) .default_value("rgba(30, 30, 30, 0.9)") .display_order(51) .help("Background color (CSS notation)")) .arg( Arg::with_name("horizontal_align") .long("halign") .takes_value(true) .possible_values(&["left", "center", "right"]) .default_value("left") .display_order(100) .help("Horizontal alignment of the box inside the window")) .arg( Arg::with_name("vertical_align") .long("valign") .takes_value(true) .possible_values(&["top", "center", "bottom"]) .default_value("top") .display_order(101) .help("Vertical alignment of the box inside the window")) .arg( Arg::with_name("fill") .long("fill") .conflicts_with_all(&["horizontal_align", "vertical_align", "margin"]) .display_order(102) .help("Completely fill out windows")) .arg( Arg::with_name("print_only") .short("p") .long("printonly") .help("Print the window id only but don't change focus")) .get_matches(); let font = value_t!(matches, "font", String).unwrap(); let v: Vec<_> = font.split(':').collect(); let (font_family, font_size) = (v[0].to_string(), v[1].parse::<f64>().unwrap()); let hint_chars = value_t!(matches, "hint_chars", String).unwrap(); let margin = value_t!(matches, "margin", f32).unwrap(); let text_color_unparsed = value_t!(matches, "text_color", CssColor).unwrap(); let text_color = parse_color(text_color_unparsed); let text_color_alt_unparsed = value_t!(matches, "text_color_alt", CssColor).unwrap(); let text_color_alt = parse_color(text_color_alt_unparsed); let bg_color_unparsed = value_t!(matches, "bg_color", CssColor).unwrap(); let bg_color = parse_color(bg_color_unparsed); let fill = matches.is_present("fill"); let print_only = matches.is_present("print_only"); let (horizontal_align, vertical_align) = if fill { (HorizontalAlign::Center, VerticalAlign::Center) } else { ( value_t!(matches, "horizontal_align", HorizontalAlign).unwrap(), value_t!(matches, "vertical_align", VerticalAlign).unwrap(), ) }; let loaded_font = load_font(&font_family); AppConfig { font_family, font_size, loaded_font, hint_chars, margin, text_color, text_color_alt, bg_color, fill, print_only, horizontal_align, vertical_align, } } /// Given a list of `current_hints` and a bunch of `hint_chars`, this finds a unique combination /// of characters that doesn't yet exist in `current_hints`. 
`max_count` is the maximum possible /// number of hints we need. pub fn get_next_hint(current_hints: Vec<&String>, hint_chars: &str, max_count: usize) -> String { // Figure out which size we need. let mut size_required = 1; while hint_chars.len().pow(size_required) < max_count { size_required += 1; } let mut ret = hint_chars .chars() .next() .expect("No hint_chars found") .to_string(); let it = iter::repeat(hint_chars.chars().rev()) .take(size_required as usize) .multi_cartesian_product(); for c in it { let folded = c.into_iter().collect(); if !current_hints.contains(&&folded) { ret = folded; } } debug!("Returning next hint: {}", ret); ret } pub fn find_visual(conn: &xcb::Connection, visual: xcb_visualid_t) -> Option<xcb::Visualtype> { for screen in conn.get_setup().roots() { for depth in screen.allowed_depths() { for vis in depth.visuals() { if visual == vis.visual_id() { return Some(vis); } } } } None } pub fn extents_for_text(text: &str, family: &str, size: f64) -> cairo::TextExtents { // Create a buffer image that should be large enough. // TODO: Figure out the maximum size from the largest window on the desktop. // For now we'll use made-up maximum values. let surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 1024, 1024) .expect("Couldn't create ImageSurface"); let cr = cairo::Context::new(&surface); cr.select_font_face(family, cairo::FontSlant::Normal, cairo::FontWeight::Normal); cr.set_font_size(size); cr.text_extents(text) } /// Draw a `text` onto `rw`. In case any `current_hints` are already typed, it will draw those in a /// different color to show that they were in fact typed. pub fn draw_hint_text(rw: &RenderWindow, app_config: &AppConfig, text: &str, current_hints: &str) { // Paint background. rw.cairo_context.set_operator(cairo::Operator::Source); rw.cairo_context.set_source_rgb( app_config.bg_color.0, app_config.bg_color.1, app_config.bg_color.2, ); rw.cairo_context.paint(); rw.cairo_context.set_operator(cairo::Operator::Over); rw.cairo_context.select_font_face( &app_config.font_family, FontSlant::Normal, FontWeight::Normal, ); rw.cairo_context.set_font_size(app_config.font_size); rw.cairo_context.move_to(rw.draw_pos.0, rw.draw_pos.1); if text.starts_with(current_hints) { // Paint already selected chars. rw.cairo_context.set_source_rgba( app_config.text_color_alt.0, app_config.text_color_alt.1, app_config.text_color_alt.2, app_config.text_color_alt.3, ); for c in current_hints.chars() { rw.cairo_context.show_text(&c.to_string()); } } // Paint unselected chars. rw.cairo_context.set_source_rgba( app_config.text_color.0, app_config.text_color.1, app_config.text_color.2, app_config.text_color.3, ); let re = Regex::new(&format!("^{}", current_hints)).unwrap(); for c in re.replace(text, "").chars() { rw.cairo_context.show_text(&c.to_string()); } rw.cairo_context.get_target().flush(); } /// Try to grab the keyboard until `timeout` is reached. /// /// Generally with X, I found that you can't grab global keyboard input without it failing /// sometimes due to other clients grabbing it occasionally. Hence, we'll have to keep retrying /// until we eventually succeed. 
pub fn snatch_keyboard( conn: &xcb::Connection, screen: &xcb::Screen, timeout: Duration, ) -> Result<(), String> { let now = Instant::now(); loop { if now.elapsed() > timeout { return Err(format!( "Couldn't grab keyboard input within {:?}", now.elapsed() )); } let grab_keyboard_cookie = xcb::xproto::grab_keyboard( &conn, true, screen.root(), xcb::CURRENT_TIME, xcb::GRAB_MODE_ASYNC as u8, xcb::GRAB_MODE_ASYNC as u8, ); let grab_keyboard_reply = grab_keyboard_cookie .get_reply() .map_err(|_| "Couldn't communicate with X")?; if grab_keyboard_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 { return Ok(()); } sleep(Duration::from_millis(1)); } } /// Try to grab the mouse until `timeout` is reached. /// /// Generally with X, I found that you can't grab global mouse input without it failing sometimes /// due to other clients grabbing it occasionally. Hence, we'll have to keep retrying until we /// eventually succeed. pub fn snatch_mouse( conn: &xcb::Connection, screen: &xcb::Screen, timeout: Duration, ) -> Result<(), String>
/// Sort list of `DesktopWindow`s by position. /// /// This sorts by column first and row second. pub fn sort_by_pos(mut dws: Vec<DesktopWindow>) -> Vec<DesktopWindow> { dws.sort_by_key(|w| w.pos.0); dws.sort_by_key(|w| w.pos.1); dws } /// Returns true if `r1` and `r2` overlap. fn intersects(r1: (i32, i32, i32, i32), r2: (i32, i32, i32, i32)) -> bool { let left_corner_inside = r1.0 < r2.0 + r2.2; let right_corner_inside = r1.0 + r1.2 > r2.0; let top_corner_inside = r1.1 < r2.1 + r2.3; let bottom_corner_inside = r1.1 + r1.3 > r2.1; left_corner_inside && right_corner_inside && top_corner_inside && bottom_corner_inside } /// Finds overlaps and returns a list of those rects in the format (x, y, w, h). pub fn find_overlaps( rws: Vec<&RenderWindow>, rect: (i32, i32, i32, i32), ) -> Vec<(i32, i32, i32, i32)> { let mut overlaps = vec![]; for rw in rws { if intersects(rw.rect, rect) { overlaps.push(rw.rect); } } overlaps } #[cfg(test)] mod tests { use super::*; #[test] fn test_intersects() { assert!(intersects((1905, 705, 31, 82), (1905, 723, 38, 64))); } #[test] fn test_no_intersect() { assert!(!intersects((1905, 705, 31, 82), (2000, 723, 38, 64))); } }
{ let now = Instant::now(); loop { if now.elapsed() > timeout { return Err(format!( "Couldn't grab mouse input within {:?}", now.elapsed() )); } let grab_pointer_cookie = xcb::xproto::grab_pointer( &conn, true, screen.root(), xcb::EVENT_MASK_BUTTON_PRESS as u16, xcb::GRAB_MODE_ASYNC as u8, xcb::GRAB_MODE_ASYNC as u8, xcb::NONE, xcb::NONE, xcb::CURRENT_TIME, ); let grab_pointer_reply = grab_pointer_cookie .get_reply() .map_err(|_| "Couldn't communicate with X")?; if grab_pointer_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 { return Ok(()); } sleep(Duration::from_millis(1)); } }
identifier_body
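The masked body above is the grab-retry loop shared by `snatch_keyboard` and `snatch_mouse`: attempt the grab, and if another client currently holds it, sleep briefly and retry until `timeout` elapses. A generic sketch of the pattern with a closure standing in for the X grab call; `retry_until` and `try_grab` are illustrative names, not part of the original file:

use std::thread::sleep;
use std::time::{Duration, Instant};

// Retry `try_grab` with a short sleep until it succeeds or `timeout` elapses.
fn retry_until<F: FnMut() -> bool>(mut try_grab: F, timeout: Duration) -> Result<(), String> {
    let now = Instant::now();
    loop {
        if now.elapsed() > timeout {
            return Err(format!("Gave up after {:?}", now.elapsed()));
        }
        if try_grab() {
            return Ok(());
        }
        sleep(Duration::from_millis(1));
    }
}

fn main() {
    // Succeeds on the third attempt, well within the timeout.
    let mut attempts = 0;
    let res = retry_until(
        || {
            attempts += 1;
            attempts >= 3
        },
        Duration::from_millis(100),
    );
    assert!(res.is_ok());
}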
utils.rs
use cairo; use cairo::enums::{FontSlant, FontWeight}; use cairo::prelude::SurfaceExt; use clap::{ crate_authors, crate_description, crate_name, crate_version, value_t, App, AppSettings, Arg, }; use css_color_parser::Color as CssColor; use font_loader::system_fonts; use itertools::Itertools; use log::debug; use regex::Regex; use std::error::Error; use std::iter; use std::str::FromStr; use std::thread::sleep; use std::time::{Duration, Instant}; use xcb; use xcb::ffi::xcb_visualid_t; use crate::{AppConfig, DesktopWindow, RenderWindow}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum HorizontalAlign { Left, Center, Right, } impl FromStr for HorizontalAlign { type Err = (); fn from_str(s: &str) -> Result<HorizontalAlign, ()> { match s { "left" => Ok(HorizontalAlign::Left), "center" => Ok(HorizontalAlign::Center), "right" => Ok(HorizontalAlign::Right), _ => Err(()), } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum VerticalAlign { Top, Center, Bottom, } impl FromStr for VerticalAlign { type Err = (); fn from_str(s: &str) -> Result<VerticalAlign, ()> { match s {
"bottom" => Ok(VerticalAlign::Bottom), _ => Err(()), } } } /// Checks whether the provided fontconfig font `f` is valid. fn is_truetype_font(f: String) -> Result<(), String> { let v: Vec<_> = f.split(':').collect(); let (family, size) = (v.get(0), v.get(1)); if family.is_none() || size.is_none() { return Err("From font format".to_string()); } if let Err(e) = size.unwrap().parse::<f32>() { return Err(e.description().to_string()); } Ok(()) } /// Validate a color. fn is_valid_color(c: String) -> Result<(), String> { c.parse::<CssColor>().map_err(|_| "Invalid color format")?; Ok(()) } /// Load a system font. fn load_font(font_family: &str) -> Vec<u8> { let font_family_property = system_fonts::FontPropertyBuilder::new() .family(font_family) .build(); let (loaded_font, _) = if let Some((loaded_font, index)) = system_fonts::get(&font_family_property) { (loaded_font, index) } else { eprintln!("Family not found, falling back to first Monospace font"); let mut font_monospace_property = system_fonts::FontPropertyBuilder::new().monospace().build(); let sysfonts = system_fonts::query_specific(&mut font_monospace_property); eprintln!("Falling back to font '{font}'", font = sysfonts[0]); let (loaded_font, index) = system_fonts::get(&font_monospace_property).expect("Couldn't find suitable font"); (loaded_font, index) }; loaded_font } /// Parse a color into a tuple of floats. fn parse_color(color_str: CssColor) -> (f64, f64, f64, f64) { ( f64::from(color_str.r) / 255.0, f64::from(color_str.g) / 255.0, f64::from(color_str.b) / 255.0, f64::from(color_str.a), ) } /// Parse app arguments. pub fn parse_args() -> AppConfig { let matches = App::new(crate_name!()) .version(crate_version!()) .author(crate_authors!()) .about(crate_description!()) .global_setting(AppSettings::ColoredHelp) .arg( Arg::with_name("font") .short("f") .long("font") .takes_value(true) .validator(is_truetype_font) .default_value("Mono:72") .help("Use a specific TrueType font with this format: family:size")) .arg( Arg::with_name("hint_chars") .short("c") .long("chars") .takes_value(true) .default_value("sadfjklewcmpgh") .help("Define a set of possbile values to use as hint characters")) .arg( Arg::with_name("margin") .short("m") .long("margin") .takes_value(true) .default_value("0.2") .help("Add an additional margin around the text box (value is a factor of the box size)")) .arg( Arg::with_name("text_color") .long("textcolor") .takes_value(true) .validator(is_valid_color) .default_value("#dddddd") .display_order(49) .help("Text color (CSS notation)")) .arg( Arg::with_name("text_color_alt") .long("textcoloralt") .takes_value(true) .validator(is_valid_color) .default_value("#666666") .display_order(50) .help("Text color alternate (CSS notation)")) .arg( Arg::with_name("bg_color") .long("bgcolor") .takes_value(true) .validator(is_valid_color) .default_value("rgba(30, 30, 30, 0.9)") .display_order(51) .help("Background color (CSS notation)")) .arg( Arg::with_name("horizontal_align") .long("halign") .takes_value(true) .possible_values(&["left", "center", "right"]) .default_value("left") .display_order(100) .help("Horizontal alignment of the box inside the window")) .arg( Arg::with_name("vertical_align") .long("valign") .takes_value(true) .possible_values(&["top", "center", "bottom"]) .default_value("top") .display_order(101) .help("Vertical alignment of the box inside the window")) .arg( Arg::with_name("fill") .long("fill") .conflicts_with_all(&["horizontal_align", "vertical_align", "margin"]) .display_order(102) .help("Completely fill out 
windows")) .arg( Arg::with_name("print_only") .short("p") .long("printonly") .help("Print the window id only but don't change focus")) .get_matches(); let font = value_t!(matches, "font", String).unwrap(); let v: Vec<_> = font.split(':').collect(); let (font_family, font_size) = (v[0].to_string(), v[1].parse::<f64>().unwrap()); let hint_chars = value_t!(matches, "hint_chars", String).unwrap(); let margin = value_t!(matches, "margin", f32).unwrap(); let text_color_unparsed = value_t!(matches, "text_color", CssColor).unwrap(); let text_color = parse_color(text_color_unparsed); let text_color_alt_unparsed = value_t!(matches, "text_color_alt", CssColor).unwrap(); let text_color_alt = parse_color(text_color_alt_unparsed); let bg_color_unparsed = value_t!(matches, "bg_color", CssColor).unwrap(); let bg_color = parse_color(bg_color_unparsed); let fill = matches.is_present("fill"); let print_only = matches.is_present("print_only"); let (horizontal_align, vertical_align) = if fill { (HorizontalAlign::Center, VerticalAlign::Center) } else { ( value_t!(matches, "horizontal_align", HorizontalAlign).unwrap(), value_t!(matches, "vertical_align", VerticalAlign).unwrap(), ) }; let loaded_font = load_font(&font_family); AppConfig { font_family, font_size, loaded_font, hint_chars, margin, text_color, text_color_alt, bg_color, fill, print_only, horizontal_align, vertical_align, } } /// Given a list of `current_hints` and a bunch of `hint_chars`, this finds a unique combination /// of characters that doesn't yet exist in `current_hints`. `max_count` is the maximum possible /// number of hints we need. pub fn get_next_hint(current_hints: Vec<&String>, hint_chars: &str, max_count: usize) -> String { // Figure out which size we need. let mut size_required = 1; while hint_chars.len().pow(size_required) < max_count { size_required += 1; } let mut ret = hint_chars .chars() .next() .expect("No hint_chars found") .to_string(); let it = iter::repeat(hint_chars.chars().rev()) .take(size_required as usize) .multi_cartesian_product(); for c in it { let folded = c.into_iter().collect(); if !current_hints.contains(&&folded) { ret = folded; } } debug!("Returning next hint: {}", ret); ret } pub fn find_visual(conn: &xcb::Connection, visual: xcb_visualid_t) -> Option<xcb::Visualtype> { for screen in conn.get_setup().roots() { for depth in screen.allowed_depths() { for vis in depth.visuals() { if visual == vis.visual_id() { return Some(vis); } } } } None } pub fn extents_for_text(text: &str, family: &str, size: f64) -> cairo::TextExtents { // Create a buffer image that should be large enough. // TODO: Figure out the maximum size from the largest window on the desktop. // For now we'll use made-up maximum values. let surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 1024, 1024) .expect("Couldn't create ImageSurface"); let cr = cairo::Context::new(&surface); cr.select_font_face(family, cairo::FontSlant::Normal, cairo::FontWeight::Normal); cr.set_font_size(size); cr.text_extents(text) } /// Draw a `text` onto `rw`. In case any `current_hints` are already typed, it will draw those in a /// different color to show that they were in fact typed. pub fn draw_hint_text(rw: &RenderWindow, app_config: &AppConfig, text: &str, current_hints: &str) { // Paint background. 
rw.cairo_context.set_operator(cairo::Operator::Source); rw.cairo_context.set_source_rgb( app_config.bg_color.0, app_config.bg_color.1, app_config.bg_color.2, ); rw.cairo_context.paint(); rw.cairo_context.set_operator(cairo::Operator::Over); rw.cairo_context.select_font_face( &app_config.font_family, FontSlant::Normal, FontWeight::Normal, ); rw.cairo_context.set_font_size(app_config.font_size); rw.cairo_context.move_to(rw.draw_pos.0, rw.draw_pos.1); if text.starts_with(current_hints) { // Paint already selected chars. rw.cairo_context.set_source_rgba( app_config.text_color_alt.0, app_config.text_color_alt.1, app_config.text_color_alt.2, app_config.text_color_alt.3, ); for c in current_hints.chars() { rw.cairo_context.show_text(&c.to_string()); } } // Paint unselected chars. rw.cairo_context.set_source_rgba( app_config.text_color.0, app_config.text_color.1, app_config.text_color.2, app_config.text_color.3, ); let re = Regex::new(&format!("^{}", current_hints)).unwrap(); for c in re.replace(text, "").chars() { rw.cairo_context.show_text(&c.to_string()); } rw.cairo_context.get_target().flush(); } /// Try to grab the keyboard until `timeout` is reached. /// /// Generally with X, I found that you can't grab global keyboard input without it failing /// sometimes due to other clients grabbing it occasionally. Hence, we'll have to keep retrying /// until we eventually succeed. pub fn snatch_keyboard( conn: &xcb::Connection, screen: &xcb::Screen, timeout: Duration, ) -> Result<(), String> { let now = Instant::now(); loop { if now.elapsed() > timeout { return Err(format!( "Couldn't grab keyboard input within {:?}", now.elapsed() )); } let grab_keyboard_cookie = xcb::xproto::grab_keyboard( &conn, true, screen.root(), xcb::CURRENT_TIME, xcb::GRAB_MODE_ASYNC as u8, xcb::GRAB_MODE_ASYNC as u8, ); let grab_keyboard_reply = grab_keyboard_cookie .get_reply() .map_err(|_| "Couldn't communicate with X")?; if grab_keyboard_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 { return Ok(()); } sleep(Duration::from_millis(1)); } } /// Try to grab the mouse until `timeout` is reached. /// /// Generally with X, I found that you can't grab global mouse input without it failing sometimes /// due to other clients grabbing it occasionally. Hence, we'll have to keep retrying until we /// eventually succeed. pub fn snatch_mouse( conn: &xcb::Connection, screen: &xcb::Screen, timeout: Duration, ) -> Result<(), String> { let now = Instant::now(); loop { if now.elapsed() > timeout { return Err(format!( "Couldn't grab keyboard input within {:?}", now.elapsed() )); } let grab_pointer_cookie = xcb::xproto::grab_pointer( &conn, true, screen.root(), xcb::EVENT_MASK_BUTTON_PRESS as u16, xcb::GRAB_MODE_ASYNC as u8, xcb::GRAB_MODE_ASYNC as u8, xcb::NONE, xcb::NONE, xcb::CURRENT_TIME, ); let grab_pointer_reply = grab_pointer_cookie .get_reply() .map_err(|_| "Couldn't communicate with X")?; if grab_pointer_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 { return Ok(()); } sleep(Duration::from_millis(1)); } } /// Sort list of `DesktopWindow`s by position. /// /// This sorts by column first and row second. pub fn sort_by_pos(mut dws: Vec<DesktopWindow>) -> Vec<DesktopWindow> { dws.sort_by_key(|w| w.pos.0); dws.sort_by_key(|w| w.pos.1); dws } /// Returns true if `r1` and `r2` overlap. 
fn intersects(r1: (i32, i32, i32, i32), r2: (i32, i32, i32, i32)) -> bool { let left_corner_inside = r1.0 < r2.0 + r2.2; let right_corner_inside = r1.0 + r1.2 > r2.0; let top_corner_inside = r1.1 < r2.1 + r2.3; let bottom_corner_inside = r1.1 + r1.3 > r2.1; left_corner_inside && right_corner_inside && top_corner_inside && bottom_corner_inside } /// Finds overlaps and returns a list of those rects in the format (x, y, w, h). pub fn find_overlaps( rws: Vec<&RenderWindow>, rect: (i32, i32, i32, i32), ) -> Vec<(i32, i32, i32, i32)> { let mut overlaps = vec![]; for rw in rws { if intersects(rw.rect, rect) { overlaps.push(rw.rect); } } overlaps } #[cfg(test)] mod tests { use super::*; #[test] fn test_intersects() { assert!(intersects((1905, 705, 31, 82), (1905, 723, 38, 64))); } #[test] fn test_no_intersect() { assert!(!intersects((1905, 705, 31, 82), (2000, 723, 38, 64))); } }
"top" => Ok(VerticalAlign::Top), "center" => Ok(VerticalAlign::Center),
random_line_split
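`sort_by_pos` above relies on `Vec::sort_by_key` being a stable sort: applying the x sort first and the y sort second leaves y as the primary key, with the earlier x ordering preserved as the tie-breaker among equal y values. A minimal demonstration of the same two-pass trick on plain `(x, y)` tuples:

// Stable-sort composition: secondary key first, primary key second.
fn sort_by_pos(mut ws: Vec<(i32, i32)>) -> Vec<(i32, i32)> {
    ws.sort_by_key(|w| w.0); // secondary key: x
    ws.sort_by_key(|w| w.1); // primary key: y (stable sort keeps the x order)
    ws
}

fn main() {
    let ws = vec![(200, 10), (100, 20), (100, 10)];
    // Rows (y) first, columns (x) as the tie-breaker.
    assert_eq!(sort_by_pos(ws), vec![(100, 10), (200, 10), (100, 20)]);
}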
mysql_interactive_worker.rs
// Copyright 2020 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::marker::PhantomData; use std::time::Instant; use common_base::tokio; use common_datablocks::DataBlock; use common_exception::ErrorCode; use common_exception::Result; use common_io::prelude::*; use common_planners::PlanNode; use metrics::histogram; use msql_srv::ErrorKind; use msql_srv::InitWriter; use msql_srv::MysqlShim; use msql_srv::ParamParser; use msql_srv::QueryResultWriter; use msql_srv::StatementMetaWriter; use rand::RngCore; use tokio_stream::StreamExt; use crate::interpreters::InterpreterFactory; use crate::servers::mysql::writers::DFInitResultWriter; use crate::servers::mysql::writers::DFQueryResultWriter; use crate::sessions::DatabendQueryContextRef; use crate::sessions::SessionRef; use crate::sql::PlanParser; use crate::users::CertifiedInfo; struct InteractiveWorkerBase<W: std::io::Write> { session: SessionRef, generic_hold: PhantomData<W>, } pub struct InteractiveWorker<W: std::io::Write> { session: SessionRef, base: InteractiveWorkerBase<W>, version: String, salt: [u8; 20], client_addr: String, } impl<W: std::io::Write> MysqlShim<W> for InteractiveWorker<W> { type Error = ErrorCode; fn version(&self) -> &str { self.version.as_str() } fn connect_id(&self) -> u32 { u32::from_le_bytes([0x08, 0x00, 0x00, 0x00]) } fn default_auth_plugin(&self) -> &str { "mysql_native_password" } fn auth_plugin_for_username(&self, _user: &[u8]) -> &str { "mysql_native_password" } fn salt(&self) -> [u8; 20] { self.salt } fn authenticate( &self, auth_plugin: &str, username: &[u8], salt: &[u8], auth_data: &[u8], ) -> bool { let username = String::from_utf8_lossy(username); let info = CertifiedInfo::create(&username, auth_data, &self.client_addr); let authenticate = self.base.authenticate(auth_plugin, salt, info); futures::executor::block_on(async move { match authenticate.await { Ok(res) => res, Err(failure) => { log::error!( "MySQL handler authenticate failed, \ user_name: {}, \ client_address: {}, \ failure_cause: {}", username, self.client_addr, failure ); false } } }) } fn on_prepare(&mut self, query: &str, writer: StatementMetaWriter<W>) -> Result<()> { if self.session.is_aborting() { writer.error( ErrorKind::ER_ABORTING_CONNECTION, "Aborting this connection. because we are try aborting server.".as_bytes(), )?; return Err(ErrorCode::AbortedSession( "Aborting this connection. because we are try aborting server.", )); } self.base.do_prepare(query, writer) } fn on_execute( &mut self, id: u32, param: ParamParser, writer: QueryResultWriter<W>, ) -> Result<()> { if self.session.is_aborting() { writer.error( ErrorKind::ER_ABORTING_CONNECTION, "Aborting this connection. because we are try aborting server.".as_bytes(), )?; return Err(ErrorCode::AbortedSession( "Aborting this connection. 
because we are try aborting server.", )); } self.base.do_execute(id, param, writer) } fn on_close(&mut self, id: u32) { self.base.do_close(id); } fn on_query(&mut self, query: &str, writer: QueryResultWriter<W>) -> Result<()> { if self.session.is_aborting() { writer.error( ErrorKind::ER_ABORTING_CONNECTION, "Aborting this connection. because we are try aborting server.".as_bytes(), )?; return Err(ErrorCode::AbortedSession( "Aborting this connection. because we are try aborting server.", )); } let mut writer = DFQueryResultWriter::create(writer); match InteractiveWorkerBase::<W>::build_runtime() { Ok(runtime) => { let instant = Instant::now(); let blocks = runtime.block_on(self.base.do_query(query)); let mut write_result = writer.write(blocks); if let Err(cause) = write_result { let suffix = format!("(while in query {})", query); write_result = Err(cause.add_message_back(suffix)); } histogram!( super::mysql_metrics::METRIC_MYSQL_PROCESSOR_REQUEST_DURATION, instant.elapsed() ); write_result } Err(error) => writer.write(Err(error)), } } fn on_init(&mut self, database_name: &str, writer: InitWriter<W>) -> Result<()> { if self.session.is_aborting() { writer.error( ErrorKind::ER_ABORTING_CONNECTION, "Aborting this connection. because we are try aborting server.".as_bytes(), )?; return Err(ErrorCode::AbortedSession( "Aborting this connection. because we are try aborting server.", )); } DFInitResultWriter::create(writer).write(self.base.do_init(database_name)) } } impl<W: std::io::Write> InteractiveWorkerBase<W> { async fn authenticate( &self, auth_plugin: &str, salt: &[u8], info: CertifiedInfo, ) -> Result<bool> { let user_name = &info.user_name; let address = &info.user_client_address; let user_manager = self.session.get_user_manager(); let user_info = user_manager.get_user(user_name).await?; let input = &info.user_password; let saved = &user_info.password; let encode_password = Self::encoding_password(auth_plugin, salt, input, saved)?; user_manager .auth_user(CertifiedInfo::create(user_name, encode_password, address)) .await } fn encoding_password( auth_plugin: &str, salt: &[u8], input: &[u8], user_password: &[u8], ) -> Result<Vec<u8>> { match auth_plugin { "mysql_native_password" if input.is_empty() => Ok(vec![]), "mysql_native_password" => { // SHA1( password ) XOR SHA1( "20-bytes random data from server" <concat> SHA1( SHA1( password ) ) ) let mut m = sha1::Sha1::new(); m.update(salt); m.update(user_password); let result = m.digest().bytes(); if input.len() != result.len() { return Err(ErrorCode::SHA1CheckFailed("SHA1 check failed")); } let mut s = Vec::with_capacity(result.len()); for i in 0..result.len() { s.push(input[i] ^ result[i]); } Ok(s) } _ => Ok(input.to_vec()), } } fn do_prepare(&mut self, _: &str, writer: StatementMetaWriter<'_, W>) -> Result<()> { writer.error( ErrorKind::ER_UNKNOWN_ERROR, "Prepare is not support in Databend.".as_bytes(), )?; Ok(()) } fn do_execute( &mut self, _: u32, _: ParamParser<'_>, writer: QueryResultWriter<'_, W>, ) -> Result<()> { writer.error( ErrorKind::ER_UNKNOWN_ERROR, "Execute is not support in Databend.".as_bytes(), )?; Ok(()) } fn do_close(&mut self, _: u32) {} async fn do_query(&mut self, query: &str) -> Result<(Vec<DataBlock>, String)> { log::debug!("{}", query); let context = self.session.create_context().await?; context.attach_query_str(query); let query_parser = PlanParser::create(context.clone()); let (plan, hints) = query_parser.build_with_hint_from_sql(query); match hints .iter() .find(|v| v.error_code.is_some()) .and_then(|x| 
x.error_code) { None => Self::exec_query(plan, &context).await, Some(hint_error_code) => match Self::exec_query(plan, &context).await { Ok(_) => Err(ErrorCode::UnexpectedError(format!( "Expected server error code: {} but got: Ok.", hint_error_code ))), Err(error_code) => { if hint_error_code == error_code.code() { Ok((vec![DataBlock::empty()], String::from(""))) } else { let actual_code = error_code.code(); Err(error_code.add_message(format!( "Expected server error code: {} but got: {}.", hint_error_code, actual_code ))) } } }, } } async fn exec_query( plan: Result<PlanNode>, context: &DatabendQueryContextRef, ) -> Result<(Vec<DataBlock>, String)> { let instant = Instant::now(); let interpreter = InterpreterFactory::get(context.clone(), plan?)?; let data_stream = interpreter.execute().await?; histogram!( super::mysql_metrics::METRIC_INTERPRETER_USEDTIME, instant.elapsed() ); let collector = data_stream.collect::<Result<Vec<DataBlock>>>(); let query_result = collector.await; query_result.map(|data| (data, Self::extra_info(context, instant))) } fn extra_info(context: &DatabendQueryContextRef, instant: Instant) -> String { let progress = context.get_progress_value(); let seconds = instant.elapsed().as_nanos() as f64 / 1e9f64; format!( "Read {} rows, {} in {:.3} sec., {} rows/sec., {}/sec.", progress.read_rows, convert_byte_size(progress.read_bytes as f64), seconds, convert_number_size((progress.read_rows as f64) / (seconds as f64)), convert_byte_size((progress.read_bytes as f64) / (seconds as f64)), ) } fn do_init(&mut self, database_name: &str) -> Result<()> { let init_query = format!("USE {};", database_name); let do_query = self.do_query(&init_query); match Self::build_runtime() { Err(error_code) => Err(error_code), Ok(runtime) => match runtime.block_on(do_query) { Ok(_) => Ok(()), Err(error_code) => Err(error_code), }, }
fn build_runtime() -> Result<tokio::runtime::Runtime> { tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .map_err(|tokio_error| ErrorCode::TokioError(format!("{}", tokio_error))) } } impl<W: std::io::Write> InteractiveWorker<W> { pub fn create(session: SessionRef, client_addr: String) -> InteractiveWorker<W> { let mut bs = vec![0u8; 20]; let mut rng = rand::thread_rng(); rng.fill_bytes(bs.as_mut()); let mut scramble: [u8; 20] = [0; 20]; for i in 0..20 { scramble[i] = bs[i]; if scramble[i] == b'\0' || scramble[i] == b'$' { scramble[i] += 1; } } InteractiveWorker::<W> { session: session.clone(), base: InteractiveWorkerBase::<W> { session, generic_hold: PhantomData::default(), }, salt: scramble, // TODO: version version: crate::configs::DATABEND_COMMIT_VERSION.to_string(), client_addr, } } }
}
random_line_split
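`encoding_password` above implements the `mysql_native_password` scramble: the client sends SHA1(password) XOR SHA1(salt ++ SHA1(SHA1(password))), and since MySQL stores SHA1(SHA1(password)), the server can rebuild the same mask and XOR it away to recover SHA1(password). A sketch using the same `sha1` crate calls the file uses; the final `verify` comparison is an assumed extension, since the real code defers that check to `auth_user`:

// `stored` is assumed to hold SHA1(SHA1(password)), as MySQL stores it.
fn descramble(salt: &[u8], stored: &[u8; 20], client_reply: &[u8; 20]) -> [u8; 20] {
    // mask = SHA1(salt ++ SHA1(SHA1(password)))
    let mut m = sha1::Sha1::new();
    m.update(salt);
    m.update(stored);
    let mask = m.digest().bytes();

    // client_reply = SHA1(password) XOR mask, so XORing again yields SHA1(password).
    let mut out = [0u8; 20];
    for i in 0..20 {
        out[i] = client_reply[i] ^ mask[i];
    }
    out
}

// Assumed verification step: hash the recovered SHA1(password) once more
// and compare against the stored double hash.
fn verify(salt: &[u8], stored: &[u8; 20], client_reply: &[u8; 20]) -> bool {
    let sha1_password = descramble(salt, stored, client_reply);
    let mut m = sha1::Sha1::new();
    m.update(&sha1_password);
    m.digest().bytes() == *stored
}

fn main() {
    let salt = b"12345678901234567890";
    let sha1 = |data: &[u8]| -> [u8; 20] {
        let mut m = sha1::Sha1::new();
        m.update(data);
        m.digest().bytes()
    };
    // Round trip: build the stored value and a correct client reply for "secret".
    let sha1_password = sha1(b"secret");
    let stored = sha1(&sha1_password);
    let mask = {
        let mut m = sha1::Sha1::new();
        m.update(salt);
        m.update(&stored);
        m.digest().bytes()
    };
    let mut client_reply = [0u8; 20];
    for i in 0..20 {
        client_reply[i] = sha1_password[i] ^ mask[i];
    }
    assert!(verify(salt, &stored, &client_reply));
}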
mysql_interactive_worker.rs
// Copyright 2020 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::marker::PhantomData; use std::time::Instant; use common_base::tokio; use common_datablocks::DataBlock; use common_exception::ErrorCode; use common_exception::Result; use common_io::prelude::*; use common_planners::PlanNode; use metrics::histogram; use msql_srv::ErrorKind; use msql_srv::InitWriter; use msql_srv::MysqlShim; use msql_srv::ParamParser; use msql_srv::QueryResultWriter; use msql_srv::StatementMetaWriter; use rand::RngCore; use tokio_stream::StreamExt; use crate::interpreters::InterpreterFactory; use crate::servers::mysql::writers::DFInitResultWriter; use crate::servers::mysql::writers::DFQueryResultWriter; use crate::sessions::DatabendQueryContextRef; use crate::sessions::SessionRef; use crate::sql::PlanParser; use crate::users::CertifiedInfo; struct InteractiveWorkerBase<W: std::io::Write> { session: SessionRef, generic_hold: PhantomData<W>, } pub struct InteractiveWorker<W: std::io::Write> { session: SessionRef, base: InteractiveWorkerBase<W>, version: String, salt: [u8; 20], client_addr: String, } impl<W: std::io::Write> MysqlShim<W> for InteractiveWorker<W> { type Error = ErrorCode; fn version(&self) -> &str { self.version.as_str() } fn connect_id(&self) -> u32 { u32::from_le_bytes([0x08, 0x00, 0x00, 0x00]) } fn
(&self) -> &str { "mysql_native_password" } fn auth_plugin_for_username(&self, _user: &[u8]) -> &str { "mysql_native_password" } fn salt(&self) -> [u8; 20] { self.salt } fn authenticate( &self, auth_plugin: &str, username: &[u8], salt: &[u8], auth_data: &[u8], ) -> bool { let username = String::from_utf8_lossy(username); let info = CertifiedInfo::create(&username, auth_data, &self.client_addr); let authenticate = self.base.authenticate(auth_plugin, salt, info); futures::executor::block_on(async move { match authenticate.await { Ok(res) => res, Err(failure) => { log::error!( "MySQL handler authenticate failed, \ user_name: {}, \ client_address: {}, \ failure_cause: {}", username, self.client_addr, failure ); false } } }) } fn on_prepare(&mut self, query: &str, writer: StatementMetaWriter<W>) -> Result<()> { if self.session.is_aborting() { writer.error( ErrorKind::ER_ABORTING_CONNECTION, "Aborting this connection. because we are try aborting server.".as_bytes(), )?; return Err(ErrorCode::AbortedSession( "Aborting this connection. because we are try aborting server.", )); } self.base.do_prepare(query, writer) } fn on_execute( &mut self, id: u32, param: ParamParser, writer: QueryResultWriter<W>, ) -> Result<()> { if self.session.is_aborting() { writer.error( ErrorKind::ER_ABORTING_CONNECTION, "Aborting this connection. because we are try aborting server.".as_bytes(), )?; return Err(ErrorCode::AbortedSession( "Aborting this connection. because we are try aborting server.", )); } self.base.do_execute(id, param, writer) } fn on_close(&mut self, id: u32) { self.base.do_close(id); } fn on_query(&mut self, query: &str, writer: QueryResultWriter<W>) -> Result<()> { if self.session.is_aborting() { writer.error( ErrorKind::ER_ABORTING_CONNECTION, "Aborting this connection. because we are try aborting server.".as_bytes(), )?; return Err(ErrorCode::AbortedSession( "Aborting this connection. because we are try aborting server.", )); } let mut writer = DFQueryResultWriter::create(writer); match InteractiveWorkerBase::<W>::build_runtime() { Ok(runtime) => { let instant = Instant::now(); let blocks = runtime.block_on(self.base.do_query(query)); let mut write_result = writer.write(blocks); if let Err(cause) = write_result { let suffix = format!("(while in query {})", query); write_result = Err(cause.add_message_back(suffix)); } histogram!( super::mysql_metrics::METRIC_MYSQL_PROCESSOR_REQUEST_DURATION, instant.elapsed() ); write_result } Err(error) => writer.write(Err(error)), } } fn on_init(&mut self, database_name: &str, writer: InitWriter<W>) -> Result<()> { if self.session.is_aborting() { writer.error( ErrorKind::ER_ABORTING_CONNECTION, "Aborting this connection. because we are try aborting server.".as_bytes(), )?; return Err(ErrorCode::AbortedSession( "Aborting this connection. 
because we are try aborting server.", )); } DFInitResultWriter::create(writer).write(self.base.do_init(database_name)) } } impl<W: std::io::Write> InteractiveWorkerBase<W> { async fn authenticate( &self, auth_plugin: &str, salt: &[u8], info: CertifiedInfo, ) -> Result<bool> { let user_name = &info.user_name; let address = &info.user_client_address; let user_manager = self.session.get_user_manager(); let user_info = user_manager.get_user(user_name).await?; let input = &info.user_password; let saved = &user_info.password; let encode_password = Self::encoding_password(auth_plugin, salt, input, saved)?; user_manager .auth_user(CertifiedInfo::create(user_name, encode_password, address)) .await } fn encoding_password( auth_plugin: &str, salt: &[u8], input: &[u8], user_password: &[u8], ) -> Result<Vec<u8>> { match auth_plugin { "mysql_native_password" if input.is_empty() => Ok(vec![]), "mysql_native_password" => { // SHA1( password ) XOR SHA1( "20-bytes random data from server" <concat> SHA1( SHA1( password ) ) ) let mut m = sha1::Sha1::new(); m.update(salt); m.update(user_password); let result = m.digest().bytes(); if input.len() != result.len() { return Err(ErrorCode::SHA1CheckFailed("SHA1 check failed")); } let mut s = Vec::with_capacity(result.len()); for i in 0..result.len() { s.push(input[i] ^ result[i]); } Ok(s) } _ => Ok(input.to_vec()), } } fn do_prepare(&mut self, _: &str, writer: StatementMetaWriter<'_, W>) -> Result<()> { writer.error( ErrorKind::ER_UNKNOWN_ERROR, "Prepare is not support in Databend.".as_bytes(), )?; Ok(()) } fn do_execute( &mut self, _: u32, _: ParamParser<'_>, writer: QueryResultWriter<'_, W>, ) -> Result<()> { writer.error( ErrorKind::ER_UNKNOWN_ERROR, "Execute is not support in Databend.".as_bytes(), )?; Ok(()) } fn do_close(&mut self, _: u32) {} async fn do_query(&mut self, query: &str) -> Result<(Vec<DataBlock>, String)> { log::debug!("{}", query); let context = self.session.create_context().await?; context.attach_query_str(query); let query_parser = PlanParser::create(context.clone()); let (plan, hints) = query_parser.build_with_hint_from_sql(query); match hints .iter() .find(|v| v.error_code.is_some()) .and_then(|x| x.error_code) { None => Self::exec_query(plan, &context).await, Some(hint_error_code) => match Self::exec_query(plan, &context).await { Ok(_) => Err(ErrorCode::UnexpectedError(format!( "Expected server error code: {} but got: Ok.", hint_error_code ))), Err(error_code) => { if hint_error_code == error_code.code() { Ok((vec![DataBlock::empty()], String::from(""))) } else { let actual_code = error_code.code(); Err(error_code.add_message(format!( "Expected server error code: {} but got: {}.", hint_error_code, actual_code ))) } } }, } } async fn exec_query( plan: Result<PlanNode>, context: &DatabendQueryContextRef, ) -> Result<(Vec<DataBlock>, String)> { let instant = Instant::now(); let interpreter = InterpreterFactory::get(context.clone(), plan?)?; let data_stream = interpreter.execute().await?; histogram!( super::mysql_metrics::METRIC_INTERPRETER_USEDTIME, instant.elapsed() ); let collector = data_stream.collect::<Result<Vec<DataBlock>>>(); let query_result = collector.await; query_result.map(|data| (data, Self::extra_info(context, instant))) } fn extra_info(context: &DatabendQueryContextRef, instant: Instant) -> String { let progress = context.get_progress_value(); let seconds = instant.elapsed().as_nanos() as f64 / 1e9f64; format!( "Read {} rows, {} in {:.3} sec., {} rows/sec., {}/sec.", progress.read_rows, convert_byte_size(progress.read_bytes as 
f64), seconds, convert_number_size((progress.read_rows as f64) / (seconds as f64)), convert_byte_size((progress.read_bytes as f64) / (seconds as f64)), ) } fn do_init(&mut self, database_name: &str) -> Result<()> { let init_query = format!("USE {};", database_name); let do_query = self.do_query(&init_query); match Self::build_runtime() { Err(error_code) => Err(error_code), Ok(runtime) => match runtime.block_on(do_query) { Ok(_) => Ok(()), Err(error_code) => Err(error_code), }, } } fn build_runtime() -> Result<tokio::runtime::Runtime> { tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .map_err(|tokio_error| ErrorCode::TokioError(format!("{}", tokio_error))) } } impl<W: std::io::Write> InteractiveWorker<W> { pub fn create(session: SessionRef, client_addr: String) -> InteractiveWorker<W> { let mut bs = vec![0u8; 20]; let mut rng = rand::thread_rng(); rng.fill_bytes(bs.as_mut()); let mut scramble: [u8; 20] = [0; 20]; for i in 0..20 { scramble[i] = bs[i]; if scramble[i] == b'\0' || scramble[i] == b'$' { scramble[i] += 1; } } InteractiveWorker::<W> { session: session.clone(), base: InteractiveWorkerBase::<W> { session, generic_hold: PhantomData::default(), }, salt: scramble, // TODO: version version: crate::configs::DATABEND_COMMIT_VERSION.to_string(), client_addr, } } }
default_auth_plugin
identifier_name
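`encoding_password` in the record above inverts the `mysql_native_password` scramble: the client replies with SHA1(password) XOR SHA1(salt ++ stored), where stored = SHA1(SHA1(password)), so XOR-ing the same mask back out recovers SHA1(password). A toy round-trip of that identity, written against the same `sha1` 0.6-style API (`Sha1::new`/`update`/`digest().bytes()`) the source uses; the helper names here are illustrative, not from the source:

```rust
// Toy round-trip of the mysql_native_password scramble seen in
// `encoding_password`; uses the same sha1 0.6-style API as the source.
fn sha1_bytes(parts: &[&[u8]]) -> [u8; 20] {
    let mut m = sha1::Sha1::new();
    for p in parts {
        m.update(p);
    }
    m.digest().bytes()
}

fn xor20(a: &[u8; 20], b: &[u8; 20]) -> [u8; 20] {
    let mut out = [0u8; 20];
    for i in 0..20 {
        out[i] = a[i] ^ b[i];
    }
    out
}

fn main() {
    let password = b"secret";
    let salt = [7u8; 20]; // fixed salt just for the demo

    let sha1_pw = sha1_bytes(&[password]); // SHA1(password)
    let stored = sha1_bytes(&[&sha1_pw]); // SHA1(SHA1(password)), kept by the server

    // What the client sends: SHA1(password) XOR SHA1(salt ++ stored).
    let mask = sha1_bytes(&[&salt, &stored]);
    let client_reply = xor20(&sha1_pw, &mask);

    // Server side (as in `encoding_password`): XOR the mask back out.
    let recovered = xor20(&client_reply, &sha1_bytes(&[&salt, &stored]));
    assert_eq!(recovered, sha1_pw);
}
```

Note the source also short-circuits an empty client reply (empty password) to an empty vector before any hashing.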
mysql_interactive_worker.rs
// Copyright 2020 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::marker::PhantomData; use std::time::Instant; use common_base::tokio; use common_datablocks::DataBlock; use common_exception::ErrorCode; use common_exception::Result; use common_io::prelude::*; use common_planners::PlanNode; use metrics::histogram; use msql_srv::ErrorKind; use msql_srv::InitWriter; use msql_srv::MysqlShim; use msql_srv::ParamParser; use msql_srv::QueryResultWriter; use msql_srv::StatementMetaWriter; use rand::RngCore; use tokio_stream::StreamExt; use crate::interpreters::InterpreterFactory; use crate::servers::mysql::writers::DFInitResultWriter; use crate::servers::mysql::writers::DFQueryResultWriter; use crate::sessions::DatabendQueryContextRef; use crate::sessions::SessionRef; use crate::sql::PlanParser; use crate::users::CertifiedInfo; struct InteractiveWorkerBase<W: std::io::Write> { session: SessionRef, generic_hold: PhantomData<W>, } pub struct InteractiveWorker<W: std::io::Write> { session: SessionRef, base: InteractiveWorkerBase<W>, version: String, salt: [u8; 20], client_addr: String, } impl<W: std::io::Write> MysqlShim<W> for InteractiveWorker<W> { type Error = ErrorCode; fn version(&self) -> &str { self.version.as_str() } fn connect_id(&self) -> u32 { u32::from_le_bytes([0x08, 0x00, 0x00, 0x00]) } fn default_auth_plugin(&self) -> &str { "mysql_native_password" } fn auth_plugin_for_username(&self, _user: &[u8]) -> &str { "mysql_native_password" } fn salt(&self) -> [u8; 20] { self.salt } fn authenticate( &self, auth_plugin: &str, username: &[u8], salt: &[u8], auth_data: &[u8], ) -> bool { let username = String::from_utf8_lossy(username); let info = CertifiedInfo::create(&username, auth_data, &self.client_addr); let authenticate = self.base.authenticate(auth_plugin, salt, info); futures::executor::block_on(async move { match authenticate.await { Ok(res) => res, Err(failure) => { log::error!( "MySQL handler authenticate failed, \ user_name: {}, \ client_address: {}, \ failure_cause: {}", username, self.client_addr, failure ); false } } }) } fn on_prepare(&mut self, query: &str, writer: StatementMetaWriter<W>) -> Result<()> { if self.session.is_aborting() { writer.error( ErrorKind::ER_ABORTING_CONNECTION, "Aborting this connection. because we are try aborting server.".as_bytes(), )?; return Err(ErrorCode::AbortedSession( "Aborting this connection. because we are try aborting server.", )); } self.base.do_prepare(query, writer) } fn on_execute( &mut self, id: u32, param: ParamParser, writer: QueryResultWriter<W>, ) -> Result<()> { if self.session.is_aborting() { writer.error( ErrorKind::ER_ABORTING_CONNECTION, "Aborting this connection. because we are try aborting server.".as_bytes(), )?; return Err(ErrorCode::AbortedSession( "Aborting this connection. 
because we are try aborting server.", )); } self.base.do_execute(id, param, writer) } fn on_close(&mut self, id: u32) { self.base.do_close(id); } fn on_query(&mut self, query: &str, writer: QueryResultWriter<W>) -> Result<()> { if self.session.is_aborting() { writer.error( ErrorKind::ER_ABORTING_CONNECTION, "Aborting this connection. because we are try aborting server.".as_bytes(), )?; return Err(ErrorCode::AbortedSession( "Aborting this connection. because we are try aborting server.", )); } let mut writer = DFQueryResultWriter::create(writer); match InteractiveWorkerBase::<W>::build_runtime() { Ok(runtime) => { let instant = Instant::now(); let blocks = runtime.block_on(self.base.do_query(query)); let mut write_result = writer.write(blocks); if let Err(cause) = write_result { let suffix = format!("(while in query {})", query); write_result = Err(cause.add_message_back(suffix)); } histogram!( super::mysql_metrics::METRIC_MYSQL_PROCESSOR_REQUEST_DURATION, instant.elapsed() ); write_result } Err(error) => writer.write(Err(error)), } } fn on_init(&mut self, database_name: &str, writer: InitWriter<W>) -> Result<()> { if self.session.is_aborting() { writer.error( ErrorKind::ER_ABORTING_CONNECTION, "Aborting this connection. because we are try aborting server.".as_bytes(), )?; return Err(ErrorCode::AbortedSession( "Aborting this connection. because we are try aborting server.", )); } DFInitResultWriter::create(writer).write(self.base.do_init(database_name)) } } impl<W: std::io::Write> InteractiveWorkerBase<W> { async fn authenticate( &self, auth_plugin: &str, salt: &[u8], info: CertifiedInfo, ) -> Result<bool>
fn encoding_password( auth_plugin: &str, salt: &[u8], input: &[u8], user_password: &[u8], ) -> Result<Vec<u8>> { match auth_plugin { "mysql_native_password" if input.is_empty() => Ok(vec![]), "mysql_native_password" => { // SHA1( password ) XOR SHA1( "20-bytes random data from server" <concat> SHA1( SHA1( password ) ) ) let mut m = sha1::Sha1::new(); m.update(salt); m.update(user_password); let result = m.digest().bytes(); if input.len() != result.len() { return Err(ErrorCode::SHA1CheckFailed("SHA1 check failed")); } let mut s = Vec::with_capacity(result.len()); for i in 0..result.len() { s.push(input[i] ^ result[i]); } Ok(s) } _ => Ok(input.to_vec()), } } fn do_prepare(&mut self, _: &str, writer: StatementMetaWriter<'_, W>) -> Result<()> { writer.error( ErrorKind::ER_UNKNOWN_ERROR, "Prepare is not support in Databend.".as_bytes(), )?; Ok(()) } fn do_execute( &mut self, _: u32, _: ParamParser<'_>, writer: QueryResultWriter<'_, W>, ) -> Result<()> { writer.error( ErrorKind::ER_UNKNOWN_ERROR, "Execute is not support in Databend.".as_bytes(), )?; Ok(()) } fn do_close(&mut self, _: u32) {} async fn do_query(&mut self, query: &str) -> Result<(Vec<DataBlock>, String)> { log::debug!("{}", query); let context = self.session.create_context().await?; context.attach_query_str(query); let query_parser = PlanParser::create(context.clone()); let (plan, hints) = query_parser.build_with_hint_from_sql(query); match hints .iter() .find(|v| v.error_code.is_some()) .and_then(|x| x.error_code) { None => Self::exec_query(plan, &context).await, Some(hint_error_code) => match Self::exec_query(plan, &context).await { Ok(_) => Err(ErrorCode::UnexpectedError(format!( "Expected server error code: {} but got: Ok.", hint_error_code ))), Err(error_code) => { if hint_error_code == error_code.code() { Ok((vec![DataBlock::empty()], String::from(""))) } else { let actual_code = error_code.code(); Err(error_code.add_message(format!( "Expected server error code: {} but got: {}.", hint_error_code, actual_code ))) } } }, } } async fn exec_query( plan: Result<PlanNode>, context: &DatabendQueryContextRef, ) -> Result<(Vec<DataBlock>, String)> { let instant = Instant::now(); let interpreter = InterpreterFactory::get(context.clone(), plan?)?; let data_stream = interpreter.execute().await?; histogram!( super::mysql_metrics::METRIC_INTERPRETER_USEDTIME, instant.elapsed() ); let collector = data_stream.collect::<Result<Vec<DataBlock>>>(); let query_result = collector.await; query_result.map(|data| (data, Self::extra_info(context, instant))) } fn extra_info(context: &DatabendQueryContextRef, instant: Instant) -> String { let progress = context.get_progress_value(); let seconds = instant.elapsed().as_nanos() as f64 / 1e9f64; format!( "Read {} rows, {} in {:.3} sec., {} rows/sec., {}/sec.", progress.read_rows, convert_byte_size(progress.read_bytes as f64), seconds, convert_number_size((progress.read_rows as f64) / (seconds as f64)), convert_byte_size((progress.read_bytes as f64) / (seconds as f64)), ) } fn do_init(&mut self, database_name: &str) -> Result<()> { let init_query = format!("USE {};", database_name); let do_query = self.do_query(&init_query); match Self::build_runtime() { Err(error_code) => Err(error_code), Ok(runtime) => match runtime.block_on(do_query) { Ok(_) => Ok(()), Err(error_code) => Err(error_code), }, } } fn build_runtime() -> Result<tokio::runtime::Runtime> { tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .map_err(|tokio_error| ErrorCode::TokioError(format!("{}", tokio_error))) } } impl<W: 
std::io::Write> InteractiveWorker<W> { pub fn create(session: SessionRef, client_addr: String) -> InteractiveWorker<W> { let mut bs = vec![0u8; 20]; let mut rng = rand::thread_rng(); rng.fill_bytes(bs.as_mut()); let mut scramble: [u8; 20] = [0; 20]; for i in 0..20 { scramble[i] = bs[i]; if scramble[i] == b'\0' || scramble[i] == b'$' { scramble[i] += 1; } } InteractiveWorker::<W> { session: session.clone(), base: InteractiveWorkerBase::<W> { session, generic_hold: PhantomData::default(), }, salt: scramble, // TODO: version version: crate::configs::DATABEND_COMMIT_VERSION.to_string(), client_addr, } } }
{ let user_name = &info.user_name; let address = &info.user_client_address; let user_manager = self.session.get_user_manager(); let user_info = user_manager.get_user(user_name).await?; let input = &info.user_password; let saved = &user_info.password; let encode_password = Self::encoding_password(auth_plugin, salt, input, saved)?; user_manager .auth_user(CertifiedInfo::create(user_name, encode_password, address)) .await }
identifier_body
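`do_query` in the record above treats parser hints as assertions: when a hint declares an expected error code, an `Ok` result becomes a failure, a matching error code becomes success, and a mismatched code is reported with both values. A stripped-down sketch of that control flow; the `ErrorCode` struct and `exec` stub are hypothetical stand-ins for the real types:

```rust
// Hypothetical stand-ins for the source's ErrorCode and query execution.
#[derive(Debug)]
struct ErrorCode {
    code: u16,
    message: String,
}

fn exec(query: &str) -> Result<Vec<String>, ErrorCode> {
    match query {
        "SELECT 1" => Ok(vec!["1".to_string()]),
        _ => Err(ErrorCode { code: 1005, message: "table not found".into() }),
    }
}

// Mirror the hint check in `do_query`: an expected error code inverts
// the meaning of Ok/Err.
fn exec_with_hint(query: &str, hint_error_code: Option<u16>) -> Result<Vec<String>, ErrorCode> {
    match hint_error_code {
        None => exec(query),
        Some(expected) => match exec(query) {
            Ok(_) => Err(ErrorCode {
                code: 0,
                message: format!("Expected server error code: {} but got: Ok.", expected),
            }),
            Err(e) if e.code == expected => Ok(vec![]), // hint satisfied
            Err(e) => Err(ErrorCode {
                code: e.code,
                message: format!(
                    "Expected server error code: {} but got: {}. ({})",
                    expected, e.code, e.message
                ),
            }),
        },
    }
}

fn main() {
    assert!(exec_with_hint("SELECT 1", None).is_ok());
    assert!(exec_with_hint("SELECT * FROM missing", Some(1005)).is_ok());
    assert!(exec_with_hint("SELECT 1", Some(1005)).is_err());
}
```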
lib.rs
use itertools::multiunzip; use rust_htslib::{bam, bam::ext::BamRecordExtensions}; use std::collections::HashMap; use std::fmt::{Debug, Display}; /// Merge two lists into a sorted list /// Normal sort is supposed to be very fast on two sorted lists /// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6> pub fn merge_two_lists<T>(left: &[T], right: &[T]) -> Vec<T> where T: Ord, T: Clone, { let mut x: Vec<T> = left.iter().chain(right.iter()).cloned().collect(); x.sort(); x } /// Merge two lists based on a key /// Normal sort is supposed to be very fast on two sorted lists /// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6> /// ``` /// use bamlift::*; /// let x = vec![1,3]; /// let x_q = vec!["a","b"]; /// let y = vec![2,4]; /// let y_q = vec!["c", "d"]; /// let z = merge_two_lists_with_qual(&x, &x_q, &y, &y_q); /// assert_eq!(z, vec![(1,"a"), (2,"c"), (3,"b"), (4, "d")]); /// ``` pub fn merge_two_lists_with_qual<T, U>( left: &[T], left_q: &[U], right: &[T], right_q: &[U], ) -> Vec<(T, U)> where T: Ord, T: Clone, U: Clone, { let l = left .iter() .zip(left_q.iter()) .map(|(a, b)| (a.clone(), b.clone())); let r = right .iter() .zip(right_q.iter()) .map(|(a, b)| (a.clone(), b.clone())); let mut x: Vec<(T, U)> = l.chain(r).collect(); x.sort_by_key(|(a, _b)| a.clone()); x } /// get positions on the complimented sequence in the cigar record pub fn positions_on_complimented_sequence( record: &bam::Record, input_positions: &[i64], ) -> Vec<i64> { // reverse positions if needed let positions: Vec<i64> = if record.is_reverse() { let seq_len = i64::try_from(record.seq_len()).unwrap(); input_positions .iter() .rev() .map(|p| seq_len - p - 1) .collect() } else { input_positions.to_vec() }; positions } /// get positions on the complimented sequence in the cigar record pub fn positions_on_complimented_sequence_in_place( record: &bam::Record, input_positions: &mut Vec<i64>, part_of_range: bool, ) { if !record.is_reverse() { return; } let seq_len = i64::try_from(record.seq_len()).unwrap(); // need to correct for going from [) to (] if we are part of a range let offset = if part_of_range { 0 } else { 1 }; for p in input_positions.iter_mut() { *p = seq_len - *p - offset; } input_positions.reverse(); } #[inline(always)] pub fn
<T>(v: &[T]) -> bool where T: Ord, { v.windows(2).all(|w| w[0] <= w[1]) } /// search a sorted array for insertion positions of another sorted array /// returned index i satisfies /// left /// a\[i-1\] < v <= a\[i\] /// right /// a\[i-1\] <= v < a\[i\] /// <https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html> /// ``` /// use bamlift::*; /// let a = vec![1, 2, 3, 5, 6, 7, 8, 9, 10]; /// let v = vec![0, 1, 3, 4, 11, 11]; /// let indexes = search_sorted(&a, &v); /// assert_eq!(indexes, vec![0, 0, 2, 3, 9, 9]); /// ``` pub fn search_sorted<T>(a: &[T], v: &[T]) -> Vec<usize> where T: Ord, T: Display, [T]: Debug, { if !is_sorted(v) { panic!("v is not sorted: {:?}", v); } let mut indexes = Vec::with_capacity(v.len()); let mut a_idx = 0; for cur_v in v { while a_idx < a.len() { // check starting condition if a_idx == 0 && *cur_v <= a[a_idx] { indexes.push(0); break; } else if a_idx == 0 { a_idx += 1; } // end condition if a_idx == a.len() - 1 && *cur_v > a[a_idx] { indexes.push(a_idx + 1); break; } // middle of the array else if (a[a_idx - 1] < *cur_v) && (*cur_v <= a[a_idx]) { indexes.push(a_idx); break; } a_idx += 1; } } log::trace!("search_sorted: {:?}\n{:?}", v, indexes); indexes } // // CLOSEST LIFTOVER FUNCTIONS // /// this is a helper function for liftover_closest that should only be called from there /// The exception is test cases, where it is easier to test this function /// directly. fn liftover_closest( positions: &[i64], aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, ) -> Vec<Option<i64>> { // skip empty if positions.is_empty() { return vec![]; } if aligned_block_pairs.is_empty() { return positions.iter().map(|_x| None).collect(); } assert!( is_sorted(positions), "Positions must be sorted before calling liftover!"
); // find the closest position for every position let mut starting_block = 0; let ending_block = aligned_block_pairs.len(); let mut pos_mapping = HashMap::new(); for cur_pos in positions { pos_mapping.insert(cur_pos, (-1, i64::MAX)); let mut current_block = 0; for block_index in starting_block..ending_block { // get the current alignment block let ([q_st, q_en], [r_st, r_en]) = &aligned_block_pairs[block_index]; // get the previous closest position let (best_r_pos, best_diff) = pos_mapping.get_mut(cur_pos).unwrap(); // exact match found if cur_pos >= &q_st && cur_pos < &q_en { let dist_from_start = cur_pos - q_st; *best_diff = 0; *best_r_pos = r_st + dist_from_start; break; } // we are before the start of the block else if cur_pos < &q_st { let diff = (q_st - cur_pos).abs(); if diff < *best_diff { *best_diff = diff; *best_r_pos = *r_st; } } // we are past the end of the block else if cur_pos >= &q_en { let diff = (q_en - cur_pos).abs(); if diff < *best_diff { *best_diff = diff; *best_r_pos = *r_en; } // we don't need to return to previous blocks since the input is sorted starting_block = current_block; } current_block += 1; } } let mut rtn = vec![]; for q_pos in positions { let (r_pos, diff) = pos_mapping.get(q_pos).unwrap(); if *r_pos == -1 && *diff == i64::MAX { rtn.push(None); } else { rtn.push(Some(*r_pos)); } } assert_eq!(rtn.len(), positions.len()); rtn } /// find the closest reference positions for a list of query positions pub fn lift_reference_positions( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, query_positions: &[i64], ) -> Vec<Option<i64>> { liftover_closest(query_positions, aligned_block_pairs) } /// find the closest query positions for a list of reference positions pub fn lift_query_positions( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, reference_positions: &[i64], ) -> Vec<Option<i64>> { // if lifting to the query, we need to reverse the pairs let aligned_block_pairs = aligned_block_pairs.iter().map(|(q, r)| (*r, *q)).collect(); liftover_closest(reference_positions, &aligned_block_pairs) } fn lift_range( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, starts: &[i64], ends: &[i64], lift_reference_to_query: bool, ) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) { assert_eq!(starts.len(), ends.len()); let (ref_starts, ref_ends) = if !lift_reference_to_query { ( lift_reference_positions(aligned_block_pairs, starts), lift_reference_positions(aligned_block_pairs, ends), ) } else { ( lift_query_positions(aligned_block_pairs, starts), lift_query_positions(aligned_block_pairs, ends), ) }; assert_eq!(ref_starts.len(), ref_ends.len()); let rtn = ref_starts .into_iter() .zip(ref_ends.into_iter()) .map(|(start, end)| match (start, end) { (Some(start), Some(end)) => { if start == end { (None, None, None) } else { (Some(start), Some(end), Some(end - start)) } } _ => (None, None, None), }) .collect::<Vec<_>>(); multiunzip(rtn) } /// Find the closest range but hopefully better pub fn lift_query_range( record: &bam::Record, starts: &[i64], ends: &[i64], ) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) { // get the aligned block pairs let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect(); lift_range(&aligned_block_pairs, starts, ends, false) } // // EXACT LIFTOVER FUNCTIONS // /// liftover positions using the cigar string fn liftover_exact( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, positions: &[i64], lift_reference_to_query: bool, ) -> Vec<Option<i64>> { assert!( is_sorted(positions), "Positions must be sorted 
before calling liftover!" ); // find the shared positions in the reference let mut return_positions = vec![]; let mut cur_idx = 0; // ends are not inclusive, I checked. for ([q_st, q_en], [r_st, r_en]) in aligned_block_pairs { let (st, en) = if !lift_reference_to_query { (q_st, q_en) } else { (r_st, r_en) }; // check bounds if cur_idx == positions.len() { break; } let mut cur_pos = positions[cur_idx]; // need to go to the next block while cur_pos < *en { if cur_pos >= *st { let dist_from_start = cur_pos - st; let rtn_pos = if !lift_reference_to_query { r_st + dist_from_start } else { q_st + dist_from_start }; return_positions.push(Some(rtn_pos)); } else { return_positions.push(None); } // reset current position cur_idx += 1; if cur_idx == positions.len() { break; } cur_pos = positions[cur_idx]; } } // add values for things that won't lift at the end while positions.len() > return_positions.len() { return_positions.push(None); } assert_eq!(positions.len(), return_positions.len()); return_positions } pub fn lift_reference_positions_exact( record: &bam::Record, query_positions: &[i64], ) -> Vec<Option<i64>> { if record.is_unmapped() { query_positions.iter().map(|_x| None).collect() } else { let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect(); liftover_exact(&aligned_block_pairs, query_positions, false) } } pub fn lift_query_positions_exact( record: &bam::Record, reference_positions: &[i64], ) -> Vec<Option<i64>> { if record.is_unmapped() { reference_positions.iter().map(|_x| None).collect() } else { let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect(); liftover_exact(&aligned_block_pairs, reference_positions, true) } }
is_sorted
identifier_name
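The helpers above ship with doctests; as a quick usage recap (crate name `bamlift` taken from those doctests), `search_sorted` returns left-biased insertion points in the sense of `numpy.searchsorted`:

```rust
// Usage recap for the helpers above, values taken from their doctests.
use bamlift::{is_sorted, merge_two_lists, search_sorted};

fn main() {
    assert!(is_sorted(&[1, 2, 2, 3]));

    // Sorted merge of two sorted lists.
    let merged = merge_two_lists(&[1, 3, 5], &[2, 4]);
    assert_eq!(merged, vec![1, 2, 3, 4, 5]);

    // Left-biased insertion points: a[i-1] < v <= a[i].
    let a = vec![1, 2, 3, 5, 6, 7, 8, 9, 10];
    let v = vec![0, 1, 3, 4, 11, 11];
    assert_eq!(search_sorted(&a, &v), vec![0, 0, 2, 3, 9, 9]);
}
```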
lib.rs
use itertools::multiunzip; use rust_htslib::{bam, bam::ext::BamRecordExtensions}; use std::collections::HashMap; use std::fmt::{Debug, Display}; /// Merge two lists into a sorted list /// Normal sort is supposed to be very fast on two sorted lists /// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6> pub fn merge_two_lists<T>(left: &[T], right: &[T]) -> Vec<T> where T: Ord, T: Clone, { let mut x: Vec<T> = left.iter().chain(right.iter()).cloned().collect(); x.sort(); x } /// Merge two lists based on a key /// Normal sort is supposed to be very fast on two sorted lists /// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6> /// ``` /// use bamlift::*; /// let x = vec![1,3]; /// let x_q = vec!["a","b"]; /// let y = vec![2,4]; /// let y_q = vec!["c", "d"]; /// let z = merge_two_lists_with_qual(&x, &x_q, &y, &y_q); /// assert_eq!(z, vec![(1,"a"), (2,"c"), (3,"b"), (4, "d")]); /// ``` pub fn merge_two_lists_with_qual<T, U>( left: &[T], left_q: &[U], right: &[T], right_q: &[U], ) -> Vec<(T, U)> where T: Ord, T: Clone, U: Clone, { let l = left .iter() .zip(left_q.iter()) .map(|(a, b)| (a.clone(), b.clone())); let r = right .iter() .zip(right_q.iter()) .map(|(a, b)| (a.clone(), b.clone())); let mut x: Vec<(T, U)> = l.chain(r).collect(); x.sort_by_key(|(a, _b)| a.clone()); x } /// get positions on the complimented sequence in the cigar record pub fn positions_on_complimented_sequence( record: &bam::Record, input_positions: &[i64], ) -> Vec<i64> { // reverse positions if needed let positions: Vec<i64> = if record.is_reverse() { let seq_len = i64::try_from(record.seq_len()).unwrap(); input_positions .iter() .rev() .map(|p| seq_len - p - 1) .collect() } else { input_positions.to_vec() }; positions } /// get positions on the complimented sequence in the cigar record pub fn positions_on_complimented_sequence_in_place( record: &bam::Record, input_positions: &mut Vec<i64>, part_of_range: bool, ) { if !record.is_reverse() { return; } let seq_len = i64::try_from(record.seq_len()).unwrap(); // need to correct for going from [) to (] if we are part of a range let offset = if part_of_range { 0 } else { 1 }; for p in input_positions.iter_mut() { *p = seq_len - *p - offset; } input_positions.reverse(); } #[inline(always)] pub fn is_sorted<T>(v: &[T]) -> bool where T: Ord, { v.windows(2).all(|w| w[0] <= w[1]) } /// search a sorted array for insertions positions of another sorted array /// returned index i satisfies /// left /// a\[i-1\] < v <= a\[i\] /// right /// a\[i-1\] <= v < a\[i\] /// <https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html> /// ``` /// use bamlift::*; /// let a = vec![1, 2, 3, 5, 6, 7, 8, 9, 10]; /// let v = vec![0, 1, 3, 4, 11, 11]; /// let indexes = search_sorted(&a, &v); /// assert_eq!(indexes, vec![0, 0, 2, 3, 9, 9]); /// ``` pub fn search_sorted<T>(a: &[T], v: &[T]) -> Vec<usize> where T: Ord, T: Display, [T]: Debug, { if !is_sorted(v) { panic!("v is not sorted: {:?}", v); } let mut indexes = Vec::with_capacity(v.len()); let mut a_idx = 0; for cur_v in v { while a_idx < a.len() { // check starting condition if a_idx == 0 && *cur_v <= a[a_idx] { indexes.push(0); break; } else if a_idx == 0 { a_idx += 1; } // end condition if a_idx == a.len() - 1 && *cur_v > a[a_idx] { indexes.push(a_idx + 1); break; } // middle of the array else if (a[a_idx - 1] < *cur_v) && (*cur_v <= a[a_idx]) { indexes.push(a_idx); break; } a_idx += 1; } } log::trace!("search_sorted: {:?}\n{:?}", v, indexes); indexes } // // CLOSEST LIFTOVER 
FUNCTIONS // /// this is a helper function for liftover_closest that should only be called from there /// The exception for this is test cases, where it should be easier to test this function /// directly. fn liftover_closest( positions: &[i64], aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, ) -> Vec<Option<i64>> { // skip empty if positions.is_empty() { return vec![]; } if aligned_block_pairs.is_empty() { return positions.iter().map(|_x| None).collect(); } assert!( is_sorted(positions), "Positions must be sorted before calling liftover!"
let ending_block = aligned_block_pairs.len(); let mut pos_mapping = HashMap::new(); for cur_pos in positions { pos_mapping.insert(cur_pos, (-1, i64::MAX)); let mut current_block = 0; for block_index in starting_block..ending_block { // get the current alignment block let ([q_st, q_en], [r_st, r_en]) = &aligned_block_pairs[block_index]; // get the previous closest position let (best_r_pos, best_diff) = pos_mapping.get_mut(cur_pos).unwrap(); // exact match found if cur_pos >= &q_st && cur_pos < &q_en { let dist_from_start = cur_pos - q_st; *best_diff = 0; *best_r_pos = r_st + dist_from_start; break; } // we are before the start of the block else if cur_pos < &q_st { let diff = (q_st - cur_pos).abs(); if diff < *best_diff { *best_diff = diff; *best_r_pos = *r_st; } } // we are past the end of the block else if cur_pos >= &q_en { let diff = (q_en - cur_pos).abs(); if diff < *best_diff { *best_diff = diff; *best_r_pos = *r_en; } // we don't need to return to previous blocks since the input is sorted starting_block = current_block; } current_block += 1; } } let mut rtn = vec![]; for q_pos in positions { let (r_pos, diff) = pos_mapping.get(q_pos).unwrap(); if *r_pos == -1 && *diff == i64::MAX { rtn.push(None); } else { rtn.push(Some(*r_pos)); } } assert_eq!(rtn.len(), positions.len()); rtn } /// find the closest reference positions for a list of query positions pub fn lift_reference_positions( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, query_positions: &[i64], ) -> Vec<Option<i64>> { liftover_closest(query_positions, aligned_block_pairs) } /// find the closest query positions for a list of reference positions pub fn lift_query_positions( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, reference_positions: &[i64], ) -> Vec<Option<i64>> { // if lifting to the query, we need to reverse the pairs let aligned_block_pairs = aligned_block_pairs.iter().map(|(q, r)| (*r, *q)).collect(); liftover_closest(reference_positions, &aligned_block_pairs) } fn lift_range( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, starts: &[i64], ends: &[i64], lift_reference_to_query: bool, ) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) { assert_eq!(starts.len(), ends.len()); let (ref_starts, ref_ends) = if !lift_reference_to_query { ( lift_reference_positions(aligned_block_pairs, starts), lift_reference_positions(aligned_block_pairs, ends), ) } else { ( lift_query_positions(aligned_block_pairs, starts), lift_query_positions(aligned_block_pairs, ends), ) }; assert_eq!(ref_starts.len(), ref_ends.len()); let rtn = ref_starts .into_iter() .zip(ref_ends.into_iter()) .map(|(start, end)| match (start, end) { (Some(start), Some(end)) => { if start == end { (None, None, None) } else { (Some(start), Some(end), Some(end - start)) } } _ => (None, None, None), }) .collect::<Vec<_>>(); multiunzip(rtn) } /// Find the closest range but hopefully better pub fn lift_query_range( record: &bam::Record, starts: &[i64], ends: &[i64], ) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) { // get the aligned block pairs let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect(); lift_range(&aligned_block_pairs, starts, ends, false) } // // EXACT LIFTOVER FUNCTIONS // /// liftover positions using the cigar string fn liftover_exact( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, positions: &[i64], lift_reference_to_query: bool, ) -> Vec<Option<i64>> { assert!( is_sorted(positions), "Positions must be sorted before calling liftover!" 
); // find the shared positions in the reference let mut return_positions = vec![]; let mut cur_idx = 0; // ends are not inclusive, I checked. for ([q_st, q_en], [r_st, r_en]) in aligned_block_pairs { let (st, en) = if !lift_reference_to_query { (q_st, q_en) } else { (r_st, r_en) }; // check bounds if cur_idx == positions.len() { break; } let mut cur_pos = positions[cur_idx]; // need to go to the next block while cur_pos < *en { if cur_pos >= *st { let dist_from_start = cur_pos - st; let rtn_pos = if !lift_reference_to_query { r_st + dist_from_start } else { q_st + dist_from_start }; return_positions.push(Some(rtn_pos)); } else { return_positions.push(None); } // reset current position cur_idx += 1; if cur_idx == positions.len() { break; } cur_pos = positions[cur_idx]; } } // add values for things that won't lift at the end while positions.len() > return_positions.len() { return_positions.push(None); } assert_eq!(positions.len(), return_positions.len()); return_positions } pub fn lift_reference_positions_exact( record: &bam::Record, query_positions: &[i64], ) -> Vec<Option<i64>> { if record.is_unmapped() { query_positions.iter().map(|_x| None).collect() } else { let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect(); liftover_exact(&aligned_block_pairs, query_positions, false) } } pub fn lift_query_positions_exact( record: &bam::Record, reference_positions: &[i64], ) -> Vec<Option<i64>> { if record.is_unmapped() { reference_positions.iter().map(|_x| None).collect() } else { let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect(); liftover_exact(&aligned_block_pairs, reference_positions, true) } }
); // find the closest position for every position let mut starting_block = 0;
random_line_split
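`liftover_exact` above walks aligned block pairs `([q_st, q_en], [r_st, r_en])` and maps a query position inside a block to `r_st + (pos - q_st)`, yielding `None` for positions that fall between blocks. A self-contained toy of just that per-block mapping (not the full two-pointer sweep over sorted inputs):

```rust
// Toy version of the block-pair mapping used by `liftover_exact`:
// a query position inside [q_st, q_en) lands at r_st + (pos - q_st).
fn lift_one(blocks: &[([i64; 2], [i64; 2])], pos: i64) -> Option<i64> {
    for ([q_st, q_en], [r_st, _r_en]) in blocks {
        if pos >= *q_st && pos < *q_en {
            return Some(r_st + (pos - q_st));
        }
    }
    None // position falls in an insertion/clip, so it does not lift
}

fn main() {
    // Query 0..10 aligns to reference 100..110, then query 15..20 to 110..115.
    let blocks = vec![([0, 10], [100, 110]), ([15, 20], [110, 115])];
    assert_eq!(lift_one(&blocks, 3), Some(103));
    assert_eq!(lift_one(&blocks, 12), None); // inside the unaligned gap
    assert_eq!(lift_one(&blocks, 15), Some(110));
}
```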
lib.rs
use itertools::multiunzip; use rust_htslib::{bam, bam::ext::BamRecordExtensions}; use std::collections::HashMap; use std::fmt::{Debug, Display}; /// Merge two lists into a sorted list /// Normal sort is supposed to be very fast on two sorted lists /// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6> pub fn merge_two_lists<T>(left: &[T], right: &[T]) -> Vec<T> where T: Ord, T: Clone, { let mut x: Vec<T> = left.iter().chain(right.iter()).cloned().collect(); x.sort(); x } /// Merge two lists based on a key /// Normal sort is supposed to be very fast on two sorted lists /// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6> /// ``` /// use bamlift::*; /// let x = vec![1,3]; /// let x_q = vec!["a","b"]; /// let y = vec![2,4]; /// let y_q = vec!["c", "d"]; /// let z = merge_two_lists_with_qual(&x, &x_q, &y, &y_q); /// assert_eq!(z, vec![(1,"a"), (2,"c"), (3,"b"), (4, "d")]); /// ``` pub fn merge_two_lists_with_qual<T, U>( left: &[T], left_q: &[U], right: &[T], right_q: &[U], ) -> Vec<(T, U)> where T: Ord, T: Clone, U: Clone, { let l = left .iter() .zip(left_q.iter()) .map(|(a, b)| (a.clone(), b.clone())); let r = right .iter() .zip(right_q.iter()) .map(|(a, b)| (a.clone(), b.clone())); let mut x: Vec<(T, U)> = l.chain(r).collect(); x.sort_by_key(|(a, _b)| a.clone()); x } /// get positions on the complimented sequence in the cigar record pub fn positions_on_complimented_sequence( record: &bam::Record, input_positions: &[i64], ) -> Vec<i64> { // reverse positions if needed let positions: Vec<i64> = if record.is_reverse() { let seq_len = i64::try_from(record.seq_len()).unwrap(); input_positions .iter() .rev() .map(|p| seq_len - p - 1) .collect() } else { input_positions.to_vec() }; positions } /// get positions on the complimented sequence in the cigar record pub fn positions_on_complimented_sequence_in_place( record: &bam::Record, input_positions: &mut Vec<i64>, part_of_range: bool, ) { if !record.is_reverse() { return; } let seq_len = i64::try_from(record.seq_len()).unwrap(); // need to correct for going from [) to (] if we are part of a range let offset = if part_of_range { 0 } else { 1 }; for p in input_positions.iter_mut() { *p = seq_len - *p - offset; } input_positions.reverse(); } #[inline(always)] pub fn is_sorted<T>(v: &[T]) -> bool where T: Ord, { v.windows(2).all(|w| w[0] <= w[1]) } /// search a sorted array for insertions positions of another sorted array /// returned index i satisfies /// left /// a\[i-1\] < v <= a\[i\] /// right /// a\[i-1\] <= v < a\[i\] /// <https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html> /// ``` /// use bamlift::*; /// let a = vec![1, 2, 3, 5, 6, 7, 8, 9, 10]; /// let v = vec![0, 1, 3, 4, 11, 11]; /// let indexes = search_sorted(&a, &v); /// assert_eq!(indexes, vec![0, 0, 2, 3, 9, 9]); /// ``` pub fn search_sorted<T>(a: &[T], v: &[T]) -> Vec<usize> where T: Ord, T: Display, [T]: Debug, { if !is_sorted(v) { panic!("v is not sorted: {:?}", v); } let mut indexes = Vec::with_capacity(v.len()); let mut a_idx = 0; for cur_v in v { while a_idx < a.len() { // check starting condition if a_idx == 0 && *cur_v <= a[a_idx] { indexes.push(0); break; } else if a_idx == 0 { a_idx += 1; } // end condition if a_idx == a.len() - 1 && *cur_v > a[a_idx] { indexes.push(a_idx + 1); break; } // middle of the array else if (a[a_idx - 1] < *cur_v) && (*cur_v <= a[a_idx]) { indexes.push(a_idx); break; } a_idx += 1; } } log::trace!("search_sorted: {:?}\n{:?}", v, indexes); indexes } // // CLOSEST LIFTOVER 
FUNCTIONS // /// this is a helper function for liftover_closest that should only be called from there /// The exception for this is test cases, where it should be easier to test this function /// directly. fn liftover_closest( positions: &[i64], aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, ) -> Vec<Option<i64>> { // skip empty if positions.is_empty() { return vec![]; } if aligned_block_pairs.is_empty() { return positions.iter().map(|_x| None).collect(); } assert!( is_sorted(positions), "Positions must be sorted before calling liftover!" ); // find the closest position for every position let mut starting_block = 0; let ending_block = aligned_block_pairs.len(); let mut pos_mapping = HashMap::new(); for cur_pos in positions { pos_mapping.insert(cur_pos, (-1, i64::MAX)); let mut current_block = 0; for block_index in starting_block..ending_block { // get the current alignment block let ([q_st, q_en], [r_st, r_en]) = &aligned_block_pairs[block_index]; // get the previous closest position let (best_r_pos, best_diff) = pos_mapping.get_mut(cur_pos).unwrap(); // exact match found if cur_pos >= &q_st && cur_pos < &q_en { let dist_from_start = cur_pos - q_st; *best_diff = 0; *best_r_pos = r_st + dist_from_start; break; } // we are before the start of the block else if cur_pos < &q_st { let diff = (q_st - cur_pos).abs(); if diff < *best_diff { *best_diff = diff; *best_r_pos = *r_st; } } // we are past the end of the block else if cur_pos >= &q_en { let diff = (q_en - cur_pos).abs(); if diff < *best_diff { *best_diff = diff; *best_r_pos = *r_en; } // we don't need to return to previous blocks since the input is sorted starting_block = current_block; } current_block += 1; } } let mut rtn = vec![]; for q_pos in positions { let (r_pos, diff) = pos_mapping.get(q_pos).unwrap(); if *r_pos == -1 && *diff == i64::MAX { rtn.push(None); } else { rtn.push(Some(*r_pos)); } } assert_eq!(rtn.len(), positions.len()); rtn } /// find the closest reference positions for a list of query positions pub fn lift_reference_positions( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, query_positions: &[i64], ) -> Vec<Option<i64>> { liftover_closest(query_positions, aligned_block_pairs) } /// find the closest query positions for a list of reference positions pub fn lift_query_positions( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, reference_positions: &[i64], ) -> Vec<Option<i64>> { // if lifting to the query, we need to reverse the pairs let aligned_block_pairs = aligned_block_pairs.iter().map(|(q, r)| (*r, *q)).collect(); liftover_closest(reference_positions, &aligned_block_pairs) } fn lift_range( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, starts: &[i64], ends: &[i64], lift_reference_to_query: bool, ) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) { assert_eq!(starts.len(), ends.len()); let (ref_starts, ref_ends) = if !lift_reference_to_query { ( lift_reference_positions(aligned_block_pairs, starts), lift_reference_positions(aligned_block_pairs, ends), ) } else { ( lift_query_positions(aligned_block_pairs, starts), lift_query_positions(aligned_block_pairs, ends), ) }; assert_eq!(ref_starts.len(), ref_ends.len()); let rtn = ref_starts .into_iter() .zip(ref_ends.into_iter()) .map(|(start, end)| match (start, end) { (Some(start), Some(end)) => { if start == end { (None, None, None) } else { (Some(start), Some(end), Some(end - start)) } } _ => (None, None, None), }) .collect::<Vec<_>>(); multiunzip(rtn) } /// Find the closest range but hopefully better pub fn lift_query_range( record: &bam::Record, starts: 
&[i64], ends: &[i64], ) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) { // get the aligned block pairs let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect(); lift_range(&aligned_block_pairs, starts, ends, false) } // // EXACT LIFTOVER FUNCTIONS // /// liftover positions using the cigar string fn liftover_exact( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, positions: &[i64], lift_reference_to_query: bool, ) -> Vec<Option<i64>> { assert!( is_sorted(positions), "Positions must be sorted before calling liftover!" ); // find the shared positions in the reference let mut return_positions = vec![]; let mut cur_idx = 0; // ends are not inclusive, I checked. for ([q_st, q_en], [r_st, r_en]) in aligned_block_pairs { let (st, en) = if !lift_reference_to_query { (q_st, q_en) } else { (r_st, r_en) }; // check bounds if cur_idx == positions.len() { break; } let mut cur_pos = positions[cur_idx]; // need to go to the next block while cur_pos < *en { if cur_pos >= *st { let dist_from_start = cur_pos - st; let rtn_pos = if !lift_reference_to_query { r_st + dist_from_start } else { q_st + dist_from_start }; return_positions.push(Some(rtn_pos)); } else { return_positions.push(None); } // reset current position cur_idx += 1; if cur_idx == positions.len() { break; } cur_pos = positions[cur_idx]; } } // add values for things that won't lift at the end while positions.len() > return_positions.len() { return_positions.push(None); } assert_eq!(positions.len(), return_positions.len()); return_positions } pub fn lift_reference_positions_exact( record: &bam::Record, query_positions: &[i64], ) -> Vec<Option<i64>> { if record.is_unmapped() { query_positions.iter().map(|_x| None).collect() } else { let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect(); liftover_exact(&aligned_block_pairs, query_positions, false) } } pub fn lift_query_positions_exact( record: &bam::Record, reference_positions: &[i64], ) -> Vec<Option<i64>> { if record.is_unmapped()
else { let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect(); liftover_exact(&aligned_block_pairs, reference_positions, true) } }
{ reference_positions.iter().map(|_x| None).collect() }
conditional_block
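`positions_on_complimented_sequence` above flips reverse-strand coordinates as `seq_len - p - 1`, and the in-place variant drops the `- 1` when the positions are half-open range endpoints. A tiny standalone illustration of the flip:

```rust
// Standalone illustration of the reverse-strand flip used above:
// position p on a read of length `seq_len` maps to seq_len - p - 1,
// and flipped positions come back in ascending order after a reverse.
fn flip_positions(seq_len: i64, positions: &[i64]) -> Vec<i64> {
    positions.iter().rev().map(|p| seq_len - p - 1).collect()
}

fn main() {
    // On a 10 bp read, base 0 becomes base 9 and base 9 becomes base 0.
    assert_eq!(flip_positions(10, &[0, 2, 9]), vec![0, 7, 9]);
    // For half-open [start, end) ranges the in-place variant uses
    // offset 0 instead of 1, so ends stay exclusive after the flip.
}
```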
lib.rs
use itertools::multiunzip; use rust_htslib::{bam, bam::ext::BamRecordExtensions}; use std::collections::HashMap; use std::fmt::{Debug, Display}; /// Merge two lists into a sorted list /// Normal sort is supposed to be very fast on two sorted lists /// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6> pub fn merge_two_lists<T>(left: &[T], right: &[T]) -> Vec<T> where T: Ord, T: Clone, { let mut x: Vec<T> = left.iter().chain(right.iter()).cloned().collect(); x.sort(); x } /// Merge two lists based on a key /// Normal sort is supposed to be very fast on two sorted lists /// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6> /// ``` /// use bamlift::*; /// let x = vec![1,3]; /// let x_q = vec!["a","b"]; /// let y = vec![2,4]; /// let y_q = vec!["c", "d"]; /// let z = merge_two_lists_with_qual(&x, &x_q, &y, &y_q); /// assert_eq!(z, vec![(1,"a"), (2,"c"), (3,"b"), (4, "d")]); /// ``` pub fn merge_two_lists_with_qual<T, U>( left: &[T], left_q: &[U], right: &[T], right_q: &[U], ) -> Vec<(T, U)> where T: Ord, T: Clone, U: Clone, { let l = left .iter() .zip(left_q.iter()) .map(|(a, b)| (a.clone(), b.clone())); let r = right .iter() .zip(right_q.iter()) .map(|(a, b)| (a.clone(), b.clone())); let mut x: Vec<(T, U)> = l.chain(r).collect(); x.sort_by_key(|(a, _b)| a.clone()); x } /// get positions on the complimented sequence in the cigar record pub fn positions_on_complimented_sequence( record: &bam::Record, input_positions: &[i64], ) -> Vec<i64>
/// get positions on the complimented sequence in the cigar record pub fn positions_on_complimented_sequence_in_place( record: &bam::Record, input_positions: &mut Vec<i64>, part_of_range: bool, ) { if !record.is_reverse() { return; } let seq_len = i64::try_from(record.seq_len()).unwrap(); // need to correct for going from [) to (] if we are part of a range let offset = if part_of_range { 0 } else { 1 }; for p in input_positions.iter_mut() { *p = seq_len - *p - offset; } input_positions.reverse(); } #[inline(always)] pub fn is_sorted<T>(v: &[T]) -> bool where T: Ord, { v.windows(2).all(|w| w[0] <= w[1]) } /// search a sorted array for insertions positions of another sorted array /// returned index i satisfies /// left /// a\[i-1\] < v <= a\[i\] /// right /// a\[i-1\] <= v < a\[i\] /// <https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html> /// ``` /// use bamlift::*; /// let a = vec![1, 2, 3, 5, 6, 7, 8, 9, 10]; /// let v = vec![0, 1, 3, 4, 11, 11]; /// let indexes = search_sorted(&a, &v); /// assert_eq!(indexes, vec![0, 0, 2, 3, 9, 9]); /// ``` pub fn search_sorted<T>(a: &[T], v: &[T]) -> Vec<usize> where T: Ord, T: Display, [T]: Debug, { if !is_sorted(v) { panic!("v is not sorted: {:?}", v); } let mut indexes = Vec::with_capacity(v.len()); let mut a_idx = 0; for cur_v in v { while a_idx < a.len() { // check starting condition if a_idx == 0 && *cur_v <= a[a_idx] { indexes.push(0); break; } else if a_idx == 0 { a_idx += 1; } // end condition if a_idx == a.len() - 1 && *cur_v > a[a_idx] { indexes.push(a_idx + 1); break; } // middle of the array else if (a[a_idx - 1] < *cur_v) && (*cur_v <= a[a_idx]) { indexes.push(a_idx); break; } a_idx += 1; } } log::trace!("search_sorted: {:?}\n{:?}", v, indexes); indexes } // // CLOSEST LIFTOVER FUNCTIONS // /// this is a helper function for liftover_closest that should only be called from there /// The exception for this is test cases, where it should be easier to test this function /// directly. fn liftover_closest( positions: &[i64], aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, ) -> Vec<Option<i64>> { // skip empty if positions.is_empty() { return vec![]; } if aligned_block_pairs.is_empty() { return positions.iter().map(|_x| None).collect(); } assert!( is_sorted(positions), "Positions must be sorted before calling liftover!" 
); // find the closest position for every position let mut starting_block = 0; let ending_block = aligned_block_pairs.len(); let mut pos_mapping = HashMap::new(); for cur_pos in positions { pos_mapping.insert(cur_pos, (-1, i64::MAX)); let mut current_block = 0; for block_index in starting_block..ending_block { // get the current alignment block let ([q_st, q_en], [r_st, r_en]) = &aligned_block_pairs[block_index]; // get the previous closest position let (best_r_pos, best_diff) = pos_mapping.get_mut(cur_pos).unwrap(); // exact match found if cur_pos >= &q_st && cur_pos < &q_en { let dist_from_start = cur_pos - q_st; *best_diff = 0; *best_r_pos = r_st + dist_from_start; break; } // we are before the start of the block else if cur_pos < &q_st { let diff = (q_st - cur_pos).abs(); if diff < *best_diff { *best_diff = diff; *best_r_pos = *r_st; } } // we are past the end of the block else if cur_pos >= &q_en { let diff = (q_en - cur_pos).abs(); if diff < *best_diff { *best_diff = diff; *best_r_pos = *r_en; } // we don't need to return to previous blocks since the input is sorted starting_block = current_block; } current_block += 1; } } let mut rtn = vec![]; for q_pos in positions { let (r_pos, diff) = pos_mapping.get(q_pos).unwrap(); if *r_pos == -1 && *diff == i64::MAX { rtn.push(None); } else { rtn.push(Some(*r_pos)); } } assert_eq!(rtn.len(), positions.len()); rtn } /// find the closest reference positions for a list of query positions pub fn lift_reference_positions( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, query_positions: &[i64], ) -> Vec<Option<i64>> { liftover_closest(query_positions, aligned_block_pairs) } /// find the closest query positions for a list of reference positions pub fn lift_query_positions( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, reference_positions: &[i64], ) -> Vec<Option<i64>> { // if lifting to the query, we need to reverse the pairs let aligned_block_pairs = aligned_block_pairs.iter().map(|(q, r)| (*r, *q)).collect(); liftover_closest(reference_positions, &aligned_block_pairs) } fn lift_range( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, starts: &[i64], ends: &[i64], lift_reference_to_query: bool, ) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) { assert_eq!(starts.len(), ends.len()); let (ref_starts, ref_ends) = if !lift_reference_to_query { ( lift_reference_positions(aligned_block_pairs, starts), lift_reference_positions(aligned_block_pairs, ends), ) } else { ( lift_query_positions(aligned_block_pairs, starts), lift_query_positions(aligned_block_pairs, ends), ) }; assert_eq!(ref_starts.len(), ref_ends.len()); let rtn = ref_starts .into_iter() .zip(ref_ends.into_iter()) .map(|(start, end)| match (start, end) { (Some(start), Some(end)) => { if start == end { (None, None, None) } else { (Some(start), Some(end), Some(end - start)) } } _ => (None, None, None), }) .collect::<Vec<_>>(); multiunzip(rtn) } /// Find the closest range but hopefully better pub fn lift_query_range( record: &bam::Record, starts: &[i64], ends: &[i64], ) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) { // get the aligned block pairs let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect(); lift_range(&aligned_block_pairs, starts, ends, false) } // // EXACT LIFTOVER FUNCTIONS // /// liftover positions using the cigar string fn liftover_exact( aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>, positions: &[i64], lift_reference_to_query: bool, ) -> Vec<Option<i64>> { assert!( is_sorted(positions), "Positions must be sorted 
before calling liftover!" ); // find the shared positions in the reference let mut return_positions = vec![]; let mut cur_idx = 0; // ends are not inclusive, I checked. for ([q_st, q_en], [r_st, r_en]) in aligned_block_pairs { let (st, en) = if !lift_reference_to_query { (q_st, q_en) } else { (r_st, r_en) }; // check bounds if cur_idx == positions.len() { break; } let mut cur_pos = positions[cur_idx]; // need to go to the next block while cur_pos < *en { if cur_pos >= *st { let dist_from_start = cur_pos - st; let rtn_pos = if !lift_reference_to_query { r_st + dist_from_start } else { q_st + dist_from_start }; return_positions.push(Some(rtn_pos)); } else { return_positions.push(None); } // reset current position cur_idx += 1; if cur_idx == positions.len() { break; } cur_pos = positions[cur_idx]; } } // add values for things that won't lift at the end while positions.len() > return_positions.len() { return_positions.push(None); } assert_eq!(positions.len(), return_positions.len()); return_positions } pub fn lift_reference_positions_exact( record: &bam::Record, query_positions: &[i64], ) -> Vec<Option<i64>> { if record.is_unmapped() { query_positions.iter().map(|_x| None).collect() } else { let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect(); liftover_exact(&aligned_block_pairs, query_positions, false) } } pub fn lift_query_positions_exact( record: &bam::Record, reference_positions: &[i64], ) -> Vec<Option<i64>> { if record.is_unmapped() { reference_positions.iter().map(|_x| None).collect() } else { let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect(); liftover_exact(&aligned_block_pairs, reference_positions, true) } }
{ // reverse positions if needed let positions: Vec<i64> = if record.is_reverse() { let seq_len = i64::try_from(record.seq_len()).unwrap(); input_positions .iter() .rev() .map(|p| seq_len - p - 1) .collect() } else { input_positions.to_vec() }; positions }
identifier_body
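`lift_range` above collects one `(start, end, length)` triple per interval and splits them into three parallel vectors with `itertools::multiunzip`. That final step in isolation:

```rust
use itertools::multiunzip;

fn main() {
    // Per-interval results as produced by `lift_range`: a lifted
    // (start, end, length) triple, or all-None when the lift failed.
    let triples = vec![
        (Some(100i64), Some(110i64), Some(10i64)),
        (None, None, None),
    ];

    // Split the triples into three parallel vectors.
    let (starts, ends, lengths): (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) =
        multiunzip(triples);

    assert_eq!(starts, vec![Some(100), None]);
    assert_eq!(ends, vec![Some(110), None]);
    assert_eq!(lengths, vec![Some(10), None]);
}
```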
chown.rs
// This file is part of the uutils coreutils package. // // (c) Jian Zeng <anonymousknight96@gmail.com> // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) COMFOLLOW Chowner Passwd RFILE RFILE's derefer dgid duid #[macro_use] extern crate uucore; pub use uucore::entries::{self, Group, Locate, Passwd}; use uucore::fs::resolve_relative_path; use uucore::libc::{gid_t, uid_t}; use uucore::perms::{wrap_chown, Verbosity}; use uucore::error::{FromIo, UResult, USimpleError}; use clap::{crate_version, App, Arg}; use walkdir::WalkDir; use std::fs::{self, Metadata}; use std::os::unix::fs::MetadataExt; use std::convert::AsRef; use std::path::Path; use uucore::InvalidEncodingHandling; static ABOUT: &str = "change file owner and group"; pub mod options { pub mod verbosity { pub static CHANGES: &str = "changes"; pub static QUIET: &str = "quiet"; pub static SILENT: &str = "silent"; pub static VERBOSE: &str = "verbose"; } pub mod preserve_root { pub static PRESERVE: &str = "preserve-root"; pub static NO_PRESERVE: &str = "no-preserve-root"; } pub mod dereference { pub static DEREFERENCE: &str = "dereference"; pub static NO_DEREFERENCE: &str = "no-dereference"; } pub static FROM: &str = "from"; pub static RECURSIVE: &str = "recursive"; pub mod traverse { pub static TRAVERSE: &str = "H"; pub static NO_TRAVERSE: &str = "P"; pub static EVERY: &str = "L"; } pub static REFERENCE: &str = "reference"; } static ARG_OWNER: &str = "owner"; static ARG_FILES: &str = "files"; const FTS_COMFOLLOW: u8 = 1; const FTS_PHYSICAL: u8 = 1 << 1; const FTS_LOGICAL: u8 = 1 << 2; fn get_usage() -> String { format!( "{0} [OPTION]... [OWNER][:[GROUP]] FILE...\n{0} [OPTION]... --reference=RFILE FILE...", executable!() ) } #[uucore_procs::gen_uumain] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let args = args .collect_str(InvalidEncodingHandling::Ignore) .accept_any(); let usage = get_usage(); let matches = uu_app().usage(&usage[..]).get_matches_from(args); /* First arg is the owner/group */ let owner = matches.value_of(ARG_OWNER).unwrap(); /* Then the list of files */ let files: Vec<String> = matches .values_of(ARG_FILES) .map(|v| v.map(ToString::to_string).collect()) .unwrap_or_default(); let preserve_root = matches.is_present(options::preserve_root::PRESERVE); let mut derefer = if matches.is_present(options::dereference::NO_DEREFERENCE) { 1 } else { 0 }; let mut bit_flag = if matches.is_present(options::traverse::TRAVERSE) { FTS_COMFOLLOW | FTS_PHYSICAL } else if matches.is_present(options::traverse::EVERY) { FTS_LOGICAL } else { FTS_PHYSICAL }; let recursive = matches.is_present(options::RECURSIVE); if recursive { if bit_flag == FTS_PHYSICAL { if derefer == 1 { return Err(USimpleError::new(1, "-R --dereference requires -H or -L")); } derefer = 0; } } else { bit_flag = FTS_PHYSICAL; } let verbosity = if matches.is_present(options::verbosity::CHANGES) { Verbosity::Changes } else if matches.is_present(options::verbosity::SILENT) || matches.is_present(options::verbosity::QUIET) { Verbosity::Silent } else if matches.is_present(options::verbosity::VERBOSE) { Verbosity::Verbose } else { Verbosity::Normal }; let filter = if let Some(spec) = matches.value_of(options::FROM) { match parse_spec(spec)? 
{ (Some(uid), None) => IfFrom::User(uid), (None, Some(gid)) => IfFrom::Group(gid), (Some(uid), Some(gid)) => IfFrom::UserGroup(uid, gid), (None, None) => IfFrom::All, } } else { IfFrom::All }; let dest_uid: Option<u32>; let dest_gid: Option<u32>; if let Some(file) = matches.value_of(options::REFERENCE) { let meta = fs::metadata(&file) .map_err_context(|| format!("failed to get attributes of '{}'", file))?; dest_gid = Some(meta.gid()); dest_uid = Some(meta.uid()); } else { let (u, g) = parse_spec(owner)?; dest_uid = u; dest_gid = g; } let executor = Chowner { bit_flag, dest_uid, dest_gid, verbosity, recursive, dereference: derefer != 0, filter, preserve_root, files, }; executor.exec() } pub fn uu_app() -> App<'static, 'static> { App::new(executable!()) .version(crate_version!()) .about(ABOUT) .arg( Arg::with_name(options::verbosity::CHANGES) .short("c") .long(options::verbosity::CHANGES) .help("like verbose but report only when a change is made"), ) .arg(Arg::with_name(options::dereference::DEREFERENCE).long(options::dereference::DEREFERENCE).help( "affect the referent of each symbolic link (this is the default), rather than the symbolic link itself", )) .arg( Arg::with_name(options::dereference::NO_DEREFERENCE) .short("h") .long(options::dereference::NO_DEREFERENCE) .help( "affect symbolic links instead of any referenced file (useful only on systems that can change the ownership of a symlink)", ), ) .arg( Arg::with_name(options::FROM) .long(options::FROM) .help( "change the owner and/or group of each file only if its current owner and/or group match those specified here. Either may be omitted, in which case a match is not required for the omitted attribute", ) .value_name("CURRENT_OWNER:CURRENT_GROUP"), ) .arg( Arg::with_name(options::preserve_root::PRESERVE) .long(options::preserve_root::PRESERVE) .help("fail to operate recursively on '/'"), ) .arg( Arg::with_name(options::preserve_root::NO_PRESERVE) .long(options::preserve_root::NO_PRESERVE) .help("do not treat '/' specially (the default)"), ) .arg( Arg::with_name(options::verbosity::QUIET) .long(options::verbosity::QUIET) .help("suppress most error messages"), ) .arg( Arg::with_name(options::RECURSIVE) .short("R") .long(options::RECURSIVE) .help("operate on files and directories recursively"), ) .arg( Arg::with_name(options::REFERENCE) .long(options::REFERENCE) .help("use RFILE's owner and group rather than specifying OWNER:GROUP values") .value_name("RFILE") .min_values(1), ) .arg(Arg::with_name(options::verbosity::SILENT).short("f").long(options::verbosity::SILENT)) .arg( Arg::with_name(options::traverse::TRAVERSE) .short(options::traverse::TRAVERSE) .help("if a command line argument is a symbolic link to a directory, traverse it") .overrides_with_all(&[options::traverse::EVERY, options::traverse::NO_TRAVERSE]), ) .arg( Arg::with_name(options::traverse::EVERY) .short(options::traverse::EVERY) .help("traverse every symbolic link to a directory encountered") .overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::NO_TRAVERSE]), ) .arg( Arg::with_name(options::traverse::NO_TRAVERSE) .short(options::traverse::NO_TRAVERSE) .help("do not traverse any symbolic links (default)") .overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::EVERY]), ) .arg( Arg::with_name(options::verbosity::VERBOSE) .long(options::verbosity::VERBOSE) .help("output a diagnostic for every file processed"), ) .arg( Arg::with_name(ARG_OWNER) .multiple(false) .takes_value(true) .required(true), ) .arg( Arg::with_name(ARG_FILES) 
.multiple(true) .takes_value(true) .required(true) .min_values(1), ) } fn
(spec: &str) -> UResult<(Option<u32>, Option<u32>)> { let args = spec.split_terminator(':').collect::<Vec<_>>(); let usr_only = args.len() == 1 && !args[0].is_empty(); let grp_only = args.len() == 2 && args[0].is_empty(); let usr_grp = args.len() == 2 && !args[0].is_empty() && !args[1].is_empty(); let uid = if usr_only || usr_grp { Some( Passwd::locate(args[0]) .map_err(|_| USimpleError::new(1, format!("invalid user: '{}'", spec)))? .uid(), ) } else { None }; let gid = if grp_only || usr_grp { Some( Group::locate(args[1]) .map_err(|_| USimpleError::new(1, format!("invalid group: '{}'", spec)))? .gid(), ) } else { None }; Ok((uid, gid)) } enum IfFrom { All, User(u32), Group(u32), UserGroup(u32, u32), } struct Chowner { dest_uid: Option<u32>, dest_gid: Option<u32>, bit_flag: u8, verbosity: Verbosity, filter: IfFrom, files: Vec<String>, recursive: bool, preserve_root: bool, dereference: bool, } macro_rules! unwrap { ($m:expr, $e:ident, $err:block) => { match $m { Ok(meta) => meta, Err($e) => $err, } }; } impl Chowner { fn exec(&self) -> UResult<()> { let mut ret = 0; for f in &self.files { ret |= self.traverse(f); } if ret != 0 { return Err(ret.into()); } Ok(()) } fn traverse<P: AsRef<Path>>(&self, root: P) -> i32 { let follow_arg = self.dereference || self.bit_flag != FTS_PHYSICAL; let path = root.as_ref(); let meta = match self.obtain_meta(path, follow_arg) { Some(m) => m, _ => return 1, }; // Prohibit only if: // (--preserve-root and -R present) && // ( // (argument is not symlink && resolved to be '/') || // (argument is symlink && should follow argument && resolved to be '/') // ) if self.recursive && self.preserve_root { let may_exist = if follow_arg { path.canonicalize().ok() } else { let real = resolve_relative_path(path); if real.is_dir() { Some(real.canonicalize().expect("failed to get real path")) } else { Some(real.into_owned()) } }; if let Some(p) = may_exist { if p.parent().is_none() { show_error!("it is dangerous to operate recursively on '/'"); show_error!("use --no-preserve-root to override this failsafe"); return 1; } } } let ret = if self.matched(meta.uid(), meta.gid()) { match wrap_chown( path, &meta, self.dest_uid, self.dest_gid, follow_arg, self.verbosity.clone(), ) { Ok(n) => { if !n.is_empty() { show_error!("{}", n); } 0 } Err(e) => { if self.verbosity != Verbosity::Silent { show_error!("{}", e); } 1 } } } else { 0 }; if !self.recursive { ret } else { ret | self.dive_into(&root) } } fn dive_into<P: AsRef<Path>>(&self, root: P) -> i32 { let mut ret = 0; let root = root.as_ref(); let follow = self.dereference || self.bit_flag & FTS_LOGICAL != 0; for entry in WalkDir::new(root).follow_links(follow).min_depth(1) { let entry = unwrap!(entry, e, { ret = 1; show_error!("{}", e); continue; }); let path = entry.path(); let meta = match self.obtain_meta(path, follow) { Some(m) => m, _ => { ret = 1; continue; } }; if !self.matched(meta.uid(), meta.gid()) { continue; } ret = match wrap_chown( path, &meta, self.dest_uid, self.dest_gid, follow, self.verbosity.clone(), ) { Ok(n) => { if !n.is_empty() { show_error!("{}", n); } 0 } Err(e) => { if self.verbosity != Verbosity::Silent { show_error!("{}", e); } 1 } } } ret } fn obtain_meta<P: AsRef<Path>>(&self, path: P, follow: bool) -> Option<Metadata> { use self::Verbosity::*; let path = path.as_ref(); let meta = if follow { unwrap!(path.metadata(), e, { match self.verbosity { Silent => (), _ => show_error!("cannot access '{}': {}", path.display(), e), } return None; }) } else { unwrap!(path.symlink_metadata(), e, { match self.verbosity 
{ Silent => (), _ => show_error!("cannot dereference '{}': {}", path.display(), e), } return None; }) }; Some(meta) } #[inline] fn matched(&self, uid: uid_t, gid: gid_t) -> bool { match self.filter { IfFrom::All => true, IfFrom::User(u) => u == uid, IfFrom::Group(g) => g == gid, IfFrom::UserGroup(u, g) => u == uid && g == gid, } } } #[cfg(test)] mod test { use super::*; #[test] fn test_parse_spec() { assert!(matches!(parse_spec(":"), Ok((None, None)))); assert!(format!("{}", parse_spec("::").err().unwrap()).starts_with("invalid group: ")); } }
parse_spec
identifier_name
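The masked identifier above is `parse_spec`, whose `[OWNER][:[GROUP]]` handling is easy to misread because `split_terminator` drops a trailing empty field. A simplified sketch of just the classification step, with the `Passwd`/`Group` lookups stripped out (note the one behavioral gap called out in the comment):

```rust
/// Simplified classification from parse_spec above: which of the uid/gid
/// lookups would run for a given spec string. The lookups themselves are
/// omitted, so unlike the real function, "::" comes back as (None, None)
/// here instead of failing with "invalid group" as the unit test expects.
fn classify_spec(spec: &str) -> (Option<&str>, Option<&str>) {
    let args: Vec<&str> = spec.split_terminator(':').collect();
    let usr = args.first().copied().filter(|s| !s.is_empty());
    let grp = args.get(1).copied().filter(|s| !s.is_empty());
    (usr, grp)
}

fn main() {
    assert_eq!(classify_spec("alice:staff"), (Some("alice"), Some("staff")));
    assert_eq!(classify_spec("alice"), (Some("alice"), None));
    assert_eq!(classify_spec(":staff"), (None, Some("staff")));
    assert_eq!(classify_spec(":"), (None, None)); // matches the ":" unit test
}
```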
chown.rs
// This file is part of the uutils coreutils package. // // (c) Jian Zeng <anonymousknight96@gmail.com> // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) COMFOLLOW Chowner Passwd RFILE RFILE's derefer dgid duid #[macro_use] extern crate uucore; pub use uucore::entries::{self, Group, Locate, Passwd}; use uucore::fs::resolve_relative_path; use uucore::libc::{gid_t, uid_t}; use uucore::perms::{wrap_chown, Verbosity}; use uucore::error::{FromIo, UResult, USimpleError}; use clap::{crate_version, App, Arg}; use walkdir::WalkDir; use std::fs::{self, Metadata}; use std::os::unix::fs::MetadataExt; use std::convert::AsRef; use std::path::Path; use uucore::InvalidEncodingHandling; static ABOUT: &str = "change file owner and group"; pub mod options { pub mod verbosity { pub static CHANGES: &str = "changes"; pub static QUIET: &str = "quiet"; pub static SILENT: &str = "silent"; pub static VERBOSE: &str = "verbose"; } pub mod preserve_root { pub static PRESERVE: &str = "preserve-root"; pub static NO_PRESERVE: &str = "no-preserve-root"; } pub mod dereference { pub static DEREFERENCE: &str = "dereference"; pub static NO_DEREFERENCE: &str = "no-dereference"; } pub static FROM: &str = "from"; pub static RECURSIVE: &str = "recursive"; pub mod traverse { pub static TRAVERSE: &str = "H"; pub static NO_TRAVERSE: &str = "P"; pub static EVERY: &str = "L"; } pub static REFERENCE: &str = "reference"; } static ARG_OWNER: &str = "owner"; static ARG_FILES: &str = "files"; const FTS_COMFOLLOW: u8 = 1; const FTS_PHYSICAL: u8 = 1 << 1; const FTS_LOGICAL: u8 = 1 << 2; fn get_usage() -> String { format!( "{0} [OPTION]... [OWNER][:[GROUP]] FILE...\n{0} [OPTION]... --reference=RFILE FILE...", executable!() ) } #[uucore_procs::gen_uumain] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let args = args .collect_str(InvalidEncodingHandling::Ignore) .accept_any(); let usage = get_usage(); let matches = uu_app().usage(&usage[..]).get_matches_from(args); /* First arg is the owner/group */ let owner = matches.value_of(ARG_OWNER).unwrap(); /* Then the list of files */ let files: Vec<String> = matches .values_of(ARG_FILES) .map(|v| v.map(ToString::to_string).collect()) .unwrap_or_default(); let preserve_root = matches.is_present(options::preserve_root::PRESERVE); let mut derefer = if matches.is_present(options::dereference::NO_DEREFERENCE) { 1 } else { 0 }; let mut bit_flag = if matches.is_present(options::traverse::TRAVERSE) { FTS_COMFOLLOW | FTS_PHYSICAL } else if matches.is_present(options::traverse::EVERY) { FTS_LOGICAL } else { FTS_PHYSICAL }; let recursive = matches.is_present(options::RECURSIVE); if recursive { if bit_flag == FTS_PHYSICAL { if derefer == 1 { return Err(USimpleError::new(1, "-R --dereference requires -H or -L")); } derefer = 0; } } else { bit_flag = FTS_PHYSICAL; } let verbosity = if matches.is_present(options::verbosity::CHANGES) { Verbosity::Changes } else if matches.is_present(options::verbosity::SILENT) || matches.is_present(options::verbosity::QUIET) { Verbosity::Silent } else if matches.is_present(options::verbosity::VERBOSE) { Verbosity::Verbose } else { Verbosity::Normal }; let filter = if let Some(spec) = matches.value_of(options::FROM) { match parse_spec(spec)? 
{ (Some(uid), None) => IfFrom::User(uid), (None, Some(gid)) => IfFrom::Group(gid), (Some(uid), Some(gid)) => IfFrom::UserGroup(uid, gid), (None, None) => IfFrom::All, } } else { IfFrom::All }; let dest_uid: Option<u32>; let dest_gid: Option<u32>; if let Some(file) = matches.value_of(options::REFERENCE) { let meta = fs::metadata(&file) .map_err_context(|| format!("failed to get attributes of '{}'", file))?; dest_gid = Some(meta.gid()); dest_uid = Some(meta.uid()); } else { let (u, g) = parse_spec(owner)?; dest_uid = u; dest_gid = g; } let executor = Chowner { bit_flag, dest_uid, dest_gid, verbosity, recursive, dereference: derefer != 0, filter, preserve_root, files, }; executor.exec() } pub fn uu_app() -> App<'static, 'static> { App::new(executable!()) .version(crate_version!()) .about(ABOUT) .arg( Arg::with_name(options::verbosity::CHANGES) .short("c") .long(options::verbosity::CHANGES) .help("like verbose but report only when a change is made"), ) .arg(Arg::with_name(options::dereference::DEREFERENCE).long(options::dereference::DEREFERENCE).help( "affect the referent of each symbolic link (this is the default), rather than the symbolic link itself", )) .arg( Arg::with_name(options::dereference::NO_DEREFERENCE) .short("h") .long(options::dereference::NO_DEREFERENCE) .help( "affect symbolic links instead of any referenced file (useful only on systems that can change the ownership of a symlink)", ), ) .arg( Arg::with_name(options::FROM) .long(options::FROM) .help( "change the owner and/or group of each file only if its current owner and/or group match those specified here. Either may be omitted, in which case a match is not required for the omitted attribute", ) .value_name("CURRENT_OWNER:CURRENT_GROUP"), ) .arg( Arg::with_name(options::preserve_root::PRESERVE) .long(options::preserve_root::PRESERVE) .help("fail to operate recursively on '/'"), ) .arg( Arg::with_name(options::preserve_root::NO_PRESERVE) .long(options::preserve_root::NO_PRESERVE) .help("do not treat '/' specially (the default)"), ) .arg( Arg::with_name(options::verbosity::QUIET) .long(options::verbosity::QUIET) .help("suppress most error messages"), ) .arg( Arg::with_name(options::RECURSIVE) .short("R") .long(options::RECURSIVE) .help("operate on files and directories recursively"), ) .arg( Arg::with_name(options::REFERENCE) .long(options::REFERENCE) .help("use RFILE's owner and group rather than specifying OWNER:GROUP values") .value_name("RFILE") .min_values(1), ) .arg(Arg::with_name(options::verbosity::SILENT).short("f").long(options::verbosity::SILENT)) .arg( Arg::with_name(options::traverse::TRAVERSE) .short(options::traverse::TRAVERSE) .help("if a command line argument is a symbolic link to a directory, traverse it") .overrides_with_all(&[options::traverse::EVERY, options::traverse::NO_TRAVERSE]), )
) .arg( Arg::with_name(options::traverse::NO_TRAVERSE) .short(options::traverse::NO_TRAVERSE) .help("do not traverse any symbolic links (default)") .overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::EVERY]), ) .arg( Arg::with_name(options::verbosity::VERBOSE) .long(options::verbosity::VERBOSE) .help("output a diagnostic for every file processed"), ) .arg( Arg::with_name(ARG_OWNER) .multiple(false) .takes_value(true) .required(true), ) .arg( Arg::with_name(ARG_FILES) .multiple(true) .takes_value(true) .required(true) .min_values(1), ) } fn parse_spec(spec: &str) -> UResult<(Option<u32>, Option<u32>)> { let args = spec.split_terminator(':').collect::<Vec<_>>(); let usr_only = args.len() == 1 && !args[0].is_empty(); let grp_only = args.len() == 2 && args[0].is_empty(); let usr_grp = args.len() == 2 && !args[0].is_empty() && !args[1].is_empty(); let uid = if usr_only || usr_grp { Some( Passwd::locate(args[0]) .map_err(|_| USimpleError::new(1, format!("invalid user: '{}'", spec)))? .uid(), ) } else { None }; let gid = if grp_only || usr_grp { Some( Group::locate(args[1]) .map_err(|_| USimpleError::new(1, format!("invalid group: '{}'", spec)))? .gid(), ) } else { None }; Ok((uid, gid)) } enum IfFrom { All, User(u32), Group(u32), UserGroup(u32, u32), } struct Chowner { dest_uid: Option<u32>, dest_gid: Option<u32>, bit_flag: u8, verbosity: Verbosity, filter: IfFrom, files: Vec<String>, recursive: bool, preserve_root: bool, dereference: bool, } macro_rules! unwrap { ($m:expr, $e:ident, $err:block) => { match $m { Ok(meta) => meta, Err($e) => $err, } }; } impl Chowner { fn exec(&self) -> UResult<()> { let mut ret = 0; for f in &self.files { ret |= self.traverse(f); } if ret != 0 { return Err(ret.into()); } Ok(()) } fn traverse<P: AsRef<Path>>(&self, root: P) -> i32 { let follow_arg = self.dereference || self.bit_flag != FTS_PHYSICAL; let path = root.as_ref(); let meta = match self.obtain_meta(path, follow_arg) { Some(m) => m, _ => return 1, }; // Prohibit only if: // (--preserve-root and -R present) && // ( // (argument is not symlink && resolved to be '/') || // (argument is symlink && should follow argument && resolved to be '/') // ) if self.recursive && self.preserve_root { let may_exist = if follow_arg { path.canonicalize().ok() } else { let real = resolve_relative_path(path); if real.is_dir() { Some(real.canonicalize().expect("failed to get real path")) } else { Some(real.into_owned()) } }; if let Some(p) = may_exist { if p.parent().is_none() { show_error!("it is dangerous to operate recursively on '/'"); show_error!("use --no-preserve-root to override this failsafe"); return 1; } } } let ret = if self.matched(meta.uid(), meta.gid()) { match wrap_chown( path, &meta, self.dest_uid, self.dest_gid, follow_arg, self.verbosity.clone(), ) { Ok(n) => { if !n.is_empty() { show_error!("{}", n); } 0 } Err(e) => { if self.verbosity != Verbosity::Silent { show_error!("{}", e); } 1 } } } else { 0 }; if !self.recursive { ret } else { ret | self.dive_into(&root) } } fn dive_into<P: AsRef<Path>>(&self, root: P) -> i32 { let mut ret = 0; let root = root.as_ref(); let follow = self.dereference || self.bit_flag & FTS_LOGICAL != 0; for entry in WalkDir::new(root).follow_links(follow).min_depth(1) { let entry = unwrap!(entry, e, { ret = 1; show_error!("{}", e); continue; }); let path = entry.path(); let meta = match self.obtain_meta(path, follow) { Some(m) => m, _ => { ret = 1; continue; } }; if !self.matched(meta.uid(), meta.gid()) { continue; } ret = match wrap_chown( path, &meta, 
self.dest_uid, self.dest_gid, follow, self.verbosity.clone(), ) { Ok(n) => { if !n.is_empty() { show_error!("{}", n); } 0 } Err(e) => { if self.verbosity != Verbosity::Silent { show_error!("{}", e); } 1 } } } ret } fn obtain_meta<P: AsRef<Path>>(&self, path: P, follow: bool) -> Option<Metadata> { use self::Verbosity::*; let path = path.as_ref(); let meta = if follow { unwrap!(path.metadata(), e, { match self.verbosity { Silent => (), _ => show_error!("cannot access '{}': {}", path.display(), e), } return None; }) } else { unwrap!(path.symlink_metadata(), e, { match self.verbosity { Silent => (), _ => show_error!("cannot dereference '{}': {}", path.display(), e), } return None; }) }; Some(meta) } #[inline] fn matched(&self, uid: uid_t, gid: gid_t) -> bool { match self.filter { IfFrom::All => true, IfFrom::User(u) => u == uid, IfFrom::Group(g) => g == gid, IfFrom::UserGroup(u, g) => u == uid && g == gid, } } } #[cfg(test)] mod test { use super::*; #[test] fn test_parse_spec() { assert!(matches!(parse_spec(":"), Ok((None, None)))); assert!(format!("{}", parse_spec("::").err().unwrap()).starts_with("invalid group: ")); } }
.arg( Arg::with_name(options::traverse::EVERY) .short(options::traverse::EVERY) .help("traverse every symbolic link to a directory encountered") .overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::NO_TRAVERSE]),
random_line_split
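This `random_line_split` record masks one of the traversal-flag definitions. The instructive piece is `overrides_with_all`: -H, -L, and -P are not conflicting errors; the last one given on the command line simply wins. A hedged clap 2.x sketch of that behavior (the `demo` app name and assertions are illustrative, not from the source):

```rust
use clap::{App, Arg};

// Last-one-wins flag handling via overrides_with_all, as used by the
// -H / -L / -P traversal options in this record.
fn main() {
    let matches = App::new("demo")
        .arg(Arg::with_name("H").short("H").overrides_with_all(&["L", "P"]))
        .arg(Arg::with_name("L").short("L").overrides_with_all(&["H", "P"]))
        .arg(Arg::with_name("P").short("P").overrides_with_all(&["H", "L"]))
        .get_matches_from(vec!["demo", "-H", "-L", "-P"]);
    assert!(matches.is_present("P")); // -P came last, so it wins
    assert!(!matches.is_present("H") && !matches.is_present("L"));
}
```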
chown.rs
// This file is part of the uutils coreutils package. // // (c) Jian Zeng <anonymousknight96@gmail.com> // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) COMFOLLOW Chowner Passwd RFILE RFILE's derefer dgid duid #[macro_use] extern crate uucore; pub use uucore::entries::{self, Group, Locate, Passwd}; use uucore::fs::resolve_relative_path; use uucore::libc::{gid_t, uid_t}; use uucore::perms::{wrap_chown, Verbosity}; use uucore::error::{FromIo, UResult, USimpleError}; use clap::{crate_version, App, Arg}; use walkdir::WalkDir; use std::fs::{self, Metadata}; use std::os::unix::fs::MetadataExt; use std::convert::AsRef; use std::path::Path; use uucore::InvalidEncodingHandling; static ABOUT: &str = "change file owner and group"; pub mod options { pub mod verbosity { pub static CHANGES: &str = "changes"; pub static QUIET: &str = "quiet"; pub static SILENT: &str = "silent"; pub static VERBOSE: &str = "verbose"; } pub mod preserve_root { pub static PRESERVE: &str = "preserve-root"; pub static NO_PRESERVE: &str = "no-preserve-root"; } pub mod dereference { pub static DEREFERENCE: &str = "dereference"; pub static NO_DEREFERENCE: &str = "no-dereference"; } pub static FROM: &str = "from"; pub static RECURSIVE: &str = "recursive"; pub mod traverse { pub static TRAVERSE: &str = "H"; pub static NO_TRAVERSE: &str = "P"; pub static EVERY: &str = "L"; } pub static REFERENCE: &str = "reference"; } static ARG_OWNER: &str = "owner"; static ARG_FILES: &str = "files"; const FTS_COMFOLLOW: u8 = 1; const FTS_PHYSICAL: u8 = 1 << 1; const FTS_LOGICAL: u8 = 1 << 2; fn get_usage() -> String { format!( "{0} [OPTION]... [OWNER][:[GROUP]] FILE...\n{0} [OPTION]... --reference=RFILE FILE...", executable!() ) } #[uucore_procs::gen_uumain] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let args = args .collect_str(InvalidEncodingHandling::Ignore) .accept_any(); let usage = get_usage(); let matches = uu_app().usage(&usage[..]).get_matches_from(args); /* First arg is the owner/group */ let owner = matches.value_of(ARG_OWNER).unwrap(); /* Then the list of files */ let files: Vec<String> = matches .values_of(ARG_FILES) .map(|v| v.map(ToString::to_string).collect()) .unwrap_or_default(); let preserve_root = matches.is_present(options::preserve_root::PRESERVE); let mut derefer = if matches.is_present(options::dereference::NO_DEREFERENCE) { 1 } else { 0 }; let mut bit_flag = if matches.is_present(options::traverse::TRAVERSE) { FTS_COMFOLLOW | FTS_PHYSICAL } else if matches.is_present(options::traverse::EVERY) { FTS_LOGICAL } else { FTS_PHYSICAL }; let recursive = matches.is_present(options::RECURSIVE); if recursive { if bit_flag == FTS_PHYSICAL { if derefer == 1 { return Err(USimpleError::new(1, "-R --dereference requires -H or -L")); } derefer = 0; } } else { bit_flag = FTS_PHYSICAL; } let verbosity = if matches.is_present(options::verbosity::CHANGES) { Verbosity::Changes } else if matches.is_present(options::verbosity::SILENT) || matches.is_present(options::verbosity::QUIET) { Verbosity::Silent } else if matches.is_present(options::verbosity::VERBOSE) { Verbosity::Verbose } else { Verbosity::Normal }; let filter = if let Some(spec) = matches.value_of(options::FROM) { match parse_spec(spec)? 
{ (Some(uid), None) => IfFrom::User(uid), (None, Some(gid)) => IfFrom::Group(gid), (Some(uid), Some(gid)) => IfFrom::UserGroup(uid, gid), (None, None) => IfFrom::All, } } else { IfFrom::All }; let dest_uid: Option<u32>; let dest_gid: Option<u32>; if let Some(file) = matches.value_of(options::REFERENCE) { let meta = fs::metadata(&file) .map_err_context(|| format!("failed to get attributes of '{}'", file))?; dest_gid = Some(meta.gid()); dest_uid = Some(meta.uid()); } else { let (u, g) = parse_spec(owner)?; dest_uid = u; dest_gid = g; } let executor = Chowner { bit_flag, dest_uid, dest_gid, verbosity, recursive, dereference: derefer != 0, filter, preserve_root, files, }; executor.exec() } pub fn uu_app() -> App<'static, 'static> { App::new(executable!()) .version(crate_version!()) .about(ABOUT) .arg( Arg::with_name(options::verbosity::CHANGES) .short("c") .long(options::verbosity::CHANGES) .help("like verbose but report only when a change is made"), ) .arg(Arg::with_name(options::dereference::DEREFERENCE).long(options::dereference::DEREFERENCE).help( "affect the referent of each symbolic link (this is the default), rather than the symbolic link itself", )) .arg( Arg::with_name(options::dereference::NO_DEREFERENCE) .short("h") .long(options::dereference::NO_DEREFERENCE) .help( "affect symbolic links instead of any referenced file (useful only on systems that can change the ownership of a symlink)", ), ) .arg( Arg::with_name(options::FROM) .long(options::FROM) .help( "change the owner and/or group of each file only if its current owner and/or group match those specified here. Either may be omitted, in which case a match is not required for the omitted attribute", ) .value_name("CURRENT_OWNER:CURRENT_GROUP"), ) .arg( Arg::with_name(options::preserve_root::PRESERVE) .long(options::preserve_root::PRESERVE) .help("fail to operate recursively on '/'"), ) .arg( Arg::with_name(options::preserve_root::NO_PRESERVE) .long(options::preserve_root::NO_PRESERVE) .help("do not treat '/' specially (the default)"), ) .arg( Arg::with_name(options::verbosity::QUIET) .long(options::verbosity::QUIET) .help("suppress most error messages"), ) .arg( Arg::with_name(options::RECURSIVE) .short("R") .long(options::RECURSIVE) .help("operate on files and directories recursively"), ) .arg( Arg::with_name(options::REFERENCE) .long(options::REFERENCE) .help("use RFILE's owner and group rather than specifying OWNER:GROUP values") .value_name("RFILE") .min_values(1), ) .arg(Arg::with_name(options::verbosity::SILENT).short("f").long(options::verbosity::SILENT)) .arg( Arg::with_name(options::traverse::TRAVERSE) .short(options::traverse::TRAVERSE) .help("if a command line argument is a symbolic link to a directory, traverse it") .overrides_with_all(&[options::traverse::EVERY, options::traverse::NO_TRAVERSE]), ) .arg( Arg::with_name(options::traverse::EVERY) .short(options::traverse::EVERY) .help("traverse every symbolic link to a directory encountered") .overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::NO_TRAVERSE]), ) .arg( Arg::with_name(options::traverse::NO_TRAVERSE) .short(options::traverse::NO_TRAVERSE) .help("do not traverse any symbolic links (default)") .overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::EVERY]), ) .arg( Arg::with_name(options::verbosity::VERBOSE) .long(options::verbosity::VERBOSE) .help("output a diagnostic for every file processed"), ) .arg( Arg::with_name(ARG_OWNER) .multiple(false) .takes_value(true) .required(true), ) .arg( Arg::with_name(ARG_FILES) 
.multiple(true) .takes_value(true) .required(true) .min_values(1), ) } fn parse_spec(spec: &str) -> UResult<(Option<u32>, Option<u32>)> { let args = spec.split_terminator(':').collect::<Vec<_>>(); let usr_only = args.len() == 1 && !args[0].is_empty(); let grp_only = args.len() == 2 && args[0].is_empty(); let usr_grp = args.len() == 2 && !args[0].is_empty() && !args[1].is_empty(); let uid = if usr_only || usr_grp { Some( Passwd::locate(args[0]) .map_err(|_| USimpleError::new(1, format!("invalid user: '{}'", spec)))? .uid(), ) } else { None }; let gid = if grp_only || usr_grp { Some( Group::locate(args[1]) .map_err(|_| USimpleError::new(1, format!("invalid group: '{}'", spec)))? .gid(), ) } else { None }; Ok((uid, gid)) } enum IfFrom { All, User(u32), Group(u32), UserGroup(u32, u32), } struct Chowner { dest_uid: Option<u32>, dest_gid: Option<u32>, bit_flag: u8, verbosity: Verbosity, filter: IfFrom, files: Vec<String>, recursive: bool, preserve_root: bool, dereference: bool, } macro_rules! unwrap { ($m:expr, $e:ident, $err:block) => { match $m { Ok(meta) => meta, Err($e) => $err, } }; } impl Chowner { fn exec(&self) -> UResult<()> { let mut ret = 0; for f in &self.files { ret |= self.traverse(f); } if ret != 0 { return Err(ret.into()); } Ok(()) } fn traverse<P: AsRef<Path>>(&self, root: P) -> i32 { let follow_arg = self.dereference || self.bit_flag != FTS_PHYSICAL; let path = root.as_ref(); let meta = match self.obtain_meta(path, follow_arg) { Some(m) => m, _ => return 1, }; // Prohibit only if: // (--preserve-root and -R present) && // ( // (argument is not symlink && resolved to be '/') || // (argument is symlink && should follow argument && resolved to be '/') // ) if self.recursive && self.preserve_root { let may_exist = if follow_arg { path.canonicalize().ok() } else { let real = resolve_relative_path(path); if real.is_dir() { Some(real.canonicalize().expect("failed to get real path")) } else { Some(real.into_owned()) } }; if let Some(p) = may_exist { if p.parent().is_none() { show_error!("it is dangerous to operate recursively on '/'"); show_error!("use --no-preserve-root to override this failsafe"); return 1; } } } let ret = if self.matched(meta.uid(), meta.gid()) { match wrap_chown( path, &meta, self.dest_uid, self.dest_gid, follow_arg, self.verbosity.clone(), ) { Ok(n) => { if !n.is_empty() { show_error!("{}", n); } 0 } Err(e) => { if self.verbosity != Verbosity::Silent { show_error!("{}", e); } 1 } } } else { 0 }; if !self.recursive { ret } else { ret | self.dive_into(&root) } } fn dive_into<P: AsRef<Path>>(&self, root: P) -> i32 { let mut ret = 0; let root = root.as_ref(); let follow = self.dereference || self.bit_flag & FTS_LOGICAL != 0; for entry in WalkDir::new(root).follow_links(follow).min_depth(1) { let entry = unwrap!(entry, e, { ret = 1; show_error!("{}", e); continue; }); let path = entry.path(); let meta = match self.obtain_meta(path, follow) { Some(m) => m, _ => { ret = 1; continue; } }; if !self.matched(meta.uid(), meta.gid()) { continue; } ret = match wrap_chown( path, &meta, self.dest_uid, self.dest_gid, follow, self.verbosity.clone(), ) { Ok(n) =>
Err(e) => { if self.verbosity != Verbosity::Silent { show_error!("{}", e); } 1 } } } ret } fn obtain_meta<P: AsRef<Path>>(&self, path: P, follow: bool) -> Option<Metadata> { use self::Verbosity::*; let path = path.as_ref(); let meta = if follow { unwrap!(path.metadata(), e, { match self.verbosity { Silent => (), _ => show_error!("cannot access '{}': {}", path.display(), e), } return None; }) } else { unwrap!(path.symlink_metadata(), e, { match self.verbosity { Silent => (), _ => show_error!("cannot dereference '{}': {}", path.display(), e), } return None; }) }; Some(meta) } #[inline] fn matched(&self, uid: uid_t, gid: gid_t) -> bool { match self.filter { IfFrom::All => true, IfFrom::User(u) => u == uid, IfFrom::Group(g) => g == gid, IfFrom::UserGroup(u, g) => u == uid && g == gid, } } } #[cfg(test)] mod test { use super::*; #[test] fn test_parse_spec() { assert!(matches!(parse_spec(":"), Ok((None, None)))); assert!(format!("{}", parse_spec("::").err().unwrap()).starts_with("invalid group: ")); } }
{ if !n.is_empty() { show_error!("{}", n); } 0 }
conditional_block
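The masked `conditional_block` is the `Ok(n)` arm: print the verbosity message if any, then contribute exit status 0. Around it, per-file statuses are folded with bitwise OR (`ret |= self.traverse(f)` in `exec`, `ret | self.dive_into(&root)` in `traverse`), so a single failure anywhere makes the whole run fail. A tiny sketch of that accumulation pattern (the `process` function is a hypothetical stand-in):

```rust
// Exit-status accumulation: each item yields 0 (ok) or 1 (error), and
// bitwise OR folds them so one failure makes the final status nonzero.
fn process(path: &str) -> i32 {
    if path.is_empty() { 1 } else { 0 }
}

fn main() {
    let files = ["a", "", "b"];
    let mut ret = 0;
    for f in &files {
        ret |= process(f);
    }
    assert_eq!(ret, 1); // the empty entry failed, so the run fails
}
```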
bls12_377_scalar.rs
//! This module implements field arithmetic for BLS12-377's scalar field. use std::cmp::Ordering::Less; use std::convert::TryInto; use std::ops::{Add, Div, Mul, Neg, Sub}; use rand::Rng; use unroll::unroll_for_loops; use crate::{add_no_overflow, cmp, Field, sub, field_to_biguint, rand_range, rand_range_from_rng}; use crate::nonzero_multiplicative_inverse; use std::cmp::Ordering; use std::fmt; use std::fmt::{Display, Formatter}; /// An element of the BLS12 group's scalar field. #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default)] pub struct Bls12377Scalar { /// Montgomery representation, encoded with little-endian u64 limbs. pub limbs: [u64; 4], } impl Bls12377Scalar { /// The order of the field: /// 8444461749428370424248824938781546531375899335154063827935233455917409239041 pub const ORDER: [u64; 4] = [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688173398]; /// R in the context of the Montgomery reduction, i.e. 2^256 % |F|. pub(crate) const R: [u64; 4] = [9015221291577245683, 8239323489949974514, 1646089257421115374, 958099254763297437]; /// R^2 in the context of the Montgomery reduction, i.e. 2^256^2 % |F|. pub(crate) const R2: [u64; 4] = [2726216793283724667, 14712177743343147295, 12091039717619697043, 81024008013859129]; /// R^3 in the context of the Montgomery reduction, i.e. 2^256^3 % |F|. pub(crate) const R3: [u64; 4] = [7656847007262524748, 7083357369969088153, 12818756329091487507, 432872940405820890]; /// In the context of Montgomery multiplication, µ = -|F|^-1 mod 2^64. const MU: u64 = 725501752471715839; pub fn from_canonical(c: [u64; 4]) -> Self { // We compute M(c, R^2) = c * R^2 * R^-1 = c * R. Self { limbs: Self::montgomery_multiply(c, Self::R2) } } pub fn to_canonical(&self) -> [u64; 4] { // Let x * R = self. We compute M(x * R, 1) = x * R * R^-1 = x. Self::montgomery_multiply(self.limbs, [1, 0, 0, 0]) } #[unroll_for_loops] fn montgomery_multiply(a: [u64; 4], b: [u64; 4]) -> [u64; 4] { // Interleaved Montgomery multiplication, as described in Algorithm 2 of // https://eprint.iacr.org/2017/1057.pdf // Note that in the loop below, to avoid explicitly shifting c, we will treat i as the least // significant digit and wrap around. let mut c = [0u64; 5]; for i in 0..4 { // Add a[i] b to c. let mut carry = 0; for j in 0..4 { let result = c[(i + j) % 5] as u128 + a[i] as u128 * b[j] as u128 + carry as u128; c[(i + j) % 5] = result as u64; carry = (result >> 64) as u64; } c[(i + 4) % 5] += carry; // q = u c mod r = u c[0] mod r. let q = Self::MU.wrapping_mul(c[i]); // C += N q carry = 0; for j in 0..4 { let result = c[(i + j) % 5] as u128 + q as u128 * Self::ORDER[j] as u128 + carry as u128; c[(i + j) % 5] = result as u64; carry = (result >> 64) as u64; } c[(i + 4) % 5] += carry; debug_assert_eq!(c[i], 0); } let mut result = [c[4], c[0], c[1], c[2]]; // Final conditional subtraction. if cmp(result, Self::ORDER) != Less { result = sub(result, Self::ORDER); } result } } impl Add<Bls12377Scalar> for Bls12377Scalar { type Output = Self; fn add(self, rhs: Self) -> Self { // First we do a widening addition, then we reduce if necessary. let sum = add_no_overflow(self.limbs, rhs.limbs); let limbs = if cmp(sum, Self::ORDER) == Less { sum } else { sub(sum, Self::ORDER) }; Self { limbs } } } impl Sub<Bls12377Scalar> for Bls12377Scalar { type Output = Self; fn sub(self, rhs: Self) -> Self { let limbs = if cmp(self.limbs, rhs.limbs) == Less { // Underflow occurs, so we compute the difference as `self + (-rhs)`. 
add_no_overflow(self.limbs, (-rhs).limbs) } else { // No underflow, so it's faster to subtract directly. sub(self.limbs, rhs.limbs) }; Self { limbs } } } impl Mul<Self> for Bls12377Scalar { type Output = Self; fn mul(self, rhs: Self) -> Self { Self { limbs: Self::montgomery_multiply(self.limbs, rhs.limbs) } } } impl Div<Bls12377Scalar> for Bls12377Scalar { type Output = Self; fn div(self, rhs: Self) -> Self { self * rhs.multiplicative_inverse().expect("No inverse") } } impl Neg for Bls12377Scalar { type Output = Self; fn neg(self) -> Self { if self == Self::ZERO { Self::ZERO } else {
} } impl Field for Bls12377Scalar { const BITS: usize = 253; const BYTES: usize = 32; const ZERO: Self = Self { limbs: [0; 4] }; const ONE: Self = Self { limbs: Self::R }; const TWO: Self = Self { limbs: [17304940830682775525, 10017539527700119523, 14770643272311271387, 570918138838421475] }; const THREE: Self = Self { limbs: [7147916296078753751, 11795755565450264533, 9448453213491875784, 183737022913545514] }; const FOUR: Self = Self { limbs: [16163137587655999434, 1588334981690687431, 11094542470912991159, 1141836277676842951] }; const FIVE: Self = Self { limbs: [6006113053051977660, 3366551019440832441, 5772352412093595556, 754655161751966990] }; const NEG_ONE: Self = Self { limbs: [10157024534604021774, 16668528035959406606, 5322190058819395602, 387181115924875961] }; const MULTIPLICATIVE_SUBGROUP_GENERATOR: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] }; /// x^11 is a permutation in this field. const ALPHA: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] }; const TWO_ADICITY: usize = 47; /// 60001509534603559531609739528203892656505753216962260608619555 const T: Self = Self { limbs: [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688042326] }; fn to_canonical_u64_vec(&self) -> Vec<u64> { self.to_canonical().to_vec() } fn from_canonical_u64_vec(v: Vec<u64>) -> Self { Self::from_canonical(v[..].try_into().unwrap()) } fn from_canonical_u64(n: u64) -> Self { Self::from_canonical([n, 0, 0, 0]) } fn is_valid_canonical_u64(v: &[u64]) -> bool { v.len() == 4 && cmp(v[..].try_into().unwrap(), Self::ORDER) == Less } fn multiplicative_inverse_assuming_nonzero(&self) -> Self { // Let x R = self. We compute M((x R)^-1, R^3) = x^-1 R^-1 R^3 R^-1 = x^-1 R. 
let self_r_inv = nonzero_multiplicative_inverse(self.limbs, Self::ORDER); Self { limbs: Self::montgomery_multiply(self_r_inv, Self::R3) } } fn rand() -> Self { Self { limbs: rand_range(Self::ORDER), } } fn rand_from_rng<R: Rng>(rng: &mut R) -> Self { Self { limbs: rand_range_from_rng(Self::ORDER, rng), } } } impl Ord for Bls12377Scalar { fn cmp(&self, other: &Self) -> Ordering { self.cmp_helper(other) } } impl PartialOrd for Bls12377Scalar { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Display for Bls12377Scalar { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { field_to_biguint(*self).fmt(f) } } #[cfg(test)] mod tests { use crate::{Bls12377Scalar, Field}; use crate::conversions::u64_slice_to_biguint; use crate::test_arithmetic; #[test] fn bls12scalar_to_and_from_canonical() { let a = [1, 2, 3, 4]; let a_biguint = u64_slice_to_biguint(&a); let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER); let r_biguint = u64_slice_to_biguint(&Bls12377Scalar::R); let a_bls12scalar = Bls12377Scalar::from_canonical(a); assert_eq!(u64_slice_to_biguint(&a_bls12scalar.limbs), &a_biguint * &r_biguint % &order_biguint); assert_eq!(u64_slice_to_biguint(&a_bls12scalar.to_canonical()), a_biguint); } #[test] fn mul_bls12_scalar() { let a = [1, 2, 3, 4]; let b = [3, 4, 5, 6]; let a_biguint = u64_slice_to_biguint(&a); let b_biguint = u64_slice_to_biguint(&b); let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER); let a_blsbase = Bls12377Scalar::from_canonical(a); let b_blsbase = Bls12377Scalar::from_canonical(b); assert_eq!( u64_slice_to_biguint(&(a_blsbase * b_blsbase).to_canonical()), a_biguint * b_biguint % order_biguint); } #[test] fn test_bls12_rand() { let random_element = Bls12377Scalar::rand(); for i in 0..4 { assert_ne!(random_element.limbs[i], 0x0); } } #[test] fn exp() { assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ZERO), Bls12377Scalar::ONE); assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ONE), Bls12377Scalar::THREE); assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(2)), Bls12377Scalar::from_canonical_u64(9)); assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(3)), Bls12377Scalar::from_canonical_u64(27)); } #[test] fn negation() { for i in 0..25 { let i_blsscalar = Bls12377Scalar::from_canonical_u64(i); assert_eq!(i_blsscalar + -i_blsscalar, Bls12377Scalar::ZERO); } } #[test] fn multiplicative_inverse() { for i in 0..25 { let i_blsscalar = Bls12377Scalar::from_canonical_u64(i); let i_inv_blsscalar = i_blsscalar.multiplicative_inverse(); if i == 0 { assert!(i_inv_blsscalar.is_none()); } else { assert_eq!(i_blsscalar * i_inv_blsscalar.unwrap(), Bls12377Scalar::ONE); } } } #[test] fn batch_multiplicative_inverse() { let mut x = Vec::new(); for i in 1..25 { x.push(Bls12377Scalar::from_canonical_u64(i)); } let x_inv = Bls12377Scalar::batch_multiplicative_inverse(&x); assert_eq!(x.len(), x_inv.len()); for (x_i, x_i_inv) in x.into_iter().zip(x_inv) { assert_eq!(x_i * x_i_inv, Bls12377Scalar::ONE); } } #[test] fn num_bits() { assert_eq!(Bls12377Scalar::from_canonical_u64(0b10101).num_bits(), 5); assert_eq!(Bls12377Scalar::from_canonical_u64(u64::max_value()).num_bits(), 64); assert_eq!(Bls12377Scalar::from_canonical([0, 1, 0, 0]).num_bits(), 64 + 1); assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 1]).num_bits(), 64 * 3 + 1); assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 0b10101]).num_bits(), 64 * 3 + 5) } #[test] fn roots_of_unity() { for n_power in 0..10 { let n = 1 << 
n_power as u64; let root = Bls12377Scalar::primitive_root_of_unity(n_power); assert_eq!(root.exp(Bls12377Scalar::from_canonical_u64(n)), Bls12377Scalar::ONE); if n > 1 { assert_ne!(root.exp(Bls12377Scalar::from_canonical_u64(n - 1)), Bls12377Scalar::ONE) } } } #[test] fn primitive_root_order() { for n_power in 0..10 { let root = Bls12377Scalar::primitive_root_of_unity(n_power); let order = Bls12377Scalar::generator_order(root); assert_eq!(order, 1 << n_power, "2^{}'th primitive root", n_power); } } test_arithmetic!(crate::Bls12377Scalar); }
Self { limbs: sub(Self::ORDER, self.limbs) } }
conditional_block
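The masked `conditional_block` here is the nonzero branch of `Neg`: in a prime field, `-x` is `ORDER - x`, with zero special-cased so the result stays in canonical range. A single-limb sketch of the same rule (one u64 instead of the record's `[u64; 4]` limbs; the modulus below is an arbitrary example, not the BLS12-377 order):

```rust
/// Additive inverse modulo `order`: 0 maps to itself, otherwise order - x.
fn neg_mod(x: u64, order: u64) -> u64 {
    if x == 0 { 0 } else { order - x }
}

fn main() {
    let p = 0xFFFF_FFFF_0000_0001u64; // example 64-bit prime modulus
    assert_eq!(neg_mod(0, p), 0);
    // x + (-x) must vanish mod p.
    assert_eq!((neg_mod(5, p) as u128 + 5) % p as u128, 0);
}
```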
bls12_377_scalar.rs
//! This module implements field arithmetic for BLS12-377's scalar field. use std::cmp::Ordering::Less; use std::convert::TryInto; use std::ops::{Add, Div, Mul, Neg, Sub}; use rand::Rng; use unroll::unroll_for_loops; use crate::{add_no_overflow, cmp, Field, sub, field_to_biguint, rand_range, rand_range_from_rng}; use crate::nonzero_multiplicative_inverse; use std::cmp::Ordering; use std::fmt; use std::fmt::{Display, Formatter}; /// An element of the BLS12 group's scalar field. #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default)] pub struct Bls12377Scalar { /// Montgomery representation, encoded with little-endian u64 limbs. pub limbs: [u64; 4], } impl Bls12377Scalar { /// The order of the field: /// 8444461749428370424248824938781546531375899335154063827935233455917409239041 pub const ORDER: [u64; 4] = [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688173398]; /// R in the context of the Montgomery reduction, i.e. 2^256 % |F|. pub(crate) const R: [u64; 4] = [9015221291577245683, 8239323489949974514, 1646089257421115374, 958099254763297437]; /// R^2 in the context of the Montgomery reduction, i.e. 2^256^2 % |F|. pub(crate) const R2: [u64; 4] = [2726216793283724667, 14712177743343147295, 12091039717619697043, 81024008013859129]; /// R^3 in the context of the Montgomery reduction, i.e. 2^256^3 % |F|. pub(crate) const R3: [u64; 4] = [7656847007262524748, 7083357369969088153, 12818756329091487507, 432872940405820890]; /// In the context of Montgomery multiplication, µ = -|F|^-1 mod 2^64. const MU: u64 = 725501752471715839; pub fn from_canonical(c: [u64; 4]) -> Self { // We compute M(c, R^2) = c * R^2 * R^-1 = c * R. Self { limbs: Self::montgomery_multiply(c, Self::R2) } } pub fn to_canonical(&self) -> [u64; 4] { // Let x * R = self. We compute M(x * R, 1) = x * R * R^-1 = x. Self::montgomery_multiply(self.limbs, [1, 0, 0, 0]) } #[unroll_for_loops] fn montgomery_multiply(a: [u64; 4], b: [u64; 4]) -> [u64; 4] { // Interleaved Montgomery multiplication, as described in Algorithm 2 of // https://eprint.iacr.org/2017/1057.pdf // Note that in the loop below, to avoid explicitly shifting c, we will treat i as the least // significant digit and wrap around. let mut c = [0u64; 5]; for i in 0..4 { // Add a[i] b to c. let mut carry = 0; for j in 0..4 { let result = c[(i + j) % 5] as u128 + a[i] as u128 * b[j] as u128 + carry as u128; c[(i + j) % 5] = result as u64; carry = (result >> 64) as u64; } c[(i + 4) % 5] += carry; // q = u c mod r = u c[0] mod r. let q = Self::MU.wrapping_mul(c[i]); // C += N q carry = 0; for j in 0..4 { let result = c[(i + j) % 5] as u128 + q as u128 * Self::ORDER[j] as u128 + carry as u128; c[(i + j) % 5] = result as u64; carry = (result >> 64) as u64; } c[(i + 4) % 5] += carry; debug_assert_eq!(c[i], 0); } let mut result = [c[4], c[0], c[1], c[2]]; // Final conditional subtraction. if cmp(result, Self::ORDER) != Less { result = sub(result, Self::ORDER); } result } } impl Add<Bls12377Scalar> for Bls12377Scalar { type Output = Self; fn add(self, rhs: Self) -> Self { // First we do a widening addition, then we reduce if necessary. let sum = add_no_overflow(self.limbs, rhs.limbs); let limbs = if cmp(sum, Self::ORDER) == Less { sum } else { sub(sum, Self::ORDER) }; Self { limbs } } } impl Sub<Bls12377Scalar> for Bls12377Scalar { type Output = Self; fn sub(self, rhs: Self) -> Self { let limbs = if cmp(self.limbs, rhs.limbs) == Less { // Underflow occurs, so we compute the difference as `self + (-rhs)`. 
add_no_overflow(self.limbs, (-rhs).limbs) } else { // No underflow, so it's faster to subtract directly. sub(self.limbs, rhs.limbs) }; Self { limbs } } } impl Mul<Self> for Bls12377Scalar { type Output = Self; fn mul(self, rhs: Self) -> Self { Self { limbs: Self::montgomery_multiply(self.limbs, rhs.limbs) } } } impl Div<Bls12377Scalar> for Bls12377Scalar { type Output = Self; fn div(self, rhs: Self) -> Self { self * rhs.multiplicative_inverse().expect("No inverse") } } impl Neg for Bls12377Scalar { type Output = Self; fn neg(self) -> Self { if self == Self::ZERO { Self::ZERO } else { Self { limbs: sub(Self::ORDER, self.limbs) } } } } impl Field for Bls12377Scalar { const BITS: usize = 253; const BYTES: usize = 32; const ZERO: Self = Self { limbs: [0; 4] }; const ONE: Self = Self { limbs: Self::R }; const TWO: Self = Self { limbs: [17304940830682775525, 10017539527700119523, 14770643272311271387, 570918138838421475] }; const THREE: Self = Self { limbs: [7147916296078753751, 11795755565450264533, 9448453213491875784, 183737022913545514] }; const FOUR: Self = Self { limbs: [16163137587655999434, 1588334981690687431, 11094542470912991159, 1141836277676842951] }; const FIVE: Self = Self { limbs: [6006113053051977660, 3366551019440832441, 5772352412093595556, 754655161751966990] }; const NEG_ONE: Self = Self { limbs: [10157024534604021774, 16668528035959406606, 5322190058819395602, 387181115924875961] }; const MULTIPLICATIVE_SUBGROUP_GENERATOR: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] }; /// x^11 is a permutation in this field. const ALPHA: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] }; const TWO_ADICITY: usize = 47; /// 60001509534603559531609739528203892656505753216962260608619555 const T: Self = Self { limbs: [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688042326] }; fn to_canonical_u64_vec(&self) -> Vec<u64> { self.to_canonical().to_vec() } fn from_canonical_u64_vec(v: Vec<u64>) -> Self { Self::from_canonical(v[..].try_into().unwrap()) } fn from_canonical_u64(n: u64) -> Self { Self::from_canonical([n, 0, 0, 0]) } fn is_valid_canonical_u64(v: &[u64]) -> bool { v.len() == 4 && cmp(v[..].try_into().unwrap(), Self::ORDER) == Less } fn multiplicative_inverse_assuming_nonzero(&self) -> Self { // Let x R = self. We compute M((x R)^-1, R^3) = x^-1 R^-1 R^3 R^-1 = x^-1 R. 
let self_r_inv = nonzero_multiplicative_inverse(self.limbs, Self::ORDER); Self { limbs: Self::montgomery_multiply(self_r_inv, Self::R3) } } fn rand() -> Self { Self { limbs: rand_range(Self::ORDER), } } fn rand_from_rng<R: Rng>(rng: &mut R) -> Self { Self { limbs: rand_range_from_rng(Self::ORDER, rng), } } } impl Ord for Bls12377Scalar { fn cmp(&self, other: &Self) -> Ordering { self.cmp_helper(other) } } impl PartialOrd for Bls12377Scalar { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Display for Bls12377Scalar { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { field_to_biguint(*self).fmt(f) } } #[cfg(test)] mod tests { use crate::{Bls12377Scalar, Field}; use crate::conversions::u64_slice_to_biguint; use crate::test_arithmetic; #[test] fn bls12scalar_to_and_from_canonical() { let a = [1, 2, 3, 4]; let a_biguint = u64_slice_to_biguint(&a); let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER); let r_biguint = u64_slice_to_biguint(&Bls12377Scalar::R); let a_bls12scalar = Bls12377Scalar::from_canonical(a); assert_eq!(u64_slice_to_biguint(&a_bls12scalar.limbs), &a_biguint * &r_biguint % &order_biguint); assert_eq!(u64_slice_to_biguint(&a_bls12scalar.to_canonical()), a_biguint); } #[test] fn mul_bls12_scalar() { let a = [1, 2, 3, 4]; let b = [3, 4, 5, 6]; let a_biguint = u64_slice_to_biguint(&a); let b_biguint = u64_slice_to_biguint(&b); let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER); let a_blsbase = Bls12377Scalar::from_canonical(a); let b_blsbase = Bls12377Scalar::from_canonical(b); assert_eq!( u64_slice_to_biguint(&(a_blsbase * b_blsbase).to_canonical()), a_biguint * b_biguint % order_biguint); } #[test] fn test_bls12_rand() { let random_element = Bls12377Scalar::rand(); for i in 0..4 { assert_ne!(random_element.limbs[i], 0x0); } } #[test] fn exp() { assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ZERO), Bls12377Scalar::ONE); assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ONE), Bls12377Scalar::THREE); assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(2)), Bls12377Scalar::from_canonical_u64(9)); assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(3)), Bls12377Scalar::from_canonical_u64(27)); } #[test] fn negation() { for i in 0..25 { let i_blsscalar = Bls12377Scalar::from_canonical_u64(i); assert_eq!(i_blsscalar + -i_blsscalar, Bls12377Scalar::ZERO); } } #[test] fn multiplicative_inverse() { for i in 0..25 { let i_blsscalar = Bls12377Scalar::from_canonical_u64(i); let i_inv_blsscalar = i_blsscalar.multiplicative_inverse(); if i == 0 { assert!(i_inv_blsscalar.is_none()); } else { assert_eq!(i_blsscalar * i_inv_blsscalar.unwrap(), Bls12377Scalar::ONE); } } } #[test] fn batch_multiplicative_inverse() { let mut x = Vec::new(); for i in 1..25 { x.push(Bls12377Scalar::from_canonical_u64(i)); } let x_inv = Bls12377Scalar::batch_multiplicative_inverse(&x); assert_eq!(x.len(), x_inv.len()); for (x_i, x_i_inv) in x.into_iter().zip(x_inv) { assert_eq!(x_i * x_i_inv, Bls12377Scalar::ONE); } } #[test] fn n
) { assert_eq!(Bls12377Scalar::from_canonical_u64(0b10101).num_bits(), 5); assert_eq!(Bls12377Scalar::from_canonical_u64(u64::max_value()).num_bits(), 64); assert_eq!(Bls12377Scalar::from_canonical([0, 1, 0, 0]).num_bits(), 64 + 1); assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 1]).num_bits(), 64 * 3 + 1); assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 0b10101]).num_bits(), 64 * 3 + 5) } #[test] fn roots_of_unity() { for n_power in 0..10 { let n = 1 << n_power as u64; let root = Bls12377Scalar::primitive_root_of_unity(n_power); assert_eq!(root.exp(Bls12377Scalar::from_canonical_u64(n)), Bls12377Scalar::ONE); if n > 1 { assert_ne!(root.exp(Bls12377Scalar::from_canonical_u64(n - 1)), Bls12377Scalar::ONE) } } } #[test] fn primitive_root_order() { for n_power in 0..10 { let root = Bls12377Scalar::primitive_root_of_unity(n_power); let order = Bls12377Scalar::generator_order(root); assert_eq!(order, 1 << n_power, "2^{}'th primitive root", n_power); } } test_arithmetic!(crate::Bls12377Scalar); }
um_bits(
identifier_name
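The split identifier above reassembles to `num_bits`. Its contract, read off the test cases in the suffix, is the bit length of the canonical value over little-endian u64 limbs. A plausible standalone reconstruction under that reading (an assumption from the tests, not the crate's actual implementation):

```rust
/// Bit length of a little-endian multi-limb integer: position of the
/// highest set bit across all limbs, or 0 for zero.
fn num_bits(limbs: &[u64]) -> usize {
    for (i, &limb) in limbs.iter().enumerate().rev() {
        if limb != 0 {
            return 64 * i + (64 - limb.leading_zeros() as usize);
        }
    }
    0
}

fn main() {
    // Mirrors the assertions in the record's test suite.
    assert_eq!(num_bits(&[0b10101, 0, 0, 0]), 5);
    assert_eq!(num_bits(&[u64::MAX, 0, 0, 0]), 64);
    assert_eq!(num_bits(&[0, 1, 0, 0]), 64 + 1);
    assert_eq!(num_bits(&[0, 0, 0, 0b10101]), 64 * 3 + 5);
}
```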
bls12_377_scalar.rs
//! This module implements field arithmetic for BLS12-377's scalar field. use std::cmp::Ordering::Less; use std::convert::TryInto; use std::ops::{Add, Div, Mul, Neg, Sub}; use rand::Rng; use unroll::unroll_for_loops; use crate::{add_no_overflow, cmp, Field, sub, field_to_biguint, rand_range, rand_range_from_rng}; use crate::nonzero_multiplicative_inverse; use std::cmp::Ordering; use std::fmt; use std::fmt::{Display, Formatter}; /// An element of the BLS12 group's scalar field. #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default)] pub struct Bls12377Scalar { /// Montgomery representation, encoded with little-endian u64 limbs. pub limbs: [u64; 4], } impl Bls12377Scalar { /// The order of the field: /// 8444461749428370424248824938781546531375899335154063827935233455917409239041 pub const ORDER: [u64; 4] = [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688173398]; /// R in the context of the Montgomery reduction, i.e. 2^256 % |F|. pub(crate) const R: [u64; 4] = [9015221291577245683, 8239323489949974514, 1646089257421115374, 958099254763297437]; /// R^2 in the context of the Montgomery reduction, i.e. 2^256^2 % |F|. pub(crate) const R2: [u64; 4] = [2726216793283724667, 14712177743343147295, 12091039717619697043, 81024008013859129]; /// R^3 in the context of the Montgomery reduction, i.e. 2^256^3 % |F|. pub(crate) const R3: [u64; 4] = [7656847007262524748, 7083357369969088153, 12818756329091487507, 432872940405820890]; /// In the context of Montgomery multiplication, µ = -|F|^-1 mod 2^64. const MU: u64 = 725501752471715839; pub fn from_canonical(c: [u64; 4]) -> Self { // We compute M(c, R^2) = c * R^2 * R^-1 = c * R. Self { limbs: Self::montgomery_multiply(c, Self::R2) } } pub fn to_canonical(&self) -> [u64; 4] { // Let x * R = self. We compute M(x * R, 1) = x * R * R^-1 = x. Self::montgomery_multiply(self.limbs, [1, 0, 0, 0]) } #[unroll_for_loops] fn montgomery_multiply(a: [u64; 4], b: [u64; 4]) -> [u64; 4] { // Interleaved Montgomery multiplication, as described in Algorithm 2 of // https://eprint.iacr.org/2017/1057.pdf // Note that in the loop below, to avoid explicitly shifting c, we will treat i as the least // significant digit and wrap around. let mut c = [0u64; 5]; for i in 0..4 { // Add a[i] b to c. let mut carry = 0; for j in 0..4 { let result = c[(i + j) % 5] as u128 + a[i] as u128 * b[j] as u128 + carry as u128; c[(i + j) % 5] = result as u64; carry = (result >> 64) as u64; } c[(i + 4) % 5] += carry; // q = u c mod r = u c[0] mod r. let q = Self::MU.wrapping_mul(c[i]); // C += N q carry = 0; for j in 0..4 { let result = c[(i + j) % 5] as u128 + q as u128 * Self::ORDER[j] as u128 + carry as u128; c[(i + j) % 5] = result as u64; carry = (result >> 64) as u64; } c[(i + 4) % 5] += carry; debug_assert_eq!(c[i], 0); } let mut result = [c[4], c[0], c[1], c[2]]; // Final conditional subtraction. if cmp(result, Self::ORDER) != Less { result = sub(result, Self::ORDER); } result } } impl Add<Bls12377Scalar> for Bls12377Scalar { type Output = Self; fn add(self, rhs: Self) -> Self { // First we do a widening addition, then we reduce if necessary. let sum = add_no_overflow(self.limbs, rhs.limbs); let limbs = if cmp(sum, Self::ORDER) == Less { sum } else { sub(sum, Self::ORDER) }; Self { limbs } } } impl Sub<Bls12377Scalar> for Bls12377Scalar { type Output = Self; fn sub(self, rhs: Self) -> Self { let limbs = if cmp(self.limbs, rhs.limbs) == Less { // Underflow occurs, so we compute the difference as `self + (-rhs)`. 
add_no_overflow(self.limbs, (-rhs).limbs) } else { // No underflow, so it's faster to subtract directly. sub(self.limbs, rhs.limbs) }; Self { limbs } } } impl Mul<Self> for Bls12377Scalar { type Output = Self; fn mul(self, rhs: Self) -> Self { Self { limbs: Self::montgomery_multiply(self.limbs, rhs.limbs) }
} impl Div<Bls12377Scalar> for Bls12377Scalar { type Output = Self; fn div(self, rhs: Self) -> Self { self * rhs.multiplicative_inverse().expect("No inverse") } } impl Neg for Bls12377Scalar { type Output = Self; fn neg(self) -> Self { if self == Self::ZERO { Self::ZERO } else { Self { limbs: sub(Self::ORDER, self.limbs) } } } } impl Field for Bls12377Scalar { const BITS: usize = 253; const BYTES: usize = 32; const ZERO: Self = Self { limbs: [0; 4] }; const ONE: Self = Self { limbs: Self::R }; const TWO: Self = Self { limbs: [17304940830682775525, 10017539527700119523, 14770643272311271387, 570918138838421475] }; const THREE: Self = Self { limbs: [7147916296078753751, 11795755565450264533, 9448453213491875784, 183737022913545514] }; const FOUR: Self = Self { limbs: [16163137587655999434, 1588334981690687431, 11094542470912991159, 1141836277676842951] }; const FIVE: Self = Self { limbs: [6006113053051977660, 3366551019440832441, 5772352412093595556, 754655161751966990] }; const NEG_ONE: Self = Self { limbs: [10157024534604021774, 16668528035959406606, 5322190058819395602, 387181115924875961] }; const MULTIPLICATIVE_SUBGROUP_GENERATOR: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] }; /// x^11 is a permutation in this field. const ALPHA: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] }; const TWO_ADICITY: usize = 47; /// 60001509534603559531609739528203892656505753216962260608619555 const T: Self = Self { limbs: [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688042326] }; fn to_canonical_u64_vec(&self) -> Vec<u64> { self.to_canonical().to_vec() } fn from_canonical_u64_vec(v: Vec<u64>) -> Self { Self::from_canonical(v[..].try_into().unwrap()) } fn from_canonical_u64(n: u64) -> Self { Self::from_canonical([n, 0, 0, 0]) } fn is_valid_canonical_u64(v: &[u64]) -> bool { v.len() == 4 && cmp(v[..].try_into().unwrap(), Self::ORDER) == Less } fn multiplicative_inverse_assuming_nonzero(&self) -> Self { // Let x R = self. We compute M((x R)^-1, R^3) = x^-1 R^-1 R^3 R^-1 = x^-1 R. 
let self_r_inv = nonzero_multiplicative_inverse(self.limbs, Self::ORDER); Self { limbs: Self::montgomery_multiply(self_r_inv, Self::R3) } } fn rand() -> Self { Self { limbs: rand_range(Self::ORDER), } } fn rand_from_rng<R: Rng>(rng: &mut R) -> Self { Self { limbs: rand_range_from_rng(Self::ORDER, rng), } } } impl Ord for Bls12377Scalar { fn cmp(&self, other: &Self) -> Ordering { self.cmp_helper(other) } } impl PartialOrd for Bls12377Scalar { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Display for Bls12377Scalar { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { field_to_biguint(*self).fmt(f) } } #[cfg(test)] mod tests { use crate::{Bls12377Scalar, Field}; use crate::conversions::u64_slice_to_biguint; use crate::test_arithmetic; #[test] fn bls12scalar_to_and_from_canonical() { let a = [1, 2, 3, 4]; let a_biguint = u64_slice_to_biguint(&a); let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER); let r_biguint = u64_slice_to_biguint(&Bls12377Scalar::R); let a_bls12scalar = Bls12377Scalar::from_canonical(a); assert_eq!(u64_slice_to_biguint(&a_bls12scalar.limbs), &a_biguint * &r_biguint % &order_biguint); assert_eq!(u64_slice_to_biguint(&a_bls12scalar.to_canonical()), a_biguint); } #[test] fn mul_bls12_scalar() { let a = [1, 2, 3, 4]; let b = [3, 4, 5, 6]; let a_biguint = u64_slice_to_biguint(&a); let b_biguint = u64_slice_to_biguint(&b); let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER); let a_blsbase = Bls12377Scalar::from_canonical(a); let b_blsbase = Bls12377Scalar::from_canonical(b); assert_eq!( u64_slice_to_biguint(&(a_blsbase * b_blsbase).to_canonical()), a_biguint * b_biguint % order_biguint); } #[test] fn test_bls12_rand() { let random_element = Bls12377Scalar::rand(); for i in 0..4 { assert_ne!(random_element.limbs[i], 0x0); } } #[test] fn exp() { assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ZERO), Bls12377Scalar::ONE); assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ONE), Bls12377Scalar::THREE); assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(2)), Bls12377Scalar::from_canonical_u64(9)); assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(3)), Bls12377Scalar::from_canonical_u64(27)); } #[test] fn negation() { for i in 0..25 { let i_blsscalar = Bls12377Scalar::from_canonical_u64(i); assert_eq!(i_blsscalar + -i_blsscalar, Bls12377Scalar::ZERO); } } #[test] fn multiplicative_inverse() { for i in 0..25 { let i_blsscalar = Bls12377Scalar::from_canonical_u64(i); let i_inv_blsscalar = i_blsscalar.multiplicative_inverse(); if i == 0 { assert!(i_inv_blsscalar.is_none()); } else { assert_eq!(i_blsscalar * i_inv_blsscalar.unwrap(), Bls12377Scalar::ONE); } } } #[test] fn batch_multiplicative_inverse() { let mut x = Vec::new(); for i in 1..25 { x.push(Bls12377Scalar::from_canonical_u64(i)); } let x_inv = Bls12377Scalar::batch_multiplicative_inverse(&x); assert_eq!(x.len(), x_inv.len()); for (x_i, x_i_inv) in x.into_iter().zip(x_inv) { assert_eq!(x_i * x_i_inv, Bls12377Scalar::ONE); } } #[test] fn num_bits() { assert_eq!(Bls12377Scalar::from_canonical_u64(0b10101).num_bits(), 5); assert_eq!(Bls12377Scalar::from_canonical_u64(u64::max_value()).num_bits(), 64); assert_eq!(Bls12377Scalar::from_canonical([0, 1, 0, 0]).num_bits(), 64 + 1); assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 1]).num_bits(), 64 * 3 + 1); assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 0b10101]).num_bits(), 64 * 3 + 5) } #[test] fn roots_of_unity() { for n_power in 0..10 { let n = 1 << 
n_power as u64; let root = Bls12377Scalar::primitive_root_of_unity(n_power); assert_eq!(root.exp(Bls12377Scalar::from_canonical_u64(n)), Bls12377Scalar::ONE); if n > 1 { assert_ne!(root.exp(Bls12377Scalar::from_canonical_u64(n - 1)), Bls12377Scalar::ONE) } } } #[test] fn primitive_root_order() { for n_power in 0..10 { let root = Bls12377Scalar::primitive_root_of_unity(n_power); let order = Bls12377Scalar::generator_order(root); assert_eq!(order, 1 << n_power, "2^{}'th primitive root", n_power); } } test_arithmetic!(crate::Bls12377Scalar); }
}
random_line_split
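The multiplicative_inverse_assuming_nonzero trick above never leaves the Montgomery domain: a plain modular inverse of the stored representation x·R yields x^-1·R^-1, and a single Montgomery multiply by the precomputed R^3 restores the factor of R. A minimal Python sketch of the same identity, using a toy prime and an 8-bit radix rather than the BLS12-377 scalar field (all names here are illustrative):

# Toy parameters: a small prime modulus and radix, not the real field.
N = 101                      # stands in for ORDER
R = 1 << 8                   # Montgomery radix; gcd(R, N) must be 1
R3 = pow(R, 3, N)            # precomputed R^3 mod N

def mont_mul(a, b):
    # Montgomery product M(a, b) = a * b * R^-1 mod N (plain ints here).
    return a * b * pow(R, -1, N) % N

def to_mont(x):
    return x * R % N         # x -> x R, i.e. M(x, R^2)

def mont_inverse(x_mont):
    # Inverting the representation x R gives x^-1 R^-1; one Montgomery
    # multiply by R^3 fixes the factor: x^-1 R^-1 * R^3 * R^-1 = x^-1 R.
    return mont_mul(pow(x_mont, -1, N), R3)

for x in range(1, N):
    assert mont_mul(to_mont(x), mont_inverse(to_mont(x))) == to_mont(1)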
bls12_377_scalar.rs
//! This module implements field arithmetic for BLS12-377's scalar field. use std::cmp::Ordering::Less; use std::convert::TryInto; use std::ops::{Add, Div, Mul, Neg, Sub}; use rand::Rng; use unroll::unroll_for_loops; use crate::{add_no_overflow, cmp, Field, sub, field_to_biguint, rand_range, rand_range_from_rng}; use crate::nonzero_multiplicative_inverse; use std::cmp::Ordering; use std::fmt; use std::fmt::{Display, Formatter}; /// An element of the BLS12 group's scalar field. #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default)] pub struct Bls12377Scalar { /// Montgomery representation, encoded with little-endian u64 limbs. pub limbs: [u64; 4], } impl Bls12377Scalar { /// The order of the field: /// 8444461749428370424248824938781546531375899335154063827935233455917409239041 pub const ORDER: [u64; 4] = [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688173398]; /// R in the context of the Montgomery reduction, i.e. 2^256 % |F|. pub(crate) const R: [u64; 4] = [9015221291577245683, 8239323489949974514, 1646089257421115374, 958099254763297437]; /// R^2 in the context of the Montgomery reduction, i.e. 2^256^2 % |F|. pub(crate) const R2: [u64; 4] = [2726216793283724667, 14712177743343147295, 12091039717619697043, 81024008013859129]; /// R^3 in the context of the Montgomery reduction, i.e. 2^256^3 % |F|. pub(crate) const R3: [u64; 4] = [7656847007262524748, 7083357369969088153, 12818756329091487507, 432872940405820890]; /// In the context of Montgomery multiplication, µ = -|F|^-1 mod 2^64. const MU: u64 = 725501752471715839; pub fn from_canonical(c: [u64; 4]) -> Self { // We compute M(c, R^2) = c * R^2 * R^-1 = c * R. Self { limbs: Self::montgomery_multiply(c, Self::R2) } } pub fn to_canonical(&self) -> [u64; 4] { // Let x * R = self. We compute M(x * R, 1) = x * R * R^-1 = x. Self::montgomery_multiply(self.limbs, [1, 0, 0, 0]) } #[unroll_for_loops] fn montgomery_multiply(a: [u64; 4], b: [u64; 4]) -> [u64; 4] { // Interleaved Montgomery multiplication, as described in Algorithm 2 of // https://eprint.iacr.org/2017/1057.pdf // Note that in the loop below, to avoid explicitly shifting c, we will treat i as the least // significant digit and wrap around. let mut c = [0u64; 5]; for i in 0..4 { // Add a[i] b to c. let mut carry = 0; for j in 0..4 { let result = c[(i + j) % 5] as u128 + a[i] as u128 * b[j] as u128 + carry as u128; c[(i + j) % 5] = result as u64; carry = (result >> 64) as u64; } c[(i + 4) % 5] += carry; // q = u c mod r = u c[0] mod r. let q = Self::MU.wrapping_mul(c[i]); // C += N q carry = 0; for j in 0..4 { let result = c[(i + j) % 5] as u128 + q as u128 * Self::ORDER[j] as u128 + carry as u128; c[(i + j) % 5] = result as u64; carry = (result >> 64) as u64; } c[(i + 4) % 5] += carry; debug_assert_eq!(c[i], 0); } let mut result = [c[4], c[0], c[1], c[2]]; // Final conditional subtraction. if cmp(result, Self::ORDER) != Less { result = sub(result, Self::ORDER); } result } } impl Add<Bls12377Scalar> for Bls12377Scalar { type Output = Self; fn add(self, rhs: Self) -> Self { // First we do a widening addition, then we reduce if necessary. let sum = add_no_overflow(self.limbs, rhs.limbs); let limbs = if cmp(sum, Self::ORDER) == Less { sum } else { sub(sum, Self::ORDER) }; Self { limbs } } } impl Sub<Bls12377Scalar> for Bls12377Scalar { type Output = Self; fn sub(self, rhs: Self) -> Self {
} impl Mul<Self> for Bls12377Scalar { type Output = Self; fn mul(self, rhs: Self) -> Self { Self { limbs: Self::montgomery_multiply(self.limbs, rhs.limbs) } } } impl Div<Bls12377Scalar> for Bls12377Scalar { type Output = Self; fn div(self, rhs: Self) -> Self { self * rhs.multiplicative_inverse().expect("No inverse") } } impl Neg for Bls12377Scalar { type Output = Self; fn neg(self) -> Self { if self == Self::ZERO { Self::ZERO } else { Self { limbs: sub(Self::ORDER, self.limbs) } } } } impl Field for Bls12377Scalar { const BITS: usize = 253; const BYTES: usize = 32; const ZERO: Self = Self { limbs: [0; 4] }; const ONE: Self = Self { limbs: Self::R }; const TWO: Self = Self { limbs: [17304940830682775525, 10017539527700119523, 14770643272311271387, 570918138838421475] }; const THREE: Self = Self { limbs: [7147916296078753751, 11795755565450264533, 9448453213491875784, 183737022913545514] }; const FOUR: Self = Self { limbs: [16163137587655999434, 1588334981690687431, 11094542470912991159, 1141836277676842951] }; const FIVE: Self = Self { limbs: [6006113053051977660, 3366551019440832441, 5772352412093595556, 754655161751966990] }; const NEG_ONE: Self = Self { limbs: [10157024534604021774, 16668528035959406606, 5322190058819395602, 387181115924875961] }; const MULTIPLICATIVE_SUBGROUP_GENERATOR: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] }; /// x^11 is a permutation in this field. const ALPHA: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] }; const TWO_ADICITY: usize = 47; /// 60001509534603559531609739528203892656505753216962260608619555 const T: Self = Self { limbs: [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688042326] }; fn to_canonical_u64_vec(&self) -> Vec<u64> { self.to_canonical().to_vec() } fn from_canonical_u64_vec(v: Vec<u64>) -> Self { Self::from_canonical(v[..].try_into().unwrap()) } fn from_canonical_u64(n: u64) -> Self { Self::from_canonical([n, 0, 0, 0]) } fn is_valid_canonical_u64(v: &[u64]) -> bool { v.len() == 4 && cmp(v[..].try_into().unwrap(), Self::ORDER) == Less } fn multiplicative_inverse_assuming_nonzero(&self) -> Self { // Let x R = self. We compute M((x R)^-1, R^3) = x^-1 R^-1 R^3 R^-1 = x^-1 R. 
let self_r_inv = nonzero_multiplicative_inverse(self.limbs, Self::ORDER); Self { limbs: Self::montgomery_multiply(self_r_inv, Self::R3) } } fn rand() -> Self { Self { limbs: rand_range(Self::ORDER), } } fn rand_from_rng<R: Rng>(rng: &mut R) -> Self { Self { limbs: rand_range_from_rng(Self::ORDER, rng), } } } impl Ord for Bls12377Scalar { fn cmp(&self, other: &Self) -> Ordering { self.cmp_helper(other) } } impl PartialOrd for Bls12377Scalar { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Display for Bls12377Scalar { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { field_to_biguint(*self).fmt(f) } } #[cfg(test)] mod tests { use crate::{Bls12377Scalar, Field}; use crate::conversions::u64_slice_to_biguint; use crate::test_arithmetic; #[test] fn bls12scalar_to_and_from_canonical() { let a = [1, 2, 3, 4]; let a_biguint = u64_slice_to_biguint(&a); let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER); let r_biguint = u64_slice_to_biguint(&Bls12377Scalar::R); let a_bls12scalar = Bls12377Scalar::from_canonical(a); assert_eq!(u64_slice_to_biguint(&a_bls12scalar.limbs), &a_biguint * &r_biguint % &order_biguint); assert_eq!(u64_slice_to_biguint(&a_bls12scalar.to_canonical()), a_biguint); } #[test] fn mul_bls12_scalar() { let a = [1, 2, 3, 4]; let b = [3, 4, 5, 6]; let a_biguint = u64_slice_to_biguint(&a); let b_biguint = u64_slice_to_biguint(&b); let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER); let a_blsbase = Bls12377Scalar::from_canonical(a); let b_blsbase = Bls12377Scalar::from_canonical(b); assert_eq!( u64_slice_to_biguint(&(a_blsbase * b_blsbase).to_canonical()), a_biguint * b_biguint % order_biguint); } #[test] fn test_bls12_rand() { let random_element = Bls12377Scalar::rand(); for i in 0..4 { assert_ne!(random_element.limbs[i], 0x0); } } #[test] fn exp() { assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ZERO), Bls12377Scalar::ONE); assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ONE), Bls12377Scalar::THREE); assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(2)), Bls12377Scalar::from_canonical_u64(9)); assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(3)), Bls12377Scalar::from_canonical_u64(27)); } #[test] fn negation() { for i in 0..25 { let i_blsscalar = Bls12377Scalar::from_canonical_u64(i); assert_eq!(i_blsscalar + -i_blsscalar, Bls12377Scalar::ZERO); } } #[test] fn multiplicative_inverse() { for i in 0..25 { let i_blsscalar = Bls12377Scalar::from_canonical_u64(i); let i_inv_blsscalar = i_blsscalar.multiplicative_inverse(); if i == 0 { assert!(i_inv_blsscalar.is_none()); } else { assert_eq!(i_blsscalar * i_inv_blsscalar.unwrap(), Bls12377Scalar::ONE); } } } #[test] fn batch_multiplicative_inverse() { let mut x = Vec::new(); for i in 1..25 { x.push(Bls12377Scalar::from_canonical_u64(i)); } let x_inv = Bls12377Scalar::batch_multiplicative_inverse(&x); assert_eq!(x.len(), x_inv.len()); for (x_i, x_i_inv) in x.into_iter().zip(x_inv) { assert_eq!(x_i * x_i_inv, Bls12377Scalar::ONE); } } #[test] fn num_bits() { assert_eq!(Bls12377Scalar::from_canonical_u64(0b10101).num_bits(), 5); assert_eq!(Bls12377Scalar::from_canonical_u64(u64::max_value()).num_bits(), 64); assert_eq!(Bls12377Scalar::from_canonical([0, 1, 0, 0]).num_bits(), 64 + 1); assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 1]).num_bits(), 64 * 3 + 1); assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 0b10101]).num_bits(), 64 * 3 + 5) } #[test] fn roots_of_unity() { for n_power in 0..10 { let n = 1 << 
n_power as u64; let root = Bls12377Scalar::primitive_root_of_unity(n_power); assert_eq!(root.exp(Bls12377Scalar::from_canonical_u64(n)), Bls12377Scalar::ONE); if n > 1 { assert_ne!(root.exp(Bls12377Scalar::from_canonical_u64(n - 1)), Bls12377Scalar::ONE) } } } #[test] fn primitive_root_order() { for n_power in 0..10 { let root = Bls12377Scalar::primitive_root_of_unity(n_power); let order = Bls12377Scalar::generator_order(root); assert_eq!(order, 1 << n_power, "2^{}'th primitive root", n_power); } } test_arithmetic!(crate::Bls12377Scalar); }
let limbs = if cmp(self.limbs, rhs.limbs) == Less { // Underflow occurs, so we compute the difference as `self + (-rhs)`. add_no_overflow(self.limbs, (-rhs).limbs) } else { // No underflow, so it's faster to subtract directly. sub(self.limbs, rhs.limbs) }; Self { limbs } }
identifier_body
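The interleaved loop in montgomery_multiply above avoids ever shifting the accumulator by rotating which limb of c counts as least significant. A Python re-derivation of the same algorithm (Algorithm 2 of https://eprint.iacr.org/2017/1057.pdf), checked against plain big-integer arithmetic, may make the carry handling easier to follow. The ORDER limbs are copied from the source; MU is recomputed rather than hard-coded (it comes out to the crate's constant), and the helper names are mine:

MASK = (1 << 64) - 1
ORDER_LIMBS = [725501752471715841, 6461107452199829505,
               6968279316240510977, 1345280370688173398]

def limbs_to_int(limbs):
    return sum(l << (64 * i) for i, l in enumerate(limbs))

def int_to_limbs(x):
    return [(x >> (64 * i)) & MASK for i in range(4)]

N = limbs_to_int(ORDER_LIMBS)
MU = (-pow(N, -1, 1 << 64)) & MASK   # -N^-1 mod 2^64 (Python 3.8+)

def montgomery_multiply(a, b):
    # Returns a * b * R^-1 mod N for 4-limb little-endian a, b (R = 2^256).
    c = [0] * 5
    for i in range(4):
        # c += a[i] * b, with an explicit carry chain and wrap-around index.
        carry = 0
        for j in range(4):
            t = c[(i + j) % 5] + a[i] * b[j] + carry
            c[(i + j) % 5] = t & MASK
            carry = t >> 64
        c[(i + 4) % 5] += carry
        # q is chosen so the limb currently acting as least significant
        # becomes divisible by 2^64, making the "shift" free.
        q = (MU * c[i]) & MASK
        carry = 0
        for j in range(4):
            t = c[(i + j) % 5] + q * ORDER_LIMBS[j] + carry
            c[(i + j) % 5] = t & MASK
            carry = t >> 64
        c[(i + 4) % 5] += carry
        assert c[i] == 0
    r_int = limbs_to_int([c[4], c[0], c[1], c[2]])
    return r_int - N if r_int >= N else r_int   # final conditional subtraction

a, b = 12345, 67890
a_mont = int_to_limbs(a * (1 << 256) % N)
b_mont = int_to_limbs(b * (1 << 256) % N)
assert montgomery_multiply(a_mont, b_mont) == a * b * (1 << 256) % N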
poisson_kriging.py
import numpy as np from pyinterpolate.kriging.helper_functions.euclidean_distance import calculate_distance from pyinterpolate.kriging.helper_functions.euclidean_distance import block_to_block_distances from pyinterpolate.kriging.helper_functions.euclidean_distance import calculate_block_to_block_distance # TODO: remove matrix data structures class PKrige: """ Class for Poisson Kriging, Area-to-area (ATA) and Area-to-Point (ATP) Poisson Kriging interpolation of the unknown values in a given location (position). Class takes two arguments during the initialization: counties_data - population_data - semivariogram_model - Available methods: Static methods: Data visualization methods: Example how to prepare model: """ def __init__(self): """ Class for calculation of poisson kriging """ self.model = None self.joined_datasets = None self.id_col = None self.val_col = None self.pop_col = None self.total_population_per_unit = None self.centroids_of_areal_data = None self.prepared_data = None self.unknown_area_id = None # Parameters self.lags = None self.step = None self.min_no_of_observations = None self.max_search_radius = None # Data preparation functions def set_params(self, model, joined_datasets, population_series, centroids_dataset, id_col, val_col, pop_col, lags_number, lag_step_size, min_no_of_observations, search_radius): self.model = model self.joined_datasets = joined_datasets self.total_population_per_unit = population_series self.centroids_of_areal_data = centroids_dataset self.id_col = id_col self.val_col = val_col self.pop_col = pop_col self.lags = lags_number self.step = lag_step_size self.min_no_of_observations = min_no_of_observations self.max_search_radius = search_radius print('Parameters have been set') def prepare_prediction_data(self, unknown_areal_data_row, unknown_areal_data_centroid, weighted=False, verbose=False): """ Function prepares data from unknown locations for Poisson Kriging. :param unknown_areal_data: PKData object (row) with areal and population data. :param weighted: distances weighted by population (True) or not (False), :param verbose: if True then method informs about the successful operation. 
:return prediction: prepared dataset which contains: [[x, y, value, known_area_id, distance_to_unknown_position], [...]], """ areal_id = unknown_areal_data_centroid[0][-1] cx_cy = unknown_areal_data_centroid[0][:2] r = np.array([cx_cy]) known_centroids = self.centroids_of_areal_data kc = known_centroids[:, :2] # Build set for Poisson Kriging if weighted: weighted_distances = self._calculate_weighted_distances(unknown_areal_data_row, areal_id) s = [] for wd in weighted_distances: for k in known_centroids: if wd[1] in k: s.append(wd[0]) break else: pass s = np.array(s).T kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, weighted_dist_to_unkn] else: distances_array = np.zeros(kc.shape) for i in range(0, r.shape[1]): distances_array[:, i] = (kc[:, i] - r[:, i]) ** 2 s = distances_array.sum(axis=1) s = np.sqrt(s) s = s.T kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, dist_to_unkn] # remove nans kriging_data = kriging_data[~np.isnan(kriging_data).any(axis=1)] # sort by distance kriging_data = kriging_data[kriging_data[:, -1].argsort()] # set output by distance params # search radius max_search_pos = np.argmax(kriging_data[:, -1] > self.max_search_radius) output_data = kriging_data[:max_search_pos] # check number of observations if len(output_data) < self.min_no_of_observations: output_data = kriging_data[:self.min_no_of_observations] # TODO: info to the app logs # print('Dataset has been set based on the minimum number of observations') # set final dataset self.prepared_data = output_data if verbose: print('Predictions data prepared') def normalize_weights(self, weights, estimated_value, kriging_type): """ Algorithm for weight normalization to remove negative weights of the points which are clustering. Derived from Deutsch, C.V., Correcting for negative weights in ordinary kriging, Computers & Geosciences, Vol. 22, No. 7, pp. 765-773, 1996. :param: weights - weights matrix calculated with "normal" kriging procedure, :param: estimated_value - value estimated for a given, unknown point. :return: weight_matrix - normalized weight matrix where negative weights are removed and matrix is scaled to give a sum of all elements equal to 1. """ if kriging_type == 'ord': weight_matrix = weights[:-1].copy() output_matrix = weights[:-1].copy() elif kriging_type == 'sim': weight_matrix = weights.copy() output_matrix = weights.copy() else: print('You did not choose any kriging type. 
Chosen type: <sim> - simple kriging.') weight_matrix = weights.copy() output_matrix = weights.copy() ###### Calculate average covariance between the location being ###### ###### estimated and the locations with negative weights ###### locs = np.argwhere(weight_matrix < 0) # Check where weights are below 0.0 locs = locs[:, 0] # Calculate covariance between those points and unknown point if len(locs) >= 1: c = [] mu = 0 for i in locs: _c = estimated_value * self.prepared_data[i, 2] mu = mu + estimated_value + self.prepared_data[i, 2] c.append(_c) output_matrix[i, 0] = 0 mu = mu / len(c) cov = np.sum(c) / len(c) - mu * mu ###### Calculate absolute magnitude of the negative weights ##### w = weight_matrix[weight_matrix < 0] w = w.T magnitude = np.sum(np.abs(w)) / len(w) ###### Test values greater than 0 and check if they need to be ###### rescaled to 0 ###### ###### if weight > 0 and Covariance between unknown point and known ###### point is less than the average covariance between the location ###### being estimated and the locations with negative weights and ###### and weight is less than absolute magnitude of the negative ###### weights then set weight to zero ##### positive_locs = np.argwhere(weight_matrix > 0) # Check where weights are greater than 0.0 positive_locs = positive_locs[:, 0] for j in positive_locs: cov_est = (estimated_value * self.prepared_data[j, 2]) / 2 mu = (estimated_value + self.prepared_data[j, 2]) / 2 cov_est = cov_est - mu * mu if cov_est < cov: if weight_matrix[j, 0] < magnitude: output_matrix[j, 0] = 0 ###### Normalize weight matrix to get a sum of all elements equal to 1 ###### output_matrix = output_matrix / np.sum(output_matrix) return output_matrix else: return weights # Data processing private class methods def _calculate_weighted_distances(self, unknown_area, unknown_area_id): """Function calculates weighted distances between unknown area and known areas""" dist_dict = self._prepare_distances_dict(unknown_area) base_area = dist_dict[unknown_area_id] base_area_list = base_area['coordinates'] other_keys = list(dist_dict.keys()) weighted_distances = [] for k in other_keys: other_area_list = dist_dict[k]['coordinates'] dist = calculate_block_to_block_distance(base_area_list, other_area_list) weighted_distances.append([dist, k]) return weighted_distances def _prepare_distances_dict(self, unknown_area): """Function prepares dict with distances for weighted distance calculation between areas""" new_d = self.joined_datasets.copy() new_d = new_d.append(unknown_area, ignore_index=True) try: new_d['px'] = new_d['geometry'].apply(lambda v: v[0].x) new_d['py'] = new_d['geometry'].apply(lambda v: v[0].y) except TypeError: new_d['px'] = new_d['geometry'].apply(lambda v: v.x) new_d['py'] = new_d['geometry'].apply(lambda v: v.y) new_dict = (new_d.groupby(self.id_col) .apply(lambda v: {'coordinates': list(map(list, zip(v['px'], v['py'], v['TOT'])))}) .to_dict()) return new_dict @staticmethod def _get_list_from_dict(d, l):
def _predict_value(self, predicted_array, k_array, vals_of_neigh_areas): w = np.linalg.solve(predicted_array, k_array) zhat = (np.matrix(vals_of_neigh_areas * w[:-1])[0, 0]) if np.any(w < 0): # Normalize weights normalized_w = self.normalize_weights(w, zhat, 'ord') zhat = (np.matrix(vals_of_neigh_areas * normalized_w)[0, 0]) sigmasq = (w.T * k_array)[0, 0] if sigmasq < 0: print(sigmasq) sigma = 0 else: sigma = np.sqrt(sigmasq) return zhat, sigma, w[-1][0], normalized_w, self.unknown_area_id else: sigmasq = (w.T * k_array)[0, 0] if sigmasq < 0: sigma = 0 else: sigma = np.sqrt(sigmasq) return zhat, sigma, w[-1][0], w, self.unknown_area_id # Modeling functions def poisson_kriging(self, pk_type='centroid'): """ :param pk_type: available types: - 'ata' for area-to-area Poisson Kriging, - 'atp' for area-to-point Poisson Kriging, - 'centroid' for centroid based PK. To run kriging operation prepare_data method should be invoked first :return zhat, sigma, w[-1][0], w: [value in unknown location, error, estimated mean, weights, area_id] """ vals_of_neigh_areas = self.prepared_data[:, 2] n = len(self.prepared_data) k = np.array([vals_of_neigh_areas]) k = k.T k1 = np.matrix(1) k = np.concatenate((k, k1), axis=0) predicted = None if pk_type == 'centroid': # Calculation of centroid distances distances = calculate_distance(self.prepared_data[:, :2]) predicted = self.model.predict(distances.ravel()) elif pk_type == 'ata' or pk_type == 'atp': # Calculation of weighted distances distances = self._prepare_distances_dict(check_all=False, list_of_idx=self.prepared_data[:, 3]) calculated_distances = block_to_block_distances(distances) sorted_distances = self._get_list_from_dict(calculated_distances, self.prepared_data[:, 3]) predicted = self.model.predict(sorted_distances.ravel()) # Prepare predicted distances array predicted = np.matrix(predicted.reshape(n, n)) # Add weights to predicted values (diagonal) weights_matrix = self.calculate_weight_arr() predicted = predicted + weights_matrix ones = np.matrix(np.ones(n)) predicted = np.concatenate((predicted, ones.T), axis=1) ones = np.matrix(np.ones(n + 1)) predicted = np.concatenate((predicted, ones), axis=0) prediction = self._predict_value(predicted, k, vals_of_neigh_areas) return prediction # Population-based weights array (for m' parameter) def calculate_weight_arr(self): vals_of_neigh_areas = self.prepared_data[:, 2] pop_of_neigh_areas = self.prepared_data[:, 4] weighted = np.sum(vals_of_neigh_areas * pop_of_neigh_areas) weights_arr = weighted / np.sum(pop_of_neigh_areas) w = np.ones(shape=vals_of_neigh_areas.shape) w = (weights_arr * w) / pop_of_neigh_areas return np.diag(w)
"""Function creates list of lists from dict of dicts in the order given by the list with key names""" new_list = [] for val in l: subdict = d[val] inner_list = [] for subval in l: inner_list.append(subdict[subval]) new_list.append(inner_list) return np.array(new_list)
identifier_body
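normalize_weights above follows Deutsch (1996): zero the negative weights, also zero small positive weights whose covariance with the unknown point falls below the average covariance of the negatively-weighted samples, then rescale so the weights sum to one. A standalone numpy sketch of that correction, with a hypothetical cov_with_unknown array standing in for the covariance terms the class derives from self.prepared_data:

import numpy as np

def correct_negative_weights(weights, cov_with_unknown):
    # Assumes at least one weight survives the zeroing (sketch-level only).
    w = np.asarray(weights, dtype=float).copy()
    neg = w < 0
    if not neg.any():
        return w
    # Average covariance between the unknown location and the negatively
    # weighted samples, and the mean magnitude of those weights.
    mean_neg_cov = cov_with_unknown[neg].mean()
    mean_neg_mag = np.abs(w[neg]).mean()
    w[neg] = 0.0
    # Positive weights that look like clustering artifacts: low covariance
    # with the unknown point and magnitude below the negative-weight average.
    artifact = (w > 0) & (cov_with_unknown < mean_neg_cov) & (w < mean_neg_mag)
    w[artifact] = 0.0
    return w / w.sum()

w = np.array([0.55, 0.40, 0.20, -0.10, -0.05])
cov = np.array([0.9, 0.8, 0.1, 0.2, 0.15])
print(correct_negative_weights(w, cov))   # non-negative, sums to 1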
poisson_kriging.py
import numpy as np from pyinterpolate.kriging.helper_functions.euclidean_distance import calculate_distance from pyinterpolate.kriging.helper_functions.euclidean_distance import block_to_block_distances from pyinterpolate.kriging.helper_functions.euclidean_distance import calculate_block_to_block_distance # TODO: remove matrix data structures class PKrige: """ Class for Poisson Kriging, Area-to-area (ATA) and Area-to-Point (ATP) Poisson Kriging interpolation of the unknown values in a given location (position). Class takes two arguments during the initialization: counties_data - population_data - semivariogram_model - Available methods: Static methods: Data visualization methods: Example how to prepare model: """ def __init__(self): """ Class for calculation of poisson kriging """ self.model = None self.joined_datasets = None self.id_col = None self.val_col = None self.pop_col = None self.total_population_per_unit = None self.centroids_of_areal_data = None self.prepared_data = None self.unknown_area_id = None # Parameters self.lags = None self.step = None self.min_no_of_observations = None self.max_search_radius = None # Data preparation functions def set_params(self, model, joined_datasets, population_series, centroids_dataset, id_col, val_col, pop_col, lags_number, lag_step_size, min_no_of_observations, search_radius): self.model = model self.joined_datasets = joined_datasets self.total_population_per_unit = population_series self.centroids_of_areal_data = centroids_dataset self.id_col = id_col self.val_col = val_col self.pop_col = pop_col self.lags = lags_number self.step = lag_step_size self.min_no_of_observations = min_no_of_observations self.max_search_radius = search_radius print('Parameters have been set') def prepare_prediction_data(self, unknown_areal_data_row, unknown_areal_data_centroid, weighted=False, verbose=False): """ Function prepares data from unknown locations for Poisson Kriging. :param unknown_areal_data: PKData object (row) with areal and population data. :param weighted: distances weighted by population (True) or not (False), :param verbose: if True then method informs about the successful operation. 
:return prediction: prepared dataset which contains: [[x, y, value, known_area_id, distance_to_unknown_position], [...]], """ areal_id = unknown_areal_data_centroid[0][-1] cx_cy = unknown_areal_data_centroid[0][:2] r = np.array([cx_cy]) known_centroids = self.centroids_of_areal_data kc = known_centroids[:, :2] # Build set for Poisson Kriging if weighted: weighted_distances = self._calculate_weighted_distances(unknown_areal_data_row, areal_id) s = [] for wd in weighted_distances: for k in known_centroids: if wd[1] in k: s.append(wd[0]) break else: pass s = np.array(s).T kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, weighted_dist_to_unkn] else: distances_array = np.zeros(kc.shape) for i in range(0, r.shape[1]): distances_array[:, i] = (kc[:, i] - r[:, i]) ** 2 s = distances_array.sum(axis=1) s = np.sqrt(s) s = s.T kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, dist_to_unkn] # remove nans kriging_data = kriging_data[~np.isnan(kriging_data).any(axis=1)] # sort by distance kriging_data = kriging_data[kriging_data[:, -1].argsort()] # set output by distance params # search radius max_search_pos = np.argmax(kriging_data[:, -1] > self.max_search_radius) output_data = kriging_data[:max_search_pos] # check number of observations if len(output_data) < self.min_no_of_observations: output_data = kriging_data[:self.min_no_of_observations] # TODO: info to the app logs # print('Dataset has been set based on the minimum number of observations') # set final dataset self.prepared_data = output_data if verbose: print('Predictions data prepared') def normalize_weights(self, weights, estimated_value, kriging_type): """ Algorithm for weight normalization to remove negative weights of the points which are clustering. Derived from Deutsch, C.V., Correcting for negative weights in ordinary kriging, Computers & Geosciences, Vol. 22, No. 7, pp. 765-773, 1996. :param: weights - weights matrix calculated with "normal" kriging procedure, :param: estimated_value - value estimated for a given, unknown point. :return: weight_matrix - normalized weight matrix where negative weights are removed and matrix is scaled to give a sum of all elements equal to 1. """ if kriging_type == 'ord': weight_matrix = weights[:-1].copy() output_matrix = weights[:-1].copy() elif kriging_type == 'sim': weight_matrix = weights.copy() output_matrix = weights.copy() else: print('You did not choose any kriging type. 
Chosen type: <sim> - simple kriging.') weight_matrix = weights.copy() output_matrix = weights.copy() ###### Calculate average covariance between the location being ###### ###### estimated and the locations with negative weights ###### locs = np.argwhere(weight_matrix < 0) # Check where weights are below 0.0 locs = locs[:, 0] # Calculate covariance between those points and unknown point if len(locs) >= 1: c = [] mu = 0 for i in locs: _c = estimated_value * self.prepared_data[i, 2] mu = mu + estimated_value + self.prepared_data[i, 2] c.append(_c) output_matrix[i, 0] = 0 mu = mu / len(c) cov = np.sum(c) / len(c) - mu * mu ###### Calculate absolute magnitude of the negative weights ##### w = weight_matrix[weight_matrix < 0] w = w.T magnitude = np.sum(np.abs(w)) / len(w) ###### Test values greater than 0 and check if they need to be ###### rescaled to 0 ###### ###### if weight > 0 and Covariance between unknown point and known ###### point is less than the average covariance between the location ###### being estimated and the locations with negative weights and ###### and weight is less than absolute magnitude of the negative ###### weights then set weight to zero ##### positive_locs = np.argwhere(weight_matrix > 0) # Check where weights are greater than 0.0 positive_locs = positive_locs[:, 0] for j in positive_locs: cov_est = (estimated_value * self.prepared_data[j, 2]) / 2 mu = (estimated_value + self.prepared_data[j, 2]) / 2 cov_est = cov_est - mu * mu if cov_est < cov: if weight_matrix[j, 0] < magnitude: output_matrix[j, 0] = 0 ###### Normalize weight matrix to get a sum of all elements equal to 1 ###### output_matrix = output_matrix / np.sum(output_matrix) return output_matrix else: return weights # Data processing private class methods def _calculate_weighted_distances(self, unknown_area, unknown_area_id): """Function calculates weighted distances between unknown area and known areas""" dist_dict = self._prepare_distances_dict(unknown_area) base_area = dist_dict[unknown_area_id] base_area_list = base_area['coordinates'] other_keys = list(dist_dict.keys()) weighted_distances = [] for k in other_keys: other_area_list = dist_dict[k]['coordinates'] dist = calculate_block_to_block_distance(base_area_list, other_area_list) weighted_distances.append([dist, k]) return weighted_distances def
(self, unknown_area): """Function prepares dict with distances for weighted distance calculation between areas""" new_d = self.joined_datasets.copy() new_d = new_d.append(unknown_area, ignore_index=True) try: new_d['px'] = new_d['geometry'].apply(lambda v: v[0].x) new_d['py'] = new_d['geometry'].apply(lambda v: v[0].y) except TypeError: new_d['px'] = new_d['geometry'].apply(lambda v: v.x) new_d['py'] = new_d['geometry'].apply(lambda v: v.y) new_dict = (new_d.groupby(self.id_col) .apply(lambda v: {'coordinates': list(map(list, zip(v['px'], v['py'], v['TOT'])))}) .to_dict()) return new_dict @staticmethod def _get_list_from_dict(d, l): """Function creates list of lists from dict of dicts in the order given by the list with key names""" new_list = [] for val in l: subdict = d[val] inner_list = [] for subval in l: inner_list.append(subdict[subval]) new_list.append(inner_list) return np.array(new_list) def _predict_value(self, predicted_array, k_array, vals_of_neigh_areas): w = np.linalg.solve(predicted_array, k_array) zhat = (np.matrix(vals_of_neigh_areas * w[:-1])[0, 0]) if np.any(w < 0): # Normalize weights normalized_w = self.normalize_weights(w, zhat, 'ord') zhat = (np.matrix(vals_of_neigh_areas * normalized_w)[0, 0]) sigmasq = (w.T * k_array)[0, 0] if sigmasq < 0: print(sigmasq) sigma = 0 else: sigma = np.sqrt(sigmasq) return zhat, sigma, w[-1][0], normalized_w, self.unknown_area_id else: sigmasq = (w.T * k_array)[0, 0] if sigmasq < 0: sigma = 0 else: sigma = np.sqrt(sigmasq) return zhat, sigma, w[-1][0], w, self.unknown_area_id # Modeling functions def poisson_kriging(self, pk_type='centroid'): """ :param pk_type: available types: - 'ata' for area-to-area Poisson Kriging, - 'atp' for area-to-point Poisson Kriging, - 'centroid' for centroid based PK. 
To run kriging operation prepare_data method should be invoked first :return zhat, sigma, w[-1][0], w: [value in unknown location, error, estimated mean, weights, area_id] """ vals_of_neigh_areas = self.prepared_data[:, 2] n = len(self.prepared_data) k = np.array([vals_of_neigh_areas]) k = k.T k1 = np.matrix(1) k = np.concatenate((k, k1), axis=0) predicted = None if pk_type == 'centroid': # Calculation of centroid distances distances = calculate_distance(self.prepared_data[:, :2]) predicted = self.model.predict(distances.ravel()) elif pk_type == 'ata' or pk_type == 'atp': # Calculation of weighted distances distances = self._prepare_distances_dict(check_all=False, list_of_idx=self.prepared_data[:, 3]) calculated_distances = block_to_block_distances(distances) sorted_distances = self._get_list_from_dict(calculated_distances, self.prepared_data[:, 3]) predicted = self.model.predict(sorted_distances.ravel()) # Prepare predicted distances array predicted = np.matrix(predicted.reshape(n, n)) # Add weights to predicted values (diagonal) weights_matrix = self.calculate_weight_arr() predicted = predicted + weights_matrix ones = np.matrix(np.ones(n)) predicted = np.concatenate((predicted, ones.T), axis=1) ones = np.matrix(np.ones(n + 1)) predicted = np.concatenate((predicted, ones), axis=0) prediction = self._predict_value(predicted, k, vals_of_neigh_areas) return prediction # Population-based weights array (for m' parameter) def calculate_weight_arr(self): vals_of_neigh_areas = self.prepared_data[:, 2] pop_of_neigh_areas = self.prepared_data[:, 4] weighted = np.sum(vals_of_neigh_areas * pop_of_neigh_areas) weights_arr = weighted / np.sum(pop_of_neigh_areas) w = np.ones(shape=vals_of_neigh_areas.shape) w = (weights_arr * w) / pop_of_neigh_areas return np.diag(w)
_prepare_distances_dict
identifier_name
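Stripped of the class plumbing, prepare_prediction_data above is a neighbour selection: Euclidean distance to every known centroid, drop rows with NaNs, sort nearest-first, keep what falls inside the search radius, and widen to a minimum observation count when the radius captures too few areas. A condensed numpy sketch of that selection (a boolean mask replaces the argmax cut, the [x, y, value, id] layout mirrors centroids_of_areal_data, and the names are mine):

import numpy as np

def select_neighbours(known, unknown_xy, max_radius, min_obs):
    dists = np.sqrt(((known[:, :2] - unknown_xy) ** 2).sum(axis=1))
    data = np.c_[known, dists]                 # [x, y, value, id, dist]
    data = data[~np.isnan(data).any(axis=1)]   # drop incomplete rows
    data = data[data[:, -1].argsort()]         # nearest first
    within = data[data[:, -1] <= max_radius]
    # Too few areas inside the radius: fall back to the nearest min_obs.
    return within if len(within) >= min_obs else data[:min_obs]

known = np.array([[0.0, 0.0, 10.0, 1.0],
                  [1.0, 0.0, 12.0, 2.0],
                  [5.0, 5.0, 40.0, 3.0]])
print(select_neighbours(known, np.array([0.5, 0.1]), max_radius=2.0, min_obs=2))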
poisson_kriging.py
import numpy as np from pyinterpolate.kriging.helper_functions.euclidean_distance import calculate_distance from pyinterpolate.kriging.helper_functions.euclidean_distance import block_to_block_distances from pyinterpolate.kriging.helper_functions.euclidean_distance import calculate_block_to_block_distance # TODO: remove matrix data structures class PKrige: """ Class for Poisson Kriging, Area-to-area (ATA) and Area-to-Point (ATP) Poisson Kriging interpolation of the unknown values in a given location (position). Class takes two arguments during the initialization: counties_data - population_data - semivariogram_model - Available methods: Static methods: Data visualization methods: Example how to prepare model: """ def __init__(self): """ Class for calculation of poisson kriging """ self.model = None self.joined_datasets = None self.id_col = None self.val_col = None self.pop_col = None self.total_population_per_unit = None self.centroids_of_areal_data = None self.prepared_data = None self.unknown_area_id = None # Parameters self.lags = None self.step = None self.min_no_of_observations = None self.max_search_radius = None # Data preparation functions def set_params(self, model, joined_datasets, population_series, centroids_dataset, id_col, val_col, pop_col, lags_number, lag_step_size, min_no_of_observations, search_radius): self.model = model self.joined_datasets = joined_datasets self.total_population_per_unit = population_series self.centroids_of_areal_data = centroids_dataset self.id_col = id_col self.val_col = val_col self.pop_col = pop_col self.lags = lags_number self.step = lag_step_size self.min_no_of_observations = min_no_of_observations self.max_search_radius = search_radius print('Parameters have been set') def prepare_prediction_data(self, unknown_areal_data_row, unknown_areal_data_centroid, weighted=False, verbose=False): """ Function prepares data from unknown locations for Poisson Kriging. :param unknown_areal_data: PKData object (row) with areal and population data. :param weighted: distances weighted by population (True) or not (False), :param verbose: if True then method informs about the successful operation. :return prediction: prepared dataset which contains: [[x, y, value, known_area_id, distance_to_unknown_position], [...]], """ areal_id = unknown_areal_data_centroid[0][-1] cx_cy = unknown_areal_data_centroid[0][:2] r = np.array([cx_cy]) known_centroids = self.centroids_of_areal_data kc = known_centroids[:, :2] # Build set for Poisson Kriging
s = [] for wd in weighted_distances: for k in known_centroids: if wd[1] in k: s.append(wd[0]) break else: pass s = np.array(s).T kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, weighted_dist_to_unkn] else: distances_array = np.zeros(kc.shape) for i in range(0, r.shape[1]): distances_array[:, i] = (kc[:, i] - r[:, i]) ** 2 s = distances_array.sum(axis=1) s = np.sqrt(s) s = s.T kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, dist_to_unkn] # remove nans kriging_data = kriging_data[~np.isnan(kriging_data).any(axis=1)] # sort by distance kriging_data = kriging_data[kriging_data[:, -1].argsort()] # set output by distance params # search radius max_search_pos = np.argmax(kriging_data[:, -1] > self.max_search_radius) output_data = kriging_data[:max_search_pos] # check number of observations if len(output_data) < self.min_no_of_observations: output_data = kriging_data[:self.min_no_of_observations] # TODO: info to the app logs # print('Dataset has been set based on the minimum number of observations') # set final dataset self.prepared_data = output_data if verbose: print('Predictions data prepared') def normalize_weights(self, weights, estimated_value, kriging_type): """ Algorithm for weight normalization to remove negative weights of the points which are clustering. Derived from Deutsch, C.V., Correcting for negative weights in ordinary kriging, Computers & Geosciences, Vol. 22, No. 7, pp. 765-773, 1996. :param: weights - weights matrix calculated with "normal" kriging procedure, :param: estimated_value - value estimated for a given, unknown point. :return: weight_matrix - normalized weight matrix where negative weights are removed and matrix is scaled to give a sum of all elements equal to 1. """ if kriging_type == 'ord': weight_matrix = weights[:-1].copy() output_matrix = weights[:-1].copy() elif kriging_type == 'sim': weight_matrix = weights.copy() output_matrix = weights.copy() else: print('You did not choose any kriging type. 
Chosen type: <sim> - simple kriging.') weight_matrix = weights.copy() output_matrix = weights.copy() ###### Calculate average covariance between the location being ###### ###### estimated and the locations with negative weights ###### locs = np.argwhere(weight_matrix < 0) # Check where weights are below 0.0 locs = locs[:, 0] # Calculate covariance between those points and unknown point if len(locs) >= 1: c = [] mu = 0 for i in locs: _c = estimated_value * self.prepared_data[i, 2] mu = mu + estimated_value + self.prepared_data[i, 2] c.append(_c) output_matrix[i, 0] = 0 mu = mu / len(c) cov = np.sum(c) / len(c) - mu * mu ###### Calculate absolute magnitude of the negative weights ##### w = weight_matrix[weight_matrix < 0] w = w.T magnitude = np.sum(np.abs(w)) / len(w) ###### Test values greater than 0 and check if they need to be ###### rescaled to 0 ###### ###### if weight > 0 and Covariance between unknown point and known ###### point is less than the average covariance between the location ###### being estimated and the locations with negative weights and ###### and weight is less than absolute magnitude of the negative ###### weights then set weight to zero ##### positive_locs = np.argwhere(weight_matrix > 0) # Check where weights are greater than 0.0 positive_locs = positive_locs[:, 0] for j in positive_locs: cov_est = (estimated_value * self.prepared_data[j, 2]) / 2 mu = (estimated_value + self.prepared_data[j, 2]) / 2 cov_est = cov_est - mu * mu if cov_est < cov: if weight_matrix[j, 0] < magnitude: output_matrix[j, 0] = 0 ###### Normalize weight matrix to get a sum of all elements equal to 1 ###### output_matrix = output_matrix / np.sum(output_matrix) return output_matrix else: return weights # Data processing private class methods def _calculate_weighted_distances(self, unknown_area, unknown_area_id): """Function calculates weighted distances between unknown area and known areas""" dist_dict = self._prepare_distances_dict(unknown_area) base_area = dist_dict[unknown_area_id] base_area_list = base_area['coordinates'] other_keys = list(dist_dict.keys()) weighted_distances = [] for k in other_keys: other_area_list = dist_dict[k]['coordinates'] dist = calculate_block_to_block_distance(base_area_list, other_area_list) weighted_distances.append([dist, k]) return weighted_distances def _prepare_distances_dict(self, unknown_area): """Function prepares dict with distances for weighted distance calculation between areas""" new_d = self.joined_datasets.copy() new_d = new_d.append(unknown_area, ignore_index=True) try: new_d['px'] = new_d['geometry'].apply(lambda v: v[0].x) new_d['py'] = new_d['geometry'].apply(lambda v: v[0].y) except TypeError: new_d['px'] = new_d['geometry'].apply(lambda v: v.x) new_d['py'] = new_d['geometry'].apply(lambda v: v.y) new_dict = (new_d.groupby(self.id_col) .apply(lambda v: {'coordinates': list(map(list, zip(v['px'], v['py'], v['TOT'])))}) .to_dict()) return new_dict @staticmethod def _get_list_from_dict(d, l): """Function creates list of lists from dict of dicts in the order given by the list with key names""" new_list = [] for val in l: subdict = d[val] inner_list = [] for subval in l: inner_list.append(subdict[subval]) new_list.append(inner_list) return np.array(new_list) def _predict_value(self, predicted_array, k_array, vals_of_neigh_areas): w = np.linalg.solve(predicted_array, k_array) zhat = (np.matrix(vals_of_neigh_areas * w[:-1])[0, 0]) if np.any(w < 0): # Normalize weights normalized_w = self.normalize_weights(w, zhat, 'ord') zhat = 
(np.matrix(vals_of_neigh_areas * normalized_w)[0, 0]) sigmasq = (w.T * k_array)[0, 0] if sigmasq < 0: print(sigmasq) sigma = 0 else: sigma = np.sqrt(sigmasq) return zhat, sigma, w[-1][0], normalized_w, self.unknown_area_id else: sigmasq = (w.T * k_array)[0, 0] if sigmasq < 0: sigma = 0 else: sigma = np.sqrt(sigmasq) return zhat, sigma, w[-1][0], w, self.unknown_area_id # Modeling functions def poisson_kriging(self, pk_type='centroid'): """ :param pk_type: available types: - 'ata' for area-to-area Poisson Kriging, - 'atp' for area-to-point Poisson Kriging, - 'centroid' for centroid based PK. To run kriging operation prepare_data method should be invoked first :return zhat, sigma, w[-1][0], w: [value in unknown location, error, estimated mean, weights, area_id] """ vals_of_neigh_areas = self.prepared_data[:, 2] n = len(self.prepared_data) k = np.array([vals_of_neigh_areas]) k = k.T k1 = np.matrix(1) k = np.concatenate((k, k1), axis=0) predicted = None if pk_type == 'centroid': # Calculation of centroid distances distances = calculate_distance(self.prepared_data[:, :2]) predicted = self.model.predict(distances.ravel()) elif pk_type == 'ata' or pk_type == 'atp': # Calculation of weighted distances distances = self._prepare_distances_dict(check_all=False, list_of_idx=self.prepared_data[:, 3]) calculated_distances = block_to_block_distances(distances) sorted_distances = self._get_list_from_dict(calculated_distances, self.prepared_data[:, 3]) predicted = self.model.predict(sorted_distances.ravel()) # Prepare predicted distances array predicted = np.matrix(predicted.reshape(n, n)) # Add weights to predicted values (diagonal) weights_matrix = self.calculate_weight_arr() predicted = predicted + weights_matrix ones = np.matrix(np.ones(n)) predicted = np.concatenate((predicted, ones.T), axis=1) ones = np.matrix(np.ones(n + 1)) predicted = np.concatenate((predicted, ones), axis=0) prediction = self._predict_value(predicted, k, vals_of_neigh_areas) return prediction # Population-based weights array (for m' parameter) def calculate_weight_arr(self): vals_of_neigh_areas = self.prepared_data[:, 2] pop_of_neigh_areas = self.prepared_data[:, 4] weighted = np.sum(vals_of_neigh_areas * pop_of_neigh_areas) weights_arr = weighted / np.sum(pop_of_neigh_areas) w = np.ones(shape=vals_of_neigh_areas.shape) w = (weights_arr * w) / pop_of_neigh_areas return np.diag(w)
if weighted: weighted_distances = self._calculate_weighted_distances(unknown_areal_data_row, areal_id)
random_line_split
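poisson_kriging above assembles a bordered linear system: the model-predicted values between neighbours in an n-by-n block, a ones row and column with a zero corner for the unbiasedness constraint, and a right-hand side of the neighbour values with a trailing 1; the solution is n weights plus a Lagrange multiplier. A sketch of that assembly and solve which mirrors the code's own right-hand-side choice rather than any particular textbook formulation (names are mine):

import numpy as np

def solve_kriging_system(predicted_block, values):
    n = len(values)
    a = np.asarray(predicted_block, dtype=float).reshape(n, n)
    # Border with ones and a zero corner: enforces sum(weights) == 1.
    a = np.block([[a, np.ones((n, 1))],
                  [np.ones((1, n)), np.zeros((1, 1))]])
    k = np.append(np.asarray(values, dtype=float), 1.0)
    w = np.linalg.solve(a, k)
    weights, lagrange_mu = w[:-1], w[-1]
    estimate = float(values @ weights)        # zhat in the code above
    return estimate, weights, lagrange_mu

block = [[0.0, 1.2, 2.0],
         [1.2, 0.0, 1.5],
         [2.0, 1.5, 0.0]]
vals = np.array([10.0, 12.0, 11.0])
print(solve_kriging_system(block, vals))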
poisson_kriging.py
import numpy as np from pyinterpolate.kriging.helper_functions.euclidean_distance import calculate_distance from pyinterpolate.kriging.helper_functions.euclidean_distance import block_to_block_distances from pyinterpolate.kriging.helper_functions.euclidean_distance import calculate_block_to_block_distance # TODO: remove matrix data structures class PKrige: """ Class for Poisson Kriging, Area-to-area (ATA) and Area-to-Point (ATP) Poisson Kriging interpolation of the unknown values in a given location (position). Class takes two arguments during the initialization: counties_data - population_data - semivariogram_model - Available methods: Static methods: Data visualization methods: Example how to prepare model: """ def __init__(self): """ Class for calculation of poisson kriging """ self.model = None self.joined_datasets = None self.id_col = None self.val_col = None self.pop_col = None self.total_population_per_unit = None self.centroids_of_areal_data = None self.prepared_data = None self.unknown_area_id = None # Parameters self.lags = None self.step = None self.min_no_of_observations = None self.max_search_radius = None # Data preparation functions def set_params(self, model, joined_datasets, population_series, centroids_dataset, id_col, val_col, pop_col, lags_number, lag_step_size, min_no_of_observations, search_radius): self.model = model self.joined_datasets = joined_datasets self.total_population_per_unit = population_series self.centroids_of_areal_data = centroids_dataset self.id_col = id_col self.val_col = val_col self.pop_col = pop_col self.lags = lags_number self.step = lag_step_size self.min_no_of_observations = min_no_of_observations self.max_search_radius = search_radius print('Parameters have been set') def prepare_prediction_data(self, unknown_areal_data_row, unknown_areal_data_centroid, weighted=False, verbose=False): """ Function prepares data from unknown locations for Poisson Kriging. :param unknown_areal_data: PKData object (row) with areal and population data. :param weighted: distances weighted by population (True) or not (False), :param verbose: if True then method informs about the successful operation. 
:return prediction: prepared dataset which contains: [[x, y, value, known_area_id, distance_to_unknown_position], [...]], """ areal_id = unknown_areal_data_centroid[0][-1] cx_cy = unknown_areal_data_centroid[0][:2] r = np.array([cx_cy]) known_centroids = self.centroids_of_areal_data kc = known_centroids[:, :2] # Build set for Poisson Kriging if weighted: weighted_distances = self._calculate_weighted_distances(unknown_areal_data_row, areal_id) s = [] for wd in weighted_distances: for k in known_centroids: if wd[1] in k: s.append(wd[0]) break else: pass s = np.array(s).T kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, weighted_dist_to_unkn] else: distances_array = np.zeros(kc.shape) for i in range(0, r.shape[1]): distances_array[:, i] = (kc[:, i] - r[:, i]) ** 2 s = distances_array.sum(axis=1) s = np.sqrt(s) s = s.T kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, dist_to_unkn] # remove nans kriging_data = kriging_data[~np.isnan(kriging_data).any(axis=1)] # sort by distance kriging_data = kriging_data[kriging_data[:, -1].argsort()] # set output by distance params # search radius max_search_pos = np.argmax(kriging_data[:, -1] > self.max_search_radius) output_data = kriging_data[:max_search_pos] # check number of observations if len(output_data) < self.min_no_of_observations: output_data = kriging_data[:self.min_no_of_observations] # TODO: info to the app logs # print('Dataset has been set based on the minimum number of observations') # set final dataset self.prepared_data = output_data if verbose: print('Predictions data prepared') def normalize_weights(self, weights, estimated_value, kriging_type): """ Algorithm for weight normalization to remove negative weights of the points which are clustering. Derived from Deutsch, C.V., Correcting for negative weights in ordinary kriging, Computers & Geosciences, Vol. 22, No. 7, pp. 765-773, 1996. :param: weights - weights matrix calculated with "normal" kriging procedure, :param: estimated_value - value estimated for a given, unknown point. :return: weight_matrix - normalized weight matrix where negative weights are removed and matrix is scaled to give a sum of all elements equal to 1. """ if kriging_type == 'ord': weight_matrix = weights[:-1].copy() output_matrix = weights[:-1].copy() elif kriging_type == 'sim': weight_matrix = weights.copy() output_matrix = weights.copy() else: print('You did not choose any kriging type. 
Chosen type: <sim> - simple kriging.') weight_matrix = weights.copy() output_matrix = weights.copy() ###### Calculate average covariance between the location being ###### ###### estimated and the locations with negative weights ###### locs = np.argwhere(weight_matrix < 0) # Check where weights are below 0.0 locs = locs[:, 0] # Calculate covariance between those points and unknown point if len(locs) >= 1: c = [] mu = 0 for i in locs: _c = estimated_value * self.prepared_data[i, 2] mu = mu + estimated_value + self.prepared_data[i, 2] c.append(_c) output_matrix[i, 0] = 0 mu = mu / len(c) cov = np.sum(c) / len(c) - mu * mu ###### Calculate absolute magnitude of the negative weights ##### w = weight_matrix[weight_matrix < 0] w = w.T magnitude = np.sum(np.abs(w)) / len(w) ###### Test values greater than 0 and check if they need to be ###### rescaled to 0 ###### ###### if weight > 0 and Covariance between unknown point and known ###### point is less than the average covariance between the location ###### being estimated and the locations with negative weights and ###### and weight is less than absolute magnitude of the negative ###### weights then set weight to zero ##### positive_locs = np.argwhere(weight_matrix > 0) # Check where weights are greater than 0.0 positive_locs = positive_locs[:, 0] for j in positive_locs: cov_est = (estimated_value * self.prepared_data[j, 2]) / 2 mu = (estimated_value + self.prepared_data[j, 2]) / 2 cov_est = cov_est - mu * mu if cov_est < cov: if weight_matrix[j, 0] < magnitude: output_matrix[j, 0] = 0 ###### Normalize weight matrix to get a sum of all elements equal to 1 ###### output_matrix = output_matrix / np.sum(output_matrix) return output_matrix else: return weights # Data processing private class methods def _calculate_weighted_distances(self, unknown_area, unknown_area_id): """Function calculates weighted distances between unknown area and known areas""" dist_dict = self._prepare_distances_dict(unknown_area) base_area = dist_dict[unknown_area_id] base_area_list = base_area['coordinates'] other_keys = list(dist_dict.keys()) weighted_distances = [] for k in other_keys: other_area_list = dist_dict[k]['coordinates'] dist = calculate_block_to_block_distance(base_area_list, other_area_list) weighted_distances.append([dist, k]) return weighted_distances def _prepare_distances_dict(self, unknown_area): """Function prepares dict with distances for weighted distance calculation between areas""" new_d = self.joined_datasets.copy() new_d = new_d.append(unknown_area, ignore_index=True) try: new_d['px'] = new_d['geometry'].apply(lambda v: v[0].x) new_d['py'] = new_d['geometry'].apply(lambda v: v[0].y) except TypeError: new_d['px'] = new_d['geometry'].apply(lambda v: v.x) new_d['py'] = new_d['geometry'].apply(lambda v: v.y) new_dict = (new_d.groupby(self.id_col) .apply(lambda v: {'coordinates': list(map(list, zip(v['px'], v['py'], v['TOT'])))}) .to_dict()) return new_dict @staticmethod def _get_list_from_dict(d, l): """Function creates list of lists from dict of dicts in the order given by the list with key names""" new_list = [] for val in l: subdict = d[val] inner_list = [] for subval in l: inner_list.append(subdict[subval]) new_list.append(inner_list) return np.array(new_list) def _predict_value(self, predicted_array, k_array, vals_of_neigh_areas): w = np.linalg.solve(predicted_array, k_array) zhat = (np.matrix(vals_of_neigh_areas * w[:-1])[0, 0]) if np.any(w < 0): # Normalize weights normalized_w = self.normalize_weights(w, zhat, 'ord') zhat = 
(np.matrix(vals_of_neigh_areas * normalized_w)[0, 0]) sigmasq = (w.T * k_array)[0, 0] if sigmasq < 0: print(sigmasq) sigma = 0 else: sigma = np.sqrt(sigmasq) return zhat, sigma, w[-1][0], normalized_w, self.unknown_area_id else:
# Modeling functions def poisson_kriging(self, pk_type='centroid'): """ :param pk_type: available types: - 'ata' for area-to-area Poisson Kriging, - 'atp' for area-to-point Poisson Kriging, - 'centroid' for centroid based PK. To run kriging operation prepare_data method should be invoked first :return zhat, sigma, w[-1][0], w: [value in unknown location, error, estimated mean, weights, area_id] """ vals_of_neigh_areas = self.prepared_data[:, 2] n = len(self.prepared_data) k = np.array([vals_of_neigh_areas]) k = k.T k1 = np.matrix(1) k = np.concatenate((k, k1), axis=0) predicted = None if pk_type == 'centroid': # Calculation of centroid distances distances = calculate_distance(self.prepared_data[:, :2]) predicted = self.model.predict(distances.ravel()) elif pk_type == 'ata' or pk_type == 'atp': # Calculation of weighted distances distances = self._prepare_distances_dict(check_all=False, list_of_idx=self.prepared_data[:, 3]) calculated_distances = block_to_block_distances(distances) sorted_distances = self._get_list_from_dict(calculated_distances, self.prepared_data[:, 3]) predicted = self.model.predict(sorted_distances.ravel()) # Prepare predicted distances array predicted = np.matrix(predicted.reshape(n, n)) # Add weights to predicted values (diagonal) weights_matrix = self.calculate_weight_arr() predicted = predicted + weights_matrix ones = np.matrix(np.ones(n)) predicted = np.concatenate((predicted, ones.T), axis=1) ones = np.matrix(np.ones(n + 1)) predicted = np.concatenate((predicted, ones), axis=0) prediction = self._predict_value(predicted, k, vals_of_neigh_areas) return prediction # Population-based weights array (for m' parameter) def calculate_weight_arr(self): vals_of_neigh_areas = self.prepared_data[:, 2] pop_of_neigh_areas = self.prepared_data[:, 4] weighted = np.sum(vals_of_neigh_areas * pop_of_neigh_areas) weights_arr = weighted / np.sum(pop_of_neigh_areas) w = np.ones(shape=vals_of_neigh_areas.shape) w = (weights_arr * w) / pop_of_neigh_areas return np.diag(w)
sigmasq = (w.T * k_array)[0, 0] if sigmasq < 0: sigma = 0 else: sigma = np.sqrt(sigmasq) return zhat, sigma, w[-1][0], w, self.unknown_area_id
conditional_block
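calculate_weight_arr above reduces to two lines: the population-weighted mean rate across the neighbours, spread along a diagonal and scaled inversely by each area's population; this is the correction added to the diagonal of the kriging matrix. In isolation (argument names are mine):

import numpy as np

def population_weight_diagonal(values, populations):
    # Population-weighted mean rate, divided per-area by its population.
    weighted_mean = np.sum(values * populations) / np.sum(populations)
    return np.diag(weighted_mean / populations)

vals = np.array([4.0, 2.0, 8.0])
pops = np.array([100.0, 50.0, 400.0])
print(population_weight_diagonal(vals, pops))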
stream.go
package ads import ( "context" "strconv" "strings" mapset "github.com/deckarep/golang-set" xds_discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" "github.com/pkg/errors" "github.com/openservicemesh/osm/pkg/announcements" "github.com/openservicemesh/osm/pkg/catalog" "github.com/openservicemesh/osm/pkg/certificate" "github.com/openservicemesh/osm/pkg/constants" "github.com/openservicemesh/osm/pkg/envoy" "github.com/openservicemesh/osm/pkg/identity" "github.com/openservicemesh/osm/pkg/kubernetes/events" "github.com/openservicemesh/osm/pkg/metricsstore" "github.com/openservicemesh/osm/pkg/utils" ) // StreamAggregatedResources handles streaming of the clusters to the connected Envoy proxies // This is evaluated once per new Envoy proxy connecting and remains running for the duration of the gRPC socket. func (s *Server) StreamAggregatedResources(server xds_discovery.AggregatedDiscoveryService_StreamAggregatedResourcesServer) error { // When a new Envoy proxy connects, ValidateClient would ensure that it has a valid certificate, // and the Subject CN is in the allowedCommonNames set. certCommonName, certSerialNumber, err := utils.ValidateClient(server.Context(), nil) if err != nil { return errors.Wrap(err, "Could not start Aggregated Discovery Service gRPC stream for newly connected Envoy proxy") } // If maxDataPlaneConnections is enabled i.e. not 0, then check that the number of Envoy connections is less than maxDataPlaneConnections if s.cfg.GetMaxDataPlaneConnections() != 0 && s.proxyRegistry.GetConnectedProxyCount() >= s.cfg.GetMaxDataPlaneConnections() { return errTooManyConnections } log.Trace().Msgf("Envoy with certificate SerialNumber=%s connected", certSerialNumber) metricsstore.DefaultMetricsStore.ProxyConnectCount.Inc() // This is the Envoy proxy that just connected to the control plane. // NOTE: This is step 1 of the registration. At this point we do not yet have context on the Pod. // Details on which Pod this Envoy is fronting will arrive via xDS in the NODE_ID string. // When this arrives we will call RegisterProxy() a second time - this time with Pod context! proxy := envoy.NewProxy(certCommonName, certSerialNumber, utils.GetIPFromContext(server.Context())) s.proxyRegistry.RegisterProxy(proxy) // First of Two invocations. Second one will be during xDS hand-shake! defer s.proxyRegistry.UnregisterProxy(proxy) ctx, cancel := context.WithCancel(server.Context()) defer cancel() quit := make(chan struct{}) requests := make(chan xds_discovery.DiscoveryRequest) // This helper handles receiving messages from the connected Envoys // and any gRPC error states. 
go receive(requests, &server, proxy, quit, s.proxyRegistry) // Register to Envoy global broadcast updates broadcastUpdate := events.GetPubSubInstance().Subscribe(announcements.ProxyBroadcast) // Register for certificate rotation updates certAnnouncement := events.GetPubSubInstance().Subscribe(announcements.CertificateRotated) // Issues a send-all response on a connecting Envoy // If this were to fail, it most likely just means we still have configuration being applied in flight, // which will get triggered by the dispatcher anyway if err = s.sendResponse(proxy, &server, nil, s.cfg, envoy.XDSResponseOrder...); err != nil { log.Error().Err(err).Msgf("Initial sendResponse for proxy %s returned error", proxy.GetCertificateSerialNumber()) } newJob := func(typeURIs []envoy.TypeURI, discoveryRequest *xds_discovery.DiscoveryRequest) *proxyResponseJob { return &proxyResponseJob{ typeURIs: typeURIs, proxy: proxy, adsStream: &server, request: discoveryRequest, xdsServer: s, done: make(chan struct{}), } } for { select { case <-ctx.Done(): metricsstore.DefaultMetricsStore.ProxyConnectCount.Dec() return nil case <-quit: log.Debug().Msgf("gRPC stream with Envoy on Pod with UID=%s closed!", proxy.GetPodUID()) metricsstore.DefaultMetricsStore.ProxyConnectCount.Dec() return nil case discoveryRequest, ok := <-requests: if !ok { log.Error().Msgf("Envoy with xDS Certificate SerialNumber=%s on Pod with UID=%s closed gRPC!", proxy.GetCertificateSerialNumber(), proxy.GetPodUID()) metricsstore.DefaultMetricsStore.ProxyConnectCount.Dec() return errGrpcClosed } // This function call runs the xDS proto state machine given a DiscoveryRequest as input. // Its output is the decision to reply or not to this request. if !respondToRequest(proxy, &discoveryRequest) { continue } typeURL := envoy.TypeURI(discoveryRequest.TypeUrl) var typesRequest []envoy.TypeURI if typeURL == envoy.TypeWildcard { typesRequest = envoy.XDSResponseOrder } else { typesRequest = []envoy.TypeURI{typeURL} } <-s.workqueues.AddJob(newJob(typesRequest, &discoveryRequest)) case <-broadcastUpdate: log.Info().Msgf("Broadcast wake for Proxy SerialNumber=%s UID=%s", proxy.GetCertificateSerialNumber(), proxy.GetPodUID()) // Queue a full configuration update <-s.workqueues.AddJob(newJob(envoy.XDSResponseOrder, nil)) case certUpdateMsg := <-certAnnouncement: cert := certUpdateMsg.(events.PubSubMessage).NewObj.(certificate.Certificater) if isCNforProxy(proxy, cert.GetCommonName()) { // The CN whose corresponding certificate was updated (rotated) by the certificate provider is associated // with this proxy, so update the secrets corresponding to this certificate via SDS. log.Debug().Msgf("Certificate has been updated for proxy with SerialNumber=%s, UID=%s", proxy.GetCertificateSerialNumber(), proxy.GetPodUID()) // Empty DiscoveryRequest should create the SDS specific request // Prepare to queue the SDS proxy response job on the worker pool <-s.workqueues.AddJob(newJob([]envoy.TypeURI{envoy.TypeSDS}, nil)) } } } } // respondToRequest assesses if a given DiscoveryRequest for a given proxy should be responded with // an xDS DiscoveryResponse. 
func respondToRequest(proxy *envoy.Proxy, discoveryRequest *xds_discovery.DiscoveryRequest) bool { var err error var requestVersion uint64 var requestNonce string var lastVersion uint64 var lastNonce string log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Request %s [nonce=%s; version=%s; resources=%v] last sent [nonce=%s; version=%d]", proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.TypeUrl, discoveryRequest.ResponseNonce, discoveryRequest.VersionInfo, discoveryRequest.ResourceNames, proxy.GetLastSentNonce(envoy.TypeURI(discoveryRequest.TypeUrl)), proxy.GetLastSentVersion(envoy.TypeURI(discoveryRequest.TypeUrl))) if discoveryRequest.ErrorDetail != nil { log.Error().Msgf("Proxy SerialNumber=%s PodUID=%s: [NACK] err: \"%s\" for nonce %s, last version applied on request %s", proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.ErrorDetail, discoveryRequest.ResponseNonce, discoveryRequest.VersionInfo) return false } typeURL, ok := envoy.ValidURI[discoveryRequest.TypeUrl] if !ok { log.Error().Msgf("Proxy SerialNumber=%s PodUID=%s: Unknown/Unsupported URI: %s", proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.TypeUrl) return false } // It is possible for Envoy to return an empty VersionInfo. // When that's the case - start with 0 if discoveryRequest.VersionInfo != "" { if requestVersion, err = strconv.ParseUint(discoveryRequest.VersionInfo, 10, 64); err != nil { // It is probable that Envoy responded with a VersionInfo we did not understand log.Error().Err(err).Msgf("Proxy SerialNumber=%s PodUID=%s: Error parsing DiscoveryRequest with TypeURL=%s VersionInfo=%s (%v)", proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), discoveryRequest.VersionInfo, err) return false } } // Set last version applied proxy.SetLastAppliedVersion(typeURL, requestVersion) requestNonce = discoveryRequest.ResponseNonce // Handle first request on stream, should always reply to empty nonce if requestNonce == "" { log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Empty nonce for %s, should be first message on stream (req resources: %v)", proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), discoveryRequest.ResourceNames) return true } // The version of the config received along with the DiscoveryRequest (ackVersion) // is what the Envoy proxy may be acknowledging. It is acknowledging // and not requesting when the ackVersion is <= what we last sent. // It is possible however for a proxy to have a version that is higher // than what we last sent. (Perhaps the control plane restarted.) // In that case we want to make sure that we send new responses with // VersionInfo incremented starting with the version which the proxy last had. lastVersion = proxy.GetLastSentVersion(typeURL) if requestVersion > lastVersion { log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Higher version on request %s, req ver: %d - last ver: %d. Updating to match latest.", proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestVersion, lastVersion) proxy.SetLastSentVersion(typeURL, requestVersion) return true } // Compare Nonces // As per protocol, we can ignore any request on the TypeURL stream that has not caught up with last sent nonce, if the // nonce is non-empty. 
lastNonce = proxy.GetLastSentNonce(typeURL) if requestNonce != lastNonce { log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Ignoring request for %s non-latest nonce (request: %s, current: %s)", proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestNonce, lastNonce) return false } // ---- // At this point, there is no error and nonces match, it is guaranteed an ACK with last version. // What's left is to check if the resources listed are the same. If they are not, we must respond // with the new resources requested. // // In case of LDS and CDS, "Envoy will always use wildcard mode for Listener and Cluster resources". // The following logic is not needed (though correct) for LDS and CDS as request resources are also empty in ACK case. // // This part of the code was inspired by Istio's `shouldRespond` handling of request resource difference // https://github.com/istio/istio/blob/da6178604559bdf2c707a57f452d16bee0de90c8/pilot/pkg/xds/ads.go#L347 // ---- resourcesLastSent := proxy.GetLastResourcesSent(typeURL) resourcesRequested := getRequestedResourceNamesSet(discoveryRequest) // If what we last sent is a superset of what the // requests resources subscribes to, it's ACK and nothing needs to be done. // Otherwise, envoy might be asking us for additional resources that have to be sent along last time. // Difference returns elemenets of <requested> that are not part of elements of <last sent> requestedResourcesDifference := resourcesRequested.Difference(resourcesLastSent) if requestedResourcesDifference.Cardinality() != 0 { log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: request difference in v:%d - requested: %v lastSent: %v (diff: %v), triggering update", proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), requestVersion, resourcesRequested, resourcesLastSent, requestedResourcesDifference) return true } log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: ACK received for %s, version: %d nonce: %s resources ACKd: %v", proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestVersion, requestNonce, resourcesRequested) return false } // Helper to turn the resource names on a discovery request to a Set for later efficient intersection func getRequestedResourceNamesSet(discoveryRequest *xds_discovery.DiscoveryRequest) mapset.Set { resourcesRequested := mapset.NewSet() for idx := range discoveryRequest.ResourceNames
	return resourcesRequested
}

// isCNforProxy returns true if the given CN for the workload certificate matches the given proxy's identity.
// The proxy identity corresponds to the k8s service account, while the workload certificate CN is of the form
// <svc-account>.<namespace>.<trust-domain>.
func isCNforProxy(proxy *envoy.Proxy, cn certificate.CommonName) bool {
	proxyIdentity, err := catalog.GetServiceAccountFromProxyCertificate(proxy.GetCertificateCommonName())
	if err != nil {
		log.Error().Err(err).Msgf("Error looking up proxy identity for proxy with SerialNumber=%s on Pod with UID=%s",
			proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
		return false
	}

	// The workload certificate CN is of the form <svc-account>.<namespace>.<trust-domain>
	chunks := strings.Split(cn.String(), constants.DomainDelimiter)
	if len(chunks) < 3 {
		return false
	}

	identityForCN := identity.K8sServiceAccount{Name: chunks[0], Namespace: chunks[1]}
	return identityForCN == proxyIdentity
}
{ resourcesRequested.Add(discoveryRequest.ResourceNames[idx]) }
conditional_block
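The decision logic documented in respondToRequest boils down to a small per-TypeURL state machine over (version, nonce). Below is a minimal, self-contained Go sketch of those rules; the xdsState struct and shouldRespond function are illustrative stand-ins, not part of the OSM codebase.

package main

import "fmt"

// xdsState tracks what the control plane last sent on one TypeURL stream.
type xdsState struct {
	lastSentVersion uint64
	lastSentNonce   string
}

// shouldRespond mirrors the documented rules: reply to the first request
// (empty nonce), resync when the proxy reports a higher version (e.g. after
// a control-plane restart), ignore stale nonces, and treat a matching nonce
// as a plain ACK.
func shouldRespond(s *xdsState, reqVersion uint64, reqNonce string, nack bool) bool {
	switch {
	case nack:
		return false // proxy rejected the config; don't blindly resend
	case reqNonce == "":
		return true // first request on the stream
	case reqVersion > s.lastSentVersion:
		s.lastSentVersion = reqVersion // catch up, then respond
		return true
	case reqNonce != s.lastSentNonce:
		return false // stale request; a newer response is already in flight
	default:
		return false // nonce matches: ACK, nothing to send
	}
}

func main() {
	s := &xdsState{lastSentVersion: 3, lastSentNonce: "n3"}
	fmt.Println(shouldRespond(s, 0, "", false))   // true: first request
	fmt.Println(shouldRespond(s, 3, "n2", false)) // false: stale nonce
	fmt.Println(shouldRespond(s, 3, "n3", false)) // false: plain ACK
	fmt.Println(shouldRespond(s, 5, "n3", false)) // true: proxy ahead of us
}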
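The ACK-versus-new-subscription check at the end of respondToRequest rests on a single set operation from github.com/deckarep/golang-set, the library stream.go imports as mapset. A short runnable demo of that operation, with hypothetical cluster names:

package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	lastSent := mapset.NewSet()
	lastSent.Add("cluster-a")
	lastSent.Add("cluster-b")

	requested := mapset.NewSet()
	requested.Add("cluster-a")
	requested.Add("cluster-c") // never sent before

	// Difference returns the elements of <requested> not in <lastSent>.
	// A non-empty difference means Envoy wants something new, so respond.
	diff := requested.Difference(lastSent)
	fmt.Println(diff.Cardinality() != 0) // true: trigger an update
}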
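isCNforProxy's matching rule can be exercised in isolation. A sketch assuming a plain "." delimiter and a local struct in place of constants.DomainDelimiter and identity.K8sServiceAccount; the CN values are made up:

package main

import (
	"fmt"
	"strings"
)

type serviceAccount struct{ Name, Namespace string }

// cnMatches splits a CN of the form <svc-account>.<namespace>.<trust-domain>
// and compares the first two chunks against the proxy's identity.
func cnMatches(cn string, id serviceAccount) bool {
	chunks := strings.Split(cn, ".")
	if len(chunks) < 3 {
		return false // malformed CN: no trust domain
	}
	return serviceAccount{Name: chunks[0], Namespace: chunks[1]} == id
}

func main() {
	id := serviceAccount{Name: "bookstore", Namespace: "bookstore-ns"}
	fmt.Println(cnMatches("bookstore.bookstore-ns.cluster.local", id)) // true
	fmt.Println(cnMatches("bookbuyer.bookbuyer-ns.cluster.local", id)) // false
}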
main.py
#!/usr/bin/env python
#
"""
Mindset Meter

The mindset meter will test a user's psychometrics and report their
results. These results will also be shared with the person who invited
them to answer the survey.

This file will serve as a prototype of the web-based mindsetmeter
and develop its api.

bmh October 2012
"""

# LIBRARIES
#
# web app
import webapp2
from google.appengine.api import users  # log in users with open id
from google.appengine.ext import db     # database
from google.appengine.api import mail   # email
# templating
import jinja2
# mine
import util     # helper functions without dependencies
# python
import json     # to output api data
import logging  # log to appengine log
from contextlib import closing              # useful for csv
import cStringIO                            # ''
from zipfile import ZipFile, ZIP_DEFLATED   # ''
import csv
import config
import os
import mandrill

# Load helper classes
jinja_environment = jinja2.Environment(
    autoescape=True,
    loader=jinja2.FileSystemLoader('templates')
)


# Data Structures

class Model(db.Model):
    created = db.DateTimeProperty(auto_now_add=True)


class Metric(Model):
    """A survey and rubric"""
    name = db.StringProperty(required=True)

    @classmethod
    def get_by_name(cls, name):
        if name in config.metrics:
            cls.name = name
        else:
            raise Exception("{} is not a registered metric.".format(name))
        return cls


class Group(Model):
    """Parents of answers used to guarantee consistency for a user"""
    id = db.StringProperty(required=True)

    @classmethod
    def get_group(cls, group):
        result = Group.gql("""
            WHERE id = :1
            ORDER BY created DESC""", group).get()
        if not result:
            logging.warning('No matching group')
        return result

    @classmethod
    def put_group(cls, group):
        return Group(id=group).put()


class Result(Model):
    """Answers to a survey and access permissions"""
    keys = db.StringListProperty(required=True)
    metric = db.StringProperty(required=True)
    answers_json = db.TextProperty(default='')  # preferred

    @classmethod
    def get_results(cls, private_keys, group=None):
        public_keys = [util.Keys().get_public(k) for k in private_keys]
        ancestor = Group.get_group(group)
        if ancestor:
            results = Result.gql("""
                WHERE keys IN :1
                AND ANCESTOR IS :2
                ORDER BY created DESC""", public_keys, ancestor)
        else:
            results = Result.gql("""
                WHERE keys IN :1
                ORDER BY created DESC""", public_keys)
        metrics = set()
        answers = []
        for r in results:
            metrics.add(r.metric)
            answers.append(r.get_answers())
        if len(metrics) > 1:
            raise Exception("Keys were not all from the same metric: {} {}"
                            .format(public_keys, metrics))
        if len(answers) > 0:
            return {'metric': metrics.pop(), 'answers': answers}
        else:
            # No results
            logging.info('No answers found')
            return {'metric': 'no responses yet', 'answers': []}

    @classmethod
    def put_result(cls, keys, metric, answers, group):
        if group:
            parent = Group.get_group(group)
            result = Result(keys=keys, metric=metric, answers_json=answers,
                            parent=parent)
        else:
            result = Result(keys=keys, metric=metric, answers_json=answers)
        return result.put()

    def get_answers(self):
        # Some old entities don't have json-based results. Treat them as if
        # they are empty. This is easier than deleting them all.
        if self.answers_json:
            answers = json.loads(self.answers_json)
        else:
            answers = {}
        # Always take the precaution of hashing participant ids, if present.
        if 'pid' in answers:
            answers['pid'] = util.hash_participant_id(answers['pid'])
        return answers


# Page Handlers and APIs

class Handler(webapp2.RequestHandler):
    def write(self, *a, **kw):
        self.response.write(*a, **kw)

    def render_str(self, template, **params):
        return jinja_environment.get_template(template).render(**params)

    def render(self, template, **kw):
        self.write(self.render_str(template, **kw))

    def write_json(self, obj):
        self.response.headers['Content-Type'] = "text/json; charset=utf-8"
        self.write(json.dumps(obj))


class MainHandler(Handler):
    def get(self):
        self.render('index.html')


class TakeHandler(Handler):
    def get(self, name):
        metric = Metric.get_by_name(name)
        if metric:
            self.render(name + '_survey.html', name=name)
        else:
            logging.error('Could not find requested metric')
            self.render('404.html')


class CompleteHandler(Handler):
    def get(self, name):
        key = self.request.get('private_key', None)
        group = self.request.get('group', None)
        answers = []
        if key is None:
            # If there's no key, then this is a preview. Don't try to load
            # any answers.
            logging.info("No key present; rendering preview.")
        else:
            try:
                answers = Result.get_results([key], group)['answers']
            except Exception as e:
                # There was some problem with the keys that were given. Just
                # display the report with no answers.
                logging.error('Problem with private key: {}'.format(e))
        try:
            metric = Metric.get_by_name(name)
        except Exception as e:
            logging.error('Could not find requested metric: {}'.format(e))
            self.render('404.html')
            return
        # Render without html escaping
        self.render(metric.name + '_survey_complete.html', group=group,
                    answers=jinja2.Markup(json.dumps(answers)))


class SampleHandler(Handler):
    def get(self, name):
        metric = Metric.get_by_name(name)
        if metric:
            sample_template = name + '_sample_results.html'
            # If there's a sample report, render that.
            if os.path.isfile('templates/' + sample_template):
                self.render(name + '_sample_results.html', name=name)
            # Some reports can render themselves as a sample if no data is
            # provided. These don't have a separate sample template.
            # Instead, just serve up the main report template.
            else:
                self.render(name + '_results.html', name=name)
        else:
            logging.error('Could not find requested metric')
            self.render('404.html')


class ResultsHandler(Handler):
    def get(self, metric=None, keys_str=''):
        # Multiple keys can be specified, separated by hyphens, in which case
        # multiple sets of results should be sent to the template.
        keys = keys_str.split('-')
        # A group may be applicable here for single-keyed results.
        group = self.request.get('group') if len(keys) == 1 else None
        try:
            results = Result.get_results(keys, group)
        except Exception as e:
            # There was some problem with the keys that were given.
            logging.error('{}'.format(e))
            self.render('404.html')
            return
        template = None
        answers = []
        if metric:
            # A specific metric was requested. Check that 1) it exists and 2)
            # it matches the answers, if any. Then show that metric's results
            # page.
            if metric not in config.metrics:
                logging.error("Unknown metric: {}".format(metric))
                template = '404.html'
                                  .format(results['metric'], metric))
                    template = '404.html'
            answers = results['answers']
            # If the template hasn't been set by an error check above, give
            # the metric-specific results page.
            template = template or metric + '_results.html'
        else:
            # No specific metric was given. Infer it from the answers, if
            # any, otherwise show a generic no-results page.
            if len(results['answers']) > 0:
                metric = results['metric']
                answers = results['answers']
                template = metric + '_results.html'
            else:
                template = 'no_responses.html'
        # Render without html escaping.
        answers = jinja2.Markup(json.dumps(answers))
        self.render(template, group=group, answers=answers)


class ShareHandler(Handler):
    def get(self, name):
        keypair = util.Keys().get_pair()
        # Render without html escaping
        metric = Metric.get_by_name(name)
        self.render(
            metric.name + '_share.html',
            name=name,
            private_key=keypair['private_keys'][0],
            public_key=keypair['public_keys'][0])


class CsvHandler(Handler):
    """Return a csv based on the json arrays passed in.

    For example, the following is a valid request (exploded for clarity):

    /csv?
        filename=gotcha&
        headers=["name","age"]&
        data=[["jack",12],["john",42],["joe",68]]
    """
    def get(self):
        # Get input
        data = self.request.get('data')
        headers = self.request.get('headers')
        filename = self.request.get('filename').encode('ascii', 'ignore')

        # Convert from json
        data = json.loads(data)
        if headers:
            headers = json.loads(headers)

        # Check input
        if not headers:
            logging.warning('no headers sent')
        if not isinstance(headers, list):
            logging.warning('the headers are not a list')
        if not data:
            logging.warning('no data')
        if not isinstance(data, list):
            logging.warning('data is not a list')
        if not len(data) > 0:
            logging.warning('data has no length')
        if not all([isinstance(row, list) for row in data]):
            logging.warning('data contains members which are not lists')

        # Set up headers for the browser to correctly recognize the file
        self.response.headers['Content-Type'] = 'text/csv'
        self.response.headers['Content-Disposition'] = \
            'attachment; filename="' + filename + '.csv"'

        # Write the csv to a file-like string
        csv_file = cStringIO.StringIO()
        csv_writer = csv.writer(csv_file)

        # Add headers if sent
        if headers:
            csv_writer.writerow(headers)

        # Add data
        for row in data:
            csv_writer.writerow(row)

        # Emit the file directly to the HTTP response stream
        self.response.out.write(csv_file.getvalue())


class AdminCreateHandler(Handler):
    def get(self):
        self.render('create.html')


class MetricApi(Handler):
    default_rubric = """<script>
        pretty_answers = JSON.stringify(mm.answers, null, 4)
        $('#responses').html(pretty_answers);
    </script>
    <pre id='responses'></pre>
    """
    default_survey = """<input name="quest"/>"""

    def get(self):
        name = self.request.get('name')
        if name:
            metric = Metric.get_by_name(name)
            if metric:
                self.write_json(util.to_dict(metric))
            else:
                default_description = "<h3>" + name + "</h3>"
                self.write_json({
                    'survey': self.default_survey,
                    'rubric': self.default_rubric,
                    'description': default_description
                })
        else:
            logging.error('Metric request had no name')
            self.write_json({'error': 'a name is required'})


class AdminMetricApi(Handler):
    def post(self):
        name = self.request.get('name')
        survey = self.request.get('survey')
        rubric = self.request.get('rubric')
        description = self.request.get('description')
        if name and survey and rubric:
            Metric(
                name=name,
                survey=survey,
                rubric=rubric,
                description=description).put()
            self.write_json({'ok': True})
        else:
            logging.error('Posted metric was missing name, survey, '
                          'description, or rubric')
            message = ("a name, survey, description, and grading rubric "
                       "are required")
            self.write_json({'error': message})


class AdminDataHandler(Handler):
    """Return a csv of all responses"""
    def get(self):
        # Set up headers for the browser to correctly recognize the file
        self.response.headers['Content-Type'] = 'text/csv'
        self.response.headers['Content-Disposition'] = \
            'attachment; filename="mm_data.csv"'

        # Write the csv to a file-like string
        csv_file = cStringIO.StringIO()
        csv_writer = csv.writer(csv_file)
        headers = ['created', 'metric', 'question', 'answer']
        csv_writer.writerow(headers)
        for result in Result.all():
            for k, v in result.get_answers().items():
                row = [result.created, result.metric, k, v]
                csv_writer.writerow(row)

        # Emit the file directly to the HTTP response stream
        self.response.out.write(csv_file.getvalue())
        logging.info('All data downloaded by admin')
        logging.info(csv_file.getvalue())


class ResultApi(Handler):
    def get(self):
        private_keys = json.loads(self.request.get('private_keys'))
        group = self.request.get('group')
        if private_keys:
            for k in private_keys:
                k.encode('ascii')
            try:
                response = Result.get_results(private_keys, group)
            except Exception as e:
                logging.error('{}'.format(e))
                response = "Problem with provided keys. {}".format(e)
        else:
            logging.error('Requested result without a private key')
            response = "a private key is required"
        self.write_json(response)

    def post(self):
        keys = json.loads(self.request.get('keys'))
        metric = self.request.get('metric')
        group = self.request.get('group')
        answers = self.request.get('answers')
        json.loads(answers)  # validity check
        if keys and metric and answers:
            logging.info("Saving result {} {} {} {}".format(keys, metric,
                                                            answers, group))
            Result.put_result(keys, metric, answers, group)
            self.write_json({'ok': True})
        else:
            logging.error('Posted result without a metric, keys, or answers')
            message = "a metric, keys, and answers are required"
            self.write_json({'error': message})


class KeysApi(Handler):
    """Hand out public and private keys"""
    def get(self):
        keypair = util.Keys().get_pair()
        Group.put_group(keypair['private_keys'][0])
        self.write_json(keypair)


class ErrorApi(Handler):
    """Log javascript errors for debugging purposes"""
    def post(self):
        logging.error('Javascript Error: ' + self.request.get('message'))


class EmailApi(Handler):
    """Send a user an email with their keys"""
    def post(self):
        address = self.request.get('address')
        private_key = self.request.get('private_key')
        metric = self.request.get('metric')
        if address and private_key and metric:
            private_key.encode('ascii')  # handle unicode properly
            public_key = util.Keys().get_public(private_key)
            root = "http://survey.perts.net"
            take_link = root + '/take/' + metric + '?public_key=' + public_key
            results_link = root + '/results/' + private_key
            message = self.render_str(
                'email.html',
                address=address,
                take_link=take_link,
                results_link=results_link,
                metric=metric)
            result = mandrill.send(
                to_address=address,
                subject="Mindset Meter Study Links",
                body=message,
            )
            if result:
                logging.info(
                    'Email sent to ' + address +
                    ' with the message ' + message
                )
            self.write_json({'ok': True})
        else:
            message = ("address, private_key, and metric are necessary to "
                       "email a user")
            logging.error(message)
            self.write_json({'error': message})


class EmailFeedback(Handler):
    """Allow users to send feedback on the mindset meter.

    This should be as simple as possible to maximize feedback.
    """
    def post(self):
        # Reply-to is optional
        reply_to = self.request.get('reply_to')
        logging.info('reply to is: {}'.format(reply_to))
        message = self.request.get('message')
        to_address = config.from_server_email_address
        message = self.render_str(
            'email_feedback.html',
            reply_to=reply_to,
            message=message,
        )
        mandrill.send(
            to_address=to_address,
            subject="Feedback on the mindset meter",
            body=message,
        )
        logging.info('Feedback Email sent to ' + to_address +
                     ' with the message ' + message)
        self.write_json({'ok': True})


class PageNotFoundHandler(Handler):
    def get(self):
        self.render('404.html')


app = webapp2.WSGIApplication([
    ('/', MainHandler),
    ('/take/(.*)', TakeHandler),
    ('/complete/(.*)', CompleteHandler),
    webapp2.Route(r'/results/<keys_str>', handler=ResultsHandler),
    webapp2.Route(r'/results/<metric>/<keys_str>', handler=ResultsHandler),
    ('/sample/(.*)', SampleHandler),
    ('/share/(.*)', ShareHandler),
    ('/csv', CsvHandler),
    ('/api/metric', MetricApi),
    ('/admin/api/metric', AdminMetricApi),
    ('/api/result', ResultApi),
    ('/api/keys', KeysApi),
    ('/api/error', ErrorApi),
    ('/api/email', EmailApi),
    ('/api/email_feedback', EmailFeedback),
    ('/admin/data', AdminDataHandler),
    ('/admin/create', AdminCreateHandler),
    ('/.*', PageNotFoundHandler)
], debug=True)
if len(results['answers']) > 0: if metric != results['metric']: logging.error("Key is from metric {}, but {} requested."
random_line_split
main.py
#!/usr/bin/env python # """ Mindset Meter The mindset meter will test a user's psychometrics and report their results. These results will also be shared with the person who invited them to answer the survey. This file will serve as a prototype of the web based mindset meter and develop its api. bmh October 2012 """ # LIBRARIES # # web app import webapp2 from google.appengine.api import users # log in users with open id from google.appengine.ext import db # database from google.appengine.api import mail # email # templating import jinja2 # mine import util # helper functions without dependencies # python import json # to output api data import logging # log to appengine log from contextlib import closing # useful for csv import cStringIO # '' from zipfile import ZipFile, ZIP_DEFLATED # '' import csv import config # import os import mandrill # Load helper classes jinja_environment = jinja2.Environment( autoescape=True, loader=jinja2.FileSystemLoader('templates') ) # Data Structures class Model(db.Model): created = db.DateTimeProperty(auto_now_add=True) class Metric(Model): """A survey and rubric""" name = db.StringProperty(required=True) @classmethod def get_by_name(self, name): if name in config.metrics: self.name = name else: raise Exception("{} is not a registered metric.".format(name)) return self class Group(Model): """Parents of answers used to guarantee consistency for a user""" id = db.StringProperty(required=True) @classmethod def get_group(self, group): result = Group.gql(""" WHERE id = :1 ORDER BY created DESC""", group).get() if not result: logging.warning('No matching group') return result @classmethod def put_group(self, group): return Group(id=group).put() class Result(Model): """Answers to a survey and access permissions""" keys = db.StringListProperty(required=True) metric = db.StringProperty(required=True) answers_json = db.TextProperty(default='') # preferred @classmethod def get_results(self, private_keys, group=None): public_keys = [util.Keys().get_public(k) for k in private_keys] ancestor = Group.get_group(group) if ancestor: results = Result.gql(""" WHERE keys IN :1 AND ANCESTOR IS :2 ORDER BY created DESC""", public_keys, ancestor) else: results = Result.gql(""" WHERE keys IN :1 ORDER BY created DESC""", public_keys) metrics = set() answers = [] for r in results: metrics.add(r.metric) answers.append(r.get_answers()) if len(metrics) > 1: raise Exception("Keys were not all from the same metric: {} {}" .format(public_keys, metrics)) if len(answers) > 0: return {'metric': metrics.pop(), 'answers': answers} else: # No results logging.info('No answers found') return {'metric': 'no responses yet', 'answers': []} @classmethod def put_result(self, keys, metric, answers, group): if group: parent = Group.get_group(group) result = Result(keys=keys, metric=metric, answers_json=answers, parent=parent) else: result = Result(keys=keys, metric=metric, answers_json=answers) return result.put() def get_answers(self): # Some old entities don't have json-based results. Treat them as if # they are empty. This is easier than deleting them all. if self.answers_json: answers = json.loads(self.answers_json) else: answers = {} # Always take the precaution of hashing participant ids, if present. if 'pid' in answers: answers['pid'] = util.hash_participant_id(answers['pid']) return answers # Page Handlers and APIs class Handler(webapp2.RequestHandler): def
(self, *a, **kw): self.response.write(*a, **kw) def render_str(self, template, **params): return jinja_environment.get_template(template).render(**params) def render(self, template, **kw): self.write(self.render_str(template, **kw)) def write_json(self, obj): self.response.headers['Content-Type'] = "text/json; charset=utf-8" self.write(json.dumps(obj)) class MainHandler(Handler): def get(self): self.render('index.html') class TakeHandler(Handler): def get(self, name): metric = Metric.get_by_name(name) if metric: self.render(name + '_survey.html', name=name) else: logging.error('Could not find requested metric') self.render('404.html') class CompleteHandler(Handler): def get(self, name): key = self.request.get('private_key', None) group = self.request.get('group', None) answers = [] if key is None: # If there's no key, then this is a preview. Don't try to load any # answers. logging.info("No key present; rendering preview.") else: try: answers = Result.get_results([key], group)['answers'] except Exception as e: # There was some problem with the keys that were given. Just # display the report with no answers. logging.error('Problem with private key: {}'.format(e)) try: metric = Metric.get_by_name(name) except Exception as e: logging.error('Could not find requested metric: {}'.format(e)) self.render('404.html') return # Render without html escaping self.render(metric.name + '_survey_complete.html', group=group, answers=jinja2.Markup(json.dumps(answers))) class SampleHandler(Handler): def get(self, name): metric = Metric.get_by_name(name) if metric: sample_template = name + '_sample_results.html' # If there's a sample report, render that. if os.path.isfile('templates/' + sample_template): self.render(name + '_sample_results.html', name=name) # Some reports can render themselves as a sample if no data is # provided. These don't have a separate sample template. Instead, # just serve up the main report template. else: self.render(name + '_results.html', name=name) else: logging.error('Could not find requested metric') self.render('404.html') class ResultsHandler(Handler): def get(self, metric=None, keys_str=''): # Multiple keys can be specified, separated by hyphens, in which case # multiple sets of results should be sent to the template. keys = keys_str.split('-') # A group may be applicable here for single-keyed results. group = self.request.get('group') if len(keys) == 1 else None try: results = Result.get_results(keys, group) except Exception as e: # There was some problem with the keys that were given. logging.error('{}'.format(e)) self.render('404.html') return template = None answers = [] if metric: # A specific metric was requested. Check that 1) it exists and 2) # it matches the answers, if any. Then show that metric's results # page. if metric not in config.metrics: logging.error("Unknown metric: {}".format(metric)) template = '404.html' if len(results['answers']) > 0: if metric != results['metric']: logging.error("Key is from metric {}, but {} requested." .format(results['metric'], metric)) template = '404.html' answers = results['answers'] # If the template hasn't been set by an error check above, give the # metric-specific results page. template = template or metric + '_results.html' else: # No specific metric was given. Infer it from the answers, if any, # otherwise show a generic no-results page. 
if len(results['answers']) > 0: metric = results['metric'] answers = results['answers'] template = metric + '_results.html' else: template = 'no_responses.html' # Render without html escaping. answers = jinja2.Markup(json.dumps(answers)) self.render(template, group=group, answers=answers) class ShareHandler(Handler): def get(self, name): keypair = util.Keys().get_pair() # Render without html escaping metric = Metric.get_by_name(name) self.render( metric.name + '_share.html', name=name, private_key=keypair['private_keys'][0], public_key=keypair['public_keys'][0]) class CsvHandler(Handler): """Return a csv based on the json array passed in for example the following is a valid request (exploded for clarity) /csv? filename=gotcha& headers=["name","age"]& data=[["jack",12],["john",42],["joe",68]] """ def get(self): # Get input data = self.request.get('data') headers = self.request.get('headers') filename = self.request.get('filename').encode('ascii', 'ignore') # Convert to json data = json.loads(data) if headers: headers = json.loads(headers) # Check input if not headers: logging.warning('no headers sent') if not isinstance(headers, list): logging.warning('the headers are not a list') if not data: logging.warning('no data') if not isinstance(data, list): logging.warning('data is not a list') if not len(data) > 0: logging.warning('data has no length') if not all(isinstance(row, list) for row in data): logging.warning('data contains members which are not lists') # Set up headers for browser to correctly recognize the file self.response.headers['Content-Type'] = 'text/csv' self.response.headers['Content-Disposition'] = 'attachment; filename="' + filename + '.csv"' # write the csv to a file like string csv_file = cStringIO.StringIO() csv_writer = csv.writer(csv_file) # add headers if sent if headers: csv_writer.writerow(headers) # add data for row in data: csv_writer.writerow(row) # Emit the files directly to HTTP response stream self.response.out.write(csv_file.getvalue()) class AdminCreateHandler(Handler): def get(self): self.render('create.html') class MetricApi(Handler): default_rubric = """<script> pretty_answers = JSON.stringify(mm.answers, null, 4) $('#responses').html(pretty_answers); </script> <pre id='responses'></pre> """ default_survey = """<input name="quest"/>""" def get(self): name = self.request.get('name') if name: metric = Metric.get_by_name(name) if metric: self.write_json(util.to_dict(metric)) else: default_description = "<h3>" + name + "</h3>" self.write_json({ 'survey': self.default_survey, 'rubric': self.default_rubric, 'description': default_description }) else: logging.error('Metric request had no name') self.write_json({'error': 'a name is required'}) class AdminMetricApi(Handler): def post(self): name = self.request.get('name') survey = self.request.get('survey') rubric = self.request.get('rubric') description = self.request.get('description') if name and survey and rubric: Metric( name=name, survey=survey, rubric=rubric, description=description).put() self.write_json({'ok': True}) else: logging.error('Posted metric was missing name, survey, description, or rubric') message = "a name, survey, description, and grading rubric are required" self.write_json({'error': message}) class AdminDataHandler(Handler): """Return a csv of all responses""" def get(self): # Set up headers for browser to correctly recognize the file self.response.headers['Content-Type'] = 'text/csv' self.response.headers['Content-Disposition'] = 'attachment; filename="mm_data.csv"' # write the csv to a file 
like string csv_file = cStringIO.StringIO() csv_writer = csv.writer(csv_file) headers = ['created', 'metric', 'question', 'answer'] csv_writer.writerow(headers) for result in Result.all(): for k, v in result.get_answers().items(): row = [result.created, result.metric, k, v] csv_writer.writerow(row) # Emit the files directly to HTTP response stream self.response.out.write(csv_file.getvalue()) logging.info('All data downloaded by admin') logging.info(csv_file.getvalue()) class ResultApi(Handler): def get(self): private_keys = json.loads(self.request.get('private_keys')) group = self.request.get('group') if private_keys: private_keys = [k.encode('ascii') for k in private_keys] try: response = Result.get_results(private_keys, group) except Exception as e: logging.error('{}'.format(e)) response = "Problem with provided keys. {}".format(e) else: logging.error('Requested result without a private key') response = "a private key is required" self.write_json(response) def post(self): keys = json.loads(self.request.get('keys')) metric = self.request.get('metric') group = self.request.get('group') answers = self.request.get('answers') json.loads(answers) # validity check if keys and metric and answers: logging.info("Saving result {} {} {} {}".format(keys, metric, answers, group)) Result.put_result(keys, metric, answers, group) self.write_json({'ok': True}) else: logging.error('Posted result without a metric, keys, or answers') message = "a metric, keys, and answers are required" self.write_json({'error': message}) class KeysApi(Handler): """Hand out public and private keys""" def get(self): keypair = util.Keys().get_pair() Group.put_group(keypair['private_keys'][0]) self.write_json(keypair) class ErrorApi(Handler): """Log javascript errors for debugging purposes""" def post(self): logging.error('Javascript Error: ' + self.request.get('message')) class EmailApi(Handler): """Send a user an email with their keys""" def post(self): address = self.request.get('address') private_key = self.request.get('private_key') metric = self.request.get('metric') if address and private_key and metric: private_key = private_key.encode('ascii') # handle unicode properly public_key = util.Keys().get_public(private_key) root = "http://survey.perts.net" take_link = root + '/take/' + metric + '?public_key=' + public_key results_link = root + '/results/' + private_key message = self.render_str( 'email.html', address=address, take_link=take_link, results_link=results_link, metric=metric) result = mandrill.send( to_address=address, subject="Mindset Meter Study Links", body=message, ) if result: logging.info( 'Email sent to ' + address + ' with the message ' + message ) self.write_json({'ok': True}) else: message = "address, private_key, and metric are necessary to email a user" logging.error(message) self.write_json({'error': message}) class EmailFeedback(Handler): """Allow users to send feedback on the mindset meter. This should be as simple as possible to maximize feedback. 
""" def post(self): # Reply to is optional reply_to = self.request.get('reply_to') logging.error('reply to is : {}'.format(reply_to)) message = self.request.get('message') to_address = config.from_server_email_address message = self.render_str( 'email_feedback.html', reply_to=reply_to, message=message, ) mandrill.send( to_address=to_address, subject="Feedback on the mindset meter", body=message, ) logging.info('Feedback Email sent to ' + to_address + ' with the message ' + message) self.write_json({'ok': True}) class PageNotFoundHandler(Handler): def get(self): self.render('404.html') app = webapp2.WSGIApplication([ ('/', MainHandler), ('/take/(.*)', TakeHandler), ('/complete/(.*)', CompleteHandler), webapp2.Route(r'/results/<keys_str>', handler=ResultsHandler), webapp2.Route(r'/results/<metric>/<keys_str>', handler=ResultsHandler), ('/sample/(.*)', SampleHandler), ('/share/(.*)', ShareHandler), ('/csv', CsvHandler), ('/api/metric', MetricApi), ('/admin/api/metric', AdminMetricApi), ('/api/result', ResultApi), ('/api/keys', KeysApi), ('/api/error', ErrorApi), ('/api/email', EmailApi), ('/api/email_feedback', EmailFeedback), ('/admin/data', AdminDataHandler), ('/admin/create', AdminCreateHandler), ('/.*', PageNotFoundHandler) ], debug=True)
write
identifier_name
main.py
#!/usr/bin/env python # """ Mindset Meter The mindset meter will test a user's psychometrics and report their results. These results will also be shared with the person who invited them to answer the survey. This file will serve as a prototype of the web based mindset meter and develop its api. bmh October 2012 """ # LIBRARIES # # web app import webapp2 from google.appengine.api import users # log in users with open id from google.appengine.ext import db # database from google.appengine.api import mail # email # templating import jinja2 # mine import util # helper functions without dependencies # python import json # to output api data import logging # log to appengine log from contextlib import closing # useful for csv import cStringIO # '' from zipfile import ZipFile, ZIP_DEFLATED # '' import csv import config # import os import mandrill # Load helper classes jinja_environment = jinja2.Environment( autoescape=True, loader=jinja2.FileSystemLoader('templates') ) # Data Structures class Model(db.Model): created = db.DateTimeProperty(auto_now_add=True) class Metric(Model): """A survey and rubric""" name = db.StringProperty(required=True) @classmethod def get_by_name(self, name): if name in config.metrics: self.name = name else: raise Exception("{} is not a registered metric.".format(name)) return self class Group(Model): """Parents of answers used to guarantee consistency for a user""" id = db.StringProperty(required=True) @classmethod def get_group(self, group): result = Group.gql(""" WHERE id = :1 ORDER BY created DESC""", group).get() if not result: logging.warning('No matching group') return result @classmethod def put_group(self, group): return Group(id=group).put() class Result(Model): """Answers to a survey and access permissions""" keys = db.StringListProperty(required=True) metric = db.StringProperty(required=True) answers_json = db.TextProperty(default='') # preferred @classmethod def get_results(self, private_keys, group=None): public_keys = [util.Keys().get_public(k) for k in private_keys] ancestor = Group.get_group(group) if ancestor: results = Result.gql(""" WHERE keys IN :1 AND ANCESTOR IS :2 ORDER BY created DESC""", public_keys, ancestor) else: results = Result.gql(""" WHERE keys IN :1 ORDER BY created DESC""", public_keys) metrics = set() answers = [] for r in results: metrics.add(r.metric) answers.append(r.get_answers()) if len(metrics) > 1: raise Exception("Keys were not all from the same metric: {} {}" .format(public_keys, metrics)) if len(answers) > 0: return {'metric': metrics.pop(), 'answers': answers} else: # No results logging.info('No answers found') return {'metric': 'no responses yet', 'answers': []} @classmethod def put_result(self, keys, metric, answers, group): if group: parent = Group.get_group(group) result = Result(keys=keys, metric=metric, answers_json=answers, parent=parent) else: result = Result(keys=keys, metric=metric, answers_json=answers) return result.put() def get_answers(self): # Some old entities don't have json-based results. Treat them as if # they are empty. This is easier than deleting them all. if self.answers_json: answers = json.loads(self.answers_json) else: answers = {} # Always take the precaution of hashing participant ids, if present. 
if 'pid' in answers: answers['pid'] = util.hash_participant_id(answers['pid']) return answers # Page Handlers and APIs class Handler(webapp2.RequestHandler): def write(self, *a, **kw): self.response.write(*a, **kw) def render_str(self, template, **params): return jinja_environment.get_template(template).render(**params) def render(self, template, **kw): self.write(self.render_str(template, **kw)) def write_json(self, obj): self.response.headers['Content-Type'] = "text/json; charset=utf-8" self.write(json.dumps(obj)) class MainHandler(Handler): def get(self): self.render('index.html') class TakeHandler(Handler): def get(self, name): metric = Metric.get_by_name(name) if metric: self.render(name + '_survey.html', name=name) else:
class CompleteHandler(Handler): def get(self, name): key = self.request.get('private_key', None) group = self.request.get('group', None) answers = [] if key is None: # If there's no key, then this is a preview. Don't try to load any # answers. logging.info("No key present; rendering preview.") else: try: answers = Result.get_results([key], group)['answers'] except Exception as e: # There was some problem with the keys that were given. Just # display the report with no answers. logging.error('Problem with private key: {}'.format(e)) try: metric = Metric.get_by_name(name) except Exception as e: logging.error('Could not find requested metric: {}'.format(e)) self.render('404.html') return # Render without html escaping self.render(metric.name + '_survey_complete.html', group=group, answers=jinja2.Markup(json.dumps(answers))) class SampleHandler(Handler): def get(self, name): metric = Metric.get_by_name(name) if metric: sample_template = name + '_sample_results.html' # If there's a sample report, render that. if os.path.isfile('templates/' + sample_template): self.render(name + '_sample_results.html', name=name) # Some reports can render themselves as a sample if no data is # provided. These don't have a separate sample template. Instead, # just serve up the main report template. else: self.render(name + '_results.html', name=name) else: logging.error('Could not find requested metric') self.render('404.html') class ResultsHandler(Handler): def get(self, metric=None, keys_str=''): # Multiple keys can be specified, separated by hyphens, in which case # multiple sets of results should be sent to the template. keys = keys_str.split('-') # A group may be applicable here for single-keyed results. group = self.request.get('group') if len(keys) == 1 else None try: results = Result.get_results(keys, group) except Exception as e: # There was some problem with the keys that were given. logging.error('{}'.format(e)) self.render('404.html') return template = None answers = [] if metric: # A specific metric was requested. Check that 1) it exists and 2) # it matches the answers, if any. Then show that metric's results # page. if metric not in config.metrics: logging.error("Unknown metric: {}".format(metric)) template = '404.html' if len(results['answers']) > 0: if metric != results['metric']: logging.error("Key is from metric {}, but {} requested." .format(results['metric'], metric)) template = '404.html' answers = results['answers'] # If the template hasn't been set by an error check above, give the # metric-specific results page. template = template or metric + '_results.html' else: # No specific metric was given. Infer it from the answers, if any, # otherwise show a generic no-results page. if len(results['answers']) > 0: metric = results['metric'] answers = results['answers'] template = metric + '_results.html' else: template = 'no_responses.html' # Render without html escaping. answers = jinja2.Markup(json.dumps(answers)) self.render(template, group=group, answers=answers) class ShareHandler(Handler): def get(self, name): keypair = util.Keys().get_pair() # Render without html escaping metric = Metric.get_by_name(name) self.render( metric.name + '_share.html', name=name, private_key=keypair['private_keys'][0], public_key=keypair['public_keys'][0]) class CsvHandler(Handler): """Return a csv based on the json array passed in for example the following is a valid request (exploded for clarity) /csv? 
filename=gotcha& headers=["name","age"]& data=[["jack",12],["john",42],["joe",68]] """ def get(self): # Get input data = self.request.get('data') headers = self.request.get('headers') filename = self.request.get('filename').encode('ascii', 'ignore') # Convert to json data = json.loads(data) if headers: headers = json.loads(headers) # Check input if not headers: logging.warning('no headers sent') if not isinstance(headers, list): logging.warning('the headers are not a list') if not data: logging.warning('no data') if not isinstance(data, list): logging.warning('data is not a list') if not len(data) > 0: logging.warning('data has no length') if not all(isinstance(row, list) for row in data): logging.warning('data contains members which are not lists') # Set up headers for browser to correctly recognize the file self.response.headers['Content-Type'] = 'text/csv' self.response.headers['Content-Disposition'] = 'attachment; filename="' + filename + '.csv"' # write the csv to a file like string csv_file = cStringIO.StringIO() csv_writer = csv.writer(csv_file) # add headers if sent if headers: csv_writer.writerow(headers) # add data for row in data: csv_writer.writerow(row) # Emit the files directly to HTTP response stream self.response.out.write(csv_file.getvalue()) class AdminCreateHandler(Handler): def get(self): self.render('create.html') class MetricApi(Handler): default_rubric = """<script> pretty_answers = JSON.stringify(mm.answers, null, 4) $('#responses').html(pretty_answers); </script> <pre id='responses'></pre> """ default_survey = """<input name="quest"/>""" def get(self): name = self.request.get('name') if name: metric = Metric.get_by_name(name) if metric: self.write_json(util.to_dict(metric)) else: default_description = "<h3>" + name + "</h3>" self.write_json({ 'survey': self.default_survey, 'rubric': self.default_rubric, 'description': default_description }) else: logging.error('Metric request had no name') self.write_json({'error': 'a name is required'}) class AdminMetricApi(Handler): def post(self): name = self.request.get('name') survey = self.request.get('survey') rubric = self.request.get('rubric') description = self.request.get('description') if name and survey and rubric: Metric( name=name, survey=survey, rubric=rubric, description=description).put() self.write_json({'ok': True}) else: logging.error('Posted metric was missing name, survey, description, or rubric') message = "a name, survey, description, and grading rubric are required" self.write_json({'error': message}) class AdminDataHandler(Handler): """Return a csv of all responses""" def get(self): # Set up headers for browser to correctly recognize the file self.response.headers['Content-Type'] = 'text/csv' self.response.headers['Content-Disposition'] = 'attachment; filename="mm_data.csv"' # write the csv to a file like string csv_file = cStringIO.StringIO() csv_writer = csv.writer(csv_file) headers = ['created', 'metric', 'question', 'answer'] csv_writer.writerow(headers) for result in Result.all(): for k, v in result.get_answers().items(): row = [result.created, result.metric, k, v] csv_writer.writerow(row) # Emit the files directly to HTTP response stream self.response.out.write(csv_file.getvalue()) logging.info('All data downloaded by admin') logging.info(csv_file.getvalue()) class ResultApi(Handler): def get(self): private_keys = json.loads(self.request.get('private_keys')) group = self.request.get('group') if private_keys: private_keys = [k.encode('ascii') for k in private_keys] try: response = Result.get_results(private_keys, 
group) except Exception as e: logging.error('{}'.format(e)) response = "Problem with provided keys. {}".format(e) else: logging.error('Requested result without a private key') response = "a private key is required" self.write_json(response) def post(self): keys = json.loads(self.request.get('keys')) metric = self.request.get('metric') group = self.request.get('group') answers = self.request.get('answers') json.loads(answers) # validity check if keys and metric and answers: logging.info("Saving result {} {} {} {}".format(keys, metric, answers, group)) Result.put_result(keys, metric, answers, group) self.write_json({'ok': True}) else: logging.error('Posted result without a metric, keys, or answers') message = "a metric, keys, and answers are required" self.write_json({'error': message}) class KeysApi(Handler): """Hand out public and private keys""" def get(self): keypair = util.Keys().get_pair() Group.put_group(keypair['private_keys'][0]) self.write_json(keypair) class ErrorApi(Handler): """Log javascript errors for debugging purposes""" def post(self): logging.error('Javascript Error: ' + self.request.get('message')) class EmailApi(Handler): """Send a user an email with their keys""" def post(self): address = self.request.get('address') private_key = self.request.get('private_key') metric = self.request.get('metric') if address and private_key and metric: private_key = private_key.encode('ascii') # handle unicode properly public_key = util.Keys().get_public(private_key) root = "http://survey.perts.net" take_link = root + '/take/' + metric + '?public_key=' + public_key results_link = root + '/results/' + private_key message = self.render_str( 'email.html', address=address, take_link=take_link, results_link=results_link, metric=metric) result = mandrill.send( to_address=address, subject="Mindset Meter Study Links", body=message, ) if result: logging.info( 'Email sent to ' + address + ' with the message ' + message ) self.write_json({'ok': True}) else: message = "address, private_key, and metric are necessary to email a user" logging.error(message) self.write_json({'error': message}) class EmailFeedback(Handler): """Allow users to send feedback on the mindset meter. This should be as simple as possible to maximize feedback. """ def post(self): # Reply to is optional reply_to = self.request.get('reply_to') logging.error('reply to is : {}'.format(reply_to)) message = self.request.get('message') to_address = config.from_server_email_address message = self.render_str( 'email_feedback.html', reply_to=reply_to, message=message, ) mandrill.send( to_address=to_address, subject="Feedback on the mindset meter", body=message, ) logging.info('Feedback Email sent to ' + to_address + ' with the message ' + message) self.write_json({'ok': True}) class PageNotFoundHandler(Handler): def get(self): self.render('404.html') app = webapp2.WSGIApplication([ ('/', MainHandler), ('/take/(.*)', TakeHandler), ('/complete/(.*)', CompleteHandler), webapp2.Route(r'/results/<keys_str>', handler=ResultsHandler), webapp2.Route(r'/results/<metric>/<keys_str>', handler=ResultsHandler), ('/sample/(.*)', SampleHandler), ('/share/(.*)', ShareHandler), ('/csv', CsvHandler), ('/api/metric', MetricApi), ('/admin/api/metric', AdminMetricApi), ('/api/result', ResultApi), ('/api/keys', KeysApi), ('/api/error', ErrorApi), ('/api/email', EmailApi), ('/api/email_feedback', EmailFeedback), ('/admin/data', AdminDataHandler), ('/admin/create', AdminCreateHandler), ('/.*', PageNotFoundHandler) ], debug=True)
logging.error('Could not find requested metric') self.render('404.html')
conditional_block
main.py
#!/usr/bin/env python # """ Mindset Meter The mindset meter will test a user's psychometrics and report their results. These results will also be shared with the person who invited them to answer the survey. This file will serve as a prototype of the web based mindset meter and develop its api. bmh October 2012 """ # LIBRARIES # # web app import webapp2 from google.appengine.api import users # log in users with open id from google.appengine.ext import db # database from google.appengine.api import mail # email # templating import jinja2 # mine import util # helper functions without dependencies # python import json # to output api data import logging # log to appengine log from contextlib import closing # useful for csv import cStringIO # '' from zipfile import ZipFile, ZIP_DEFLATED # '' import csv import config # import os import mandrill # Load helper classes jinja_environment = jinja2.Environment( autoescape=True, loader=jinja2.FileSystemLoader('templates') ) # Data Structures class Model(db.Model): created = db.DateTimeProperty(auto_now_add=True) class Metric(Model): """A survey and rubric""" name = db.StringProperty(required=True) @classmethod def get_by_name(self, name): if name in config.metrics: self.name = name else: raise Exception("{} is not a registered metric.".format(name)) return self class Group(Model): """Parents of answers used to guarantee consistency for a user""" id = db.StringProperty(required=True) @classmethod def get_group(self, group): result = Group.gql(""" WHERE id = :1 ORDER BY created DESC""", group).get() if not result: logging.warning('No matching group') return result @classmethod def put_group(self, group): return Group(id=group).put() class Result(Model): """Answers to a survey and access permissions""" keys = db.StringListProperty(required=True) metric = db.StringProperty(required=True) answers_json = db.TextProperty(default='') # preferred @classmethod def get_results(self, private_keys, group=None): public_keys = [util.Keys().get_public(k) for k in private_keys] ancestor = Group.get_group(group) if ancestor: results = Result.gql(""" WHERE keys IN :1 AND ANCESTOR IS :2 ORDER BY created DESC""", public_keys, ancestor) else: results = Result.gql(""" WHERE keys IN :1 ORDER BY created DESC""", public_keys) metrics = set() answers = [] for r in results: metrics.add(r.metric) answers.append(r.get_answers()) if len(metrics) > 1: raise Exception("Keys were not all from the same metric: {} {}" .format(public_keys, metrics)) if len(answers) > 0: return {'metric': metrics.pop(), 'answers': answers} else: # No results logging.info('No answers found') return {'metric': 'no responses yet', 'answers': []} @classmethod def put_result(self, keys, metric, answers, group): if group: parent = Group.get_group(group) result = Result(keys=keys, metric=metric, answers_json=answers, parent=parent) else: result = Result(keys=keys, metric=metric, answers_json=answers) return result.put() def get_answers(self): # Some old entities don't have json-based results. Treat them as if # they are empty. This is easier than deleting them all. if self.answers_json: answers = json.loads(self.answers_json) else: answers = {} # Always take the precaution of hashing participant ids, if present. 
if 'pid' in answers: answers['pid'] = util.hash_participant_id(answers['pid']) return answers # Page Handlers and APIs class Handler(webapp2.RequestHandler): def write(self, *a, **kw): self.response.write(*a, **kw) def render_str(self, template, **params): return jinja_environment.get_template(template).render(**params) def render(self, template, **kw): self.write(self.render_str(template, **kw)) def write_json(self, obj): self.response.headers['Content-Type'] = "text/json; charset=utf-8" self.write(json.dumps(obj)) class MainHandler(Handler): def get(self): self.render('index.html') class TakeHandler(Handler): def get(self, name): metric = Metric.get_by_name(name) if metric: self.render(name + '_survey.html', name=name) else: logging.error('Could not find requested metric') self.render('404.html') class CompleteHandler(Handler): def get(self, name): key = self.request.get('private_key', None) group = self.request.get('group', None) answers = [] if key is None: # If there's no key, then this is a preview. Don't try to load any # answers. logging.info("No key present; rendering preview.") else: try: answers = Result.get_results([key], group)['answers'] except Exception as e: # There was some problem with the keys that were given. Just # display the report with no answers. logging.error('Problem with private key: {}'.format(e)) try: metric = Metric.get_by_name(name) except Exception as e: logging.error('Could not find requested metric: {}'.format(e)) self.render('404.html') return # Render without html escaping self.render(metric.name + '_survey_complete.html', group=group, answers=jinja2.Markup(json.dumps(answers))) class SampleHandler(Handler): def get(self, name): metric = Metric.get_by_name(name) if metric: sample_template = name + '_sample_results.html' # If there's a sample report, render that. if os.path.isfile('templates/' + sample_template): self.render(name + '_sample_results.html', name=name) # Some reports can render themselves as a sample if no data is # provided. These don't have a separate sample template. Instead, # just serve up the main report template. else: self.render(name + '_results.html', name=name) else: logging.error('Could not find requested metric') self.render('404.html') class ResultsHandler(Handler): def get(self, metric=None, keys_str=''): # Multiple keys can be specified, separated by hyphens, in which case # multiple sets of results should be sent to the template. keys = keys_str.split('-') # A group may be applicable here for single-keyed results. group = self.request.get('group') if len(keys) == 1 else None try: results = Result.get_results(keys, group) except Exception as e: # There was some problem with the keys that were given. logging.error('{}'.format(e)) self.render('404.html') return template = None answers = [] if metric: # A specific metric was requested. Check that 1) it exists and 2) # it matches the answers, if any. Then show that metric's results # page. if metric not in config.metrics: logging.error("Unknown metric: {}".format(metric)) template = '404.html' if len(results['answers']) > 0: if metric != results['metric']: logging.error("Key is from metric {}, but {} requested." .format(results['metric'], metric)) template = '404.html' answers = results['answers'] # If the template hasn't been set by an error check above, give the # metric-specific results page. template = template or metric + '_results.html' else: # No specific metric was given. Infer it from the answers, if any, # otherwise show a generic no-results page. 
if len(results['answers']) > 0: metric = results['metric'] answers = results['answers'] template = metric + '_results.html' else: template = 'no_responses.html' # Render without html escaping. answers = jinja2.Markup(json.dumps(answers)) self.render(template, group=group, answers=answers) class ShareHandler(Handler): def get(self, name): keypair = util.Keys().get_pair() # Render without html escaping metric = Metric.get_by_name(name) self.render( metric.name + '_share.html', name=name, private_key=keypair['private_keys'][0], public_key=keypair['public_keys'][0]) class CsvHandler(Handler): """Return a csv based on the json array passed in for example the following is a valid request (exploded for clarity) /csv? filename=gotcha& headers=["name","age"]& data=[["jack",12],["john",42],["joe",68]] """ def get(self): # Get input data = self.request.get('data') headers = self.request.get('headers') filename = self.request.get('filename').encode('ascii', 'ignore') # Convert to json data = json.loads(data) if headers: headers = json.loads(headers) # Check input if not headers: logging.warning('no headers sent') if not isinstance(headers, list): logging.warning('the headers are not a list') if not data: logging.warning('no data') if not isinstance(data, list): logging.warning('data is not a list') if not len(data) > 0: logging.warning('data has no length') if not all(isinstance(row, list) for row in data): logging.warning('data contains members which are not lists') # Set up headers for browser to correctly recognize the file self.response.headers['Content-Type'] = 'text/csv' self.response.headers['Content-Disposition'] = 'attachment; filename="' + filename + '.csv"' # write the csv to a file like string csv_file = cStringIO.StringIO() csv_writer = csv.writer(csv_file) # add headers if sent if headers: csv_writer.writerow(headers) # add data for row in data: csv_writer.writerow(row) # Emit the files directly to HTTP response stream self.response.out.write(csv_file.getvalue()) class AdminCreateHandler(Handler): def get(self): self.render('create.html') class MetricApi(Handler): default_rubric = """<script> pretty_answers = JSON.stringify(mm.answers, null, 4) $('#responses').html(pretty_answers); </script> <pre id='responses'></pre> """ default_survey = """<input name="quest"/>""" def get(self): name = self.request.get('name') if name: metric = Metric.get_by_name(name) if metric: self.write_json(util.to_dict(metric)) else: default_description = "<h3>" + name + "</h3>" self.write_json({ 'survey': self.default_survey, 'rubric': self.default_rubric, 'description': default_description }) else: logging.error('Metric request had no name') self.write_json({'error': 'a name is required'}) class AdminMetricApi(Handler): def post(self): name = self.request.get('name') survey = self.request.get('survey') rubric = self.request.get('rubric') description = self.request.get('description') if name and survey and rubric: Metric( name=name, survey=survey, rubric=rubric, description=description).put() self.write_json({'ok': True}) else: logging.error('Posted metric was missing name, survey, description, or rubric') message = "a name, survey, description, and grading rubric are required" self.write_json({'error': message}) class AdminDataHandler(Handler): """Return a csv of all responses""" def get(self): # Set up headers for browser to correctly recognize the file self.response.headers['Content-Type'] = 'text/csv' self.response.headers['Content-Disposition'] = 'attachment; filename="mm_data.csv"' # write the csv to a file 
like string csv_file = cStringIO.StringIO() csv_writer = csv.writer(csv_file) headers = ['created', 'metric', 'question', 'answer'] csv_writer.writerow(headers) for result in Result.all(): for k, v in result.get_answers().items(): row = [result.created, result.metric, k, v] csv_writer.writerow(row) # Emit the files directly to HTTP response stream self.response.out.write(csv_file.getvalue()) logging.info('All data downloaded by admin') logging.info(csv_file.getvalue()) class ResultApi(Handler):
class KeysApi(Handler): """Hand out public and private keys""" def get(self): keypair = util.Keys().get_pair() Group.put_group(keypair['private_keys'][0]) self.write_json(keypair) class ErrorApi(Handler): """Log javascript errors for debugging purposes""" def post(self): logging.error('Javascript Error: ' + self.request.get('message')) class EmailApi(Handler): """Send a user an email with their keys""" def post(self): address = self.request.get('address') private_key = self.request.get('private_key') metric = self.request.get('metric') if address and private_key and metric: private_key = private_key.encode('ascii') # handle unicode properly public_key = util.Keys().get_public(private_key) root = "http://survey.perts.net" take_link = root + '/take/' + metric + '?public_key=' + public_key results_link = root + '/results/' + private_key message = self.render_str( 'email.html', address=address, take_link=take_link, results_link=results_link, metric=metric) result = mandrill.send( to_address=address, subject="Mindset Meter Study Links", body=message, ) if result: logging.info( 'Email sent to ' + address + ' with the message ' + message ) self.write_json({'ok': True}) else: message = "address, private_key, and metric are necessary to email a user" logging.error(message) self.write_json({'error': message}) class EmailFeedback(Handler): """Allow users to send feedback on the mindset meter. This should be as simple as possible to maximize feedback. """ def post(self): # Reply to is optional reply_to = self.request.get('reply_to') logging.error('reply to is : {}'.format(reply_to)) message = self.request.get('message') to_address = config.from_server_email_address message = self.render_str( 'email_feedback.html', reply_to=reply_to, message=message, ) mandrill.send( to_address=to_address, subject="Feedback on the mindset meter", body=message, ) logging.info('Feedback Email sent to ' + to_address + ' with the message ' + message) self.write_json({'ok': True}) class PageNotFoundHandler(Handler): def get(self): self.render('404.html') app = webapp2.WSGIApplication([ ('/', MainHandler), ('/take/(.*)', TakeHandler), ('/complete/(.*)', CompleteHandler), webapp2.Route(r'/results/<keys_str>', handler=ResultsHandler), webapp2.Route(r'/results/<metric>/<keys_str>', handler=ResultsHandler), ('/sample/(.*)', SampleHandler), ('/share/(.*)', ShareHandler), ('/csv', CsvHandler), ('/api/metric', MetricApi), ('/admin/api/metric', AdminMetricApi), ('/api/result', ResultApi), ('/api/keys', KeysApi), ('/api/error', ErrorApi), ('/api/email', EmailApi), ('/api/email_feedback', EmailFeedback), ('/admin/data', AdminDataHandler), ('/admin/create', AdminCreateHandler), ('/.*', PageNotFoundHandler) ], debug=True)
def get(self): private_keys = json.loads(self.request.get('private_keys')) group = self.request.get('group') if private_keys: private_keys = [k.encode('ascii') for k in private_keys] try: response = Result.get_results(private_keys, group) except Exception as e: logging.error('{}'.format(e)) response = "Problem with provided keys. {}".format(e) else: logging.error('Requested result without a private key') response = "a private key is required" self.write_json(response) def post(self): keys = json.loads(self.request.get('keys')) metric = self.request.get('metric') group = self.request.get('group') answers = self.request.get('answers') json.loads(answers) # validity check if keys and metric and answers: logging.info("Saving result {} {} {} {}".format(keys, metric, answers, group)) Result.put_result(keys, metric, answers, group) self.write_json({'ok': True}) else: logging.error('Posted result without a metric, keys, or answers') message = "a metric, keys, and answers are required" self.write_json({'error': message})
identifier_body
server.go
package proxy import ( "bytes" "fmt" "io/ioutil" "net/http" "time" "github.com/zoowii/query_api_proxy/cache" "sync/atomic" "github.com/bitly/go-simplejson" "github.com/zoowii/betterjson" "gopkg.in/yaml.v2" ) func ReadConfigFromYaml(yamlConfigFilePath string) (*Config, error) { conf := NewConfig() yamlFile, err := ioutil.ReadFile(yamlConfigFilePath) if err != nil { return nil, err } err = yaml.Unmarshal(yamlFile, conf) if err != nil { return nil, err } return conf, nil } func writeErrorToJSONRpcResponse(w http.ResponseWriter, id interface{}, errorCode int, errMsg string) { resBytes, err := MakeJSONRpcErrorResponse(id, errorCode, errMsg, nil) if err != nil { w.Write([]byte(err.Error())) } else { w.Header().Set("Content-Type", "application/json") w.Write(resBytes) } } func writeResultToJSONRpcResponse(w http.ResponseWriter, id interface{}, result interface{}) { resBytes, err := MakeJSONRpcSuccessResponse(id, result) if err != nil { w.Write([]byte(err.Error())) } else { w.Header().Set("Content-Type", "application/json") w.Write(resBytes) } } func writeDirectlyToResponse(w http.ResponseWriter, data []byte) { w.Write(data) } func isNeedCacheMethod(config *Config, rpcReqMethod string) bool { if config.CacheAllJSONRpcMethods { return true } if config.CacheJSONRpcMethodsWithBlacklist { for _, m := range config.CacheJSONRpcMethodsBlacklist { if m == rpcReqMethod { return false } } return true } return false } func useWorkerToProvideService(config *Config, workerIndex int, workerUri string, rpcReqMethod string, reqBody []byte) *WorkerResponse { res := new(WorkerResponse) res.WorkerIndex = workerIndex res.WorkerUri = workerUri cache1Key := workerUri cache2Key := string(reqBody) // because there will never be a '^' in workerUri, we can join cache1Key and cache2Key with '^' cacheKey := cache1Key + "^" + cache2Key if isNeedCacheMethod(config, rpcReqMethod) { if cacheValue, ok := cache.Get(cacheKey); ok { resultBytes := cacheValue.([]byte) resultJSON, jsonErr := simplejson.NewJson(resultBytes) if jsonErr == nil { res.Result = resultBytes res.ResultJSON = resultJSON // TODO: digest result json and when got > 1/2 same results, just break the loop return res } } } workerHttpRes, workerResErr := http.Post(workerUri, "application/json", bytes.NewReader(reqBody)) if workerResErr != nil { res.Error = workerResErr } else { defer workerHttpRes.Body.Close() readBytes, readErr := ioutil.ReadAll(workerHttpRes.Body) if readErr != nil { res.Error = readErr } else { res.Result = readBytes resultJSON, jsonErr := simplejson.NewJson(readBytes) if jsonErr == nil { res.ResultJSON = resultJSON // TODO: digest result json and when got > 1/2 same results, just break the loop if isNeedCacheMethod(config, rpcReqMethod) || IsSuccessJSONRpcResponse(resultJSON) { cacheValue := readBytes cache.SetWithDefaultExpire(cacheKey, cacheValue) } } } } return res } // send requests to workers one by one; 
now just send to all workers func asyncTryWorkersUntilSuccess(config *Config, workerUris []string, startWorkerIndex int, responsesChannel chan *WorkerResponse, rpcReqMethod string, reqBody []byte) { if len(workerUris) <= startWorkerIndex { return } go func() { res := useWorkerToProvideService(config, startWorkerIndex, workerUris[startWorkerIndex], rpcReqMethod, reqBody) if startWorkerIndex == (len(workerUris) - 1) { responsesChannel <- res return } if res.IsValidJSONRpcResult() { responsesChannel <- res } else { asyncTryWorkersUntilSuccess(config, workerUris, startWorkerIndex+1, responsesChannel, rpcReqMethod, reqBody) } }() } func selectWorkersToProvideService(config *Config, workerUris []string, responsesChannel chan *WorkerResponse, rpcReqMethod string, reqBody []byte)
var workerLoadBalanceIndex uint32 = 0 // returns the order of workers according to the mode in the configuration func getWorkersSequenceBySelectMode(config *Config, workerUris []string) []string { if config.IsMostOfAllSelectMode() || config.IsFirstOfAllSelectMode() { return workerUris } else if config.IsOnlyFirstSelectMode() || config.IsOnlyOnceSelectMode() { firstIdx := atomic.AddUint32(&workerLoadBalanceIndex, 1) firstIdx = firstIdx % uint32(len(workerUris)) newSeq := []string{workerUris[firstIdx]} beforeWorkers := workerUris[0:firstIdx] afterWorkers := workerUris[firstIdx+1:] newSeq = append(newSeq, beforeWorkers...) newSeq = append(newSeq, afterWorkers...) return newSeq } else { panic("not supported config select_worker_mode") } } // TODO: use jsonrpcmethods whitelist if enabled // TODO: fault handler // TODO: rate limit // TODO: workers health check func StartServer(config *Config) { if config.LogPath == "" { config.LogPath = "./query_api_proxy.log" } logger, err := NewLogger(config.LogPath) if err != nil { panic("error happened when opening log " + err.Error()) } defer logger.Close() proxyHandlerFunc := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { // only support POST json-rpc now writeErrorToJSONRpcResponse(w, 1, JSONRPC_PARSE_ERROR_CODE, "only support POST JSON-RPC now") return } defer r.Body.Close() reqBody, err := ioutil.ReadAll(r.Body) if err != nil { writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error()) return } var rpcReqId interface{} = 1 var rpcReqMethod string = "" reqBodyJSON, err := simplejson.NewJson(reqBody) if err == nil { tryGetReqId, err := reqBodyJSON.Get("id").Int() if err == nil { rpcReqId = tryGetReqId } else { tryGetReqId, err := reqBodyJSON.Get("id").String() if err == nil { rpcReqId = tryGetReqId } } method, err := reqBodyJSON.Get("method").String() if err == nil { rpcReqMethod = method } else { writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error()) return } } responsesChannel := make(chan *WorkerResponse, len(config.Workers)) workerUris := getWorkersSequenceBySelectMode(config, config.Workers) selectWorkersToProvideService(config, workerUris, responsesChannel, rpcReqMethod, reqBody) timeout := false breakIterWorkerResponses := false workerResponses := make([]*WorkerResponse, 0) for i := 0; i < len(workerUris); i++ { if timeout { break } select { case res := <-responsesChannel: workerResponses = append(workerResponses, res) if config.IsOnlyOnceSelectMode() { breakIterWorkerResponses = true break } if config.IsOnlyFirstSelectMode() && res.ResultJSON != nil { breakIterWorkerResponses = true break } if !config.IsMostOfAllSelectMode() && res.ResultJSON != nil { breakIterWorkerResponses = true } case <-time.After(time.Duration(config.RequestTimeoutSeconds) * time.Second): timeout = true } if breakIterWorkerResponses { break } } // compare workerResponses to select the most common response hasSomeErrorInWorkerResponses := false if (config.IsFirstOfAllSelectMode() || config.IsMostOfAllSelectMode()) || len(workerResponses) < len(config.Workers) { hasSomeErrorInWorkerResponses = true } if len(workerResponses) < 1 { hasSomeErrorInWorkerResponses = true } type WorkerResponseSameGroup struct { ResultJSON *simplejson.Json ResultBytes []byte Count int } if !config.IsMostOfAllSelectMode() && len(workerResponses) > 0 { // find the first non-empty result json and use it as the final response for _, workerRes := range workerResponses { if workerRes.ResultJSON != nil { 
writeDirectlyToResponse(w, workerRes.Result) return } } } var sameWorkerResponseGroups = make(map[string]*WorkerResponseSameGroup, 0) var maxCountGroup *WorkerResponseSameGroup = nil for _, workerRes := range workerResponses { if workerRes.ResultJSON == nil { hasSomeErrorInWorkerResponses = true continue } resultJSONDigest := betterjson.FromNotEmptySimpleJson(workerRes.ResultJSON).DigestJSONForEqual() var group *WorkerResponseSameGroup var foundGroup bool if group, foundGroup = sameWorkerResponseGroups[resultJSONDigest]; foundGroup { group.Count += 1 } else { group = new(WorkerResponseSameGroup) group.ResultJSON = workerRes.ResultJSON group.ResultBytes = workerRes.Result group.Count = 1 sameWorkerResponseGroups[resultJSONDigest] = group } if maxCountGroup == nil { maxCountGroup = group } else { if group.Count > maxCountGroup.Count { maxCountGroup = group } } } if len(sameWorkerResponseGroups) < 1 || maxCountGroup == nil { hasSomeErrorInWorkerResponses = true errMsg := fmt.Sprintf("workers sent zero responses when dispatching request %s\n", string(reqBody)) logger.Print(errMsg) writeErrorToJSONRpcResponse(w, rpcReqId, JSONRPC_INTERNAL_ERROR_CODE, "no responses until timeout") return } if len(sameWorkerResponseGroups) > 1 { hasSomeErrorInWorkerResponses = true logger.Printf("workers sent some distinct responses when dispatching request %s\n", string(reqBody)) } if hasSomeErrorInWorkerResponses { logger.Printf("some errors in worker responses when dispatching request %s\n", string(reqBody)) } writeDirectlyToResponse(w, maxCountGroup.ResultBytes) }) var logRequest = func(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // measure elapsed handler time with time.Since instead of reading a misused one-shot timer's channel start := time.Now() logger.Printf("%s %s %s\n", r.RemoteAddr, r.Method, r.URL) handler.ServeHTTP(w, r) logger.Printf("using %.2f seconds\n", time.Since(start).Seconds()) }) } _ = logRequest s := &http.Server{ Addr: fmt.Sprintf("%s:%d", config.Host, config.Port), Handler: proxyHandlerFunc, // logRequest(proxyHandlerFunc), ReadTimeout: 50 * time.Second, WriteTimeout: 100 * time.Second, MaxHeaderBytes: 1 << 20, } s.SetKeepAlivesEnabled(false) logger.Printf("starting server at %s:%d\n", config.Host, config.Port) logger.Fatal(s.ListenAndServe()) }
{ if config.IsOnlyFirstSelectMode() { asyncTryWorkersUntilSuccess(config, workerUris, 0, responsesChannel, rpcReqMethod, reqBody) } else { for workerIndex, workerUri := range workerUris { go func(workerIndex int, workerUri string) { res := useWorkerToProvideService(config, workerIndex, workerUri, rpcReqMethod, reqBody) responsesChannel <- res }(workerIndex, workerUri) if config.IsOnlyOnceSelectMode() { break } } } }
identifier_body
server.go
package proxy import ( "bytes" "fmt" "io/ioutil" "net/http" "time" "github.com/zoowii/query_api_proxy/cache" "sync/atomic" "github.com/bitly/go-simplejson" "github.com/zoowii/betterjson" "gopkg.in/yaml.v2" ) func ReadConfigFromYaml(yamlConfigFilePath string) (*Config, error) { conf := NewConfig() yamlFile, err := ioutil.ReadFile(yamlConfigFilePath) if err != nil { return nil, err } err = yaml.Unmarshal(yamlFile, conf) if err != nil { return nil, err } return conf, nil } func writeErrorToJSONRpcResponse(w http.ResponseWriter, id interface{}, errorCode int, errMsg string) { resBytes, err := MakeJSONRpcErrorResponse(id, errorCode, errMsg, nil) if err != nil { w.Write([]byte(err.Error())) } else { w.Header().Set("Content-Type", "application/json") w.Write(resBytes) } } func writeResultToJSONRpcResponse(w http.ResponseWriter, id interface{}, result interface{}) { resBytes, err := MakeJSONRpcSuccessResponse(id, result) if err != nil { w.Write([]byte(err.Error())) } else { w.Header().Set("Content-Type", "application/json") w.Write(resBytes) } } func writeDirectlyToResponse(w http.ResponseWriter, data []byte) { w.Write(data) } func isNeedCacheMethod(config *Config, rpcReqMethod string) bool { if config.CacheAllJSONRpcMethods { return true } if config.CacheJSONRpcMethodsWithBlacklist { for _, m := range config.CacheJSONRpcMethodsBlacklist { if m == rpcReqMethod { return false } } return true } return false } func
(config *Config, workerIndex int, workerUri string, rpcReqMethod string, reqBody []byte) *WorkerResponse { res := new(WorkerResponse) res.WorkerIndex = workerIndex res.WorkerUri = workerUri cache1Key := workerUri cache2Key := string(reqBody) // because there will never be a '^' in workerUri, we can join cache1Key and cache2Key with '^' cacheKey := cache1Key + "^" + cache2Key if isNeedCacheMethod(config, rpcReqMethod) { if cacheValue, ok := cache.Get(cacheKey); ok { resultBytes := cacheValue.([]byte) resultJSON, jsonErr := simplejson.NewJson(resultBytes) if jsonErr == nil { res.Result = resultBytes res.ResultJSON = resultJSON // TODO: digest result json and when got > 1/2 same results, just break the loop return res } } } workerHttpRes, workerResErr := http.Post(workerUri, "application/json", bytes.NewReader(reqBody)) if workerResErr != nil { res.Error = workerResErr } else { defer workerHttpRes.Body.Close() readBytes, readErr := ioutil.ReadAll(workerHttpRes.Body) if readErr != nil { res.Error = readErr } else { res.Result = readBytes resultJSON, jsonErr := simplejson.NewJson(readBytes) if jsonErr == nil { res.ResultJSON = resultJSON // TODO: digest result json and when got > 1/2 same results, just break the loop if isNeedCacheMethod(config, rpcReqMethod) || IsSuccessJSONRpcResponse(resultJSON) { cacheValue := readBytes cache.SetWithDefaultExpire(cacheKey, cacheValue) } } } } return res } // send requests to workers one by one; now just send to all workers func asyncTryWorkersUntilSuccess(config *Config, workerUris []string, startWorkerIndex int, responsesChannel chan *WorkerResponse, rpcReqMethod string, reqBody []byte) { if len(workerUris) <= startWorkerIndex { return } go func() { res := useWorkerToProvideService(config, startWorkerIndex, workerUris[startWorkerIndex], rpcReqMethod, reqBody) if startWorkerIndex == (len(workerUris) - 1) { responsesChannel <- res return } if res.IsValidJSONRpcResult() { responsesChannel <- res } else { asyncTryWorkersUntilSuccess(config, workerUris, startWorkerIndex+1, responsesChannel, rpcReqMethod, reqBody) } }() } func selectWorkersToProvideService(config *Config, workerUris []string, responsesChannel chan *WorkerResponse, rpcReqMethod string, reqBody []byte) { if config.IsOnlyFirstSelectMode() { asyncTryWorkersUntilSuccess(config, workerUris, 0, responsesChannel, rpcReqMethod, reqBody) } else { for workerIndex, workerUri := range workerUris { go func(workerIndex int, workerUri string) { res := useWorkerToProvideService(config, workerIndex, workerUri, rpcReqMethod, reqBody) responsesChannel <- res }(workerIndex, workerUri) if config.IsOnlyOnceSelectMode() { break } } } } var workerLoadBalanceIndex uint32 = 0 // returns the order of workers according to the mode in the configuration func getWorkersSequenceBySelectMode(config *Config, workerUris []string) []string { if config.IsMostOfAllSelectMode() || config.IsFirstOfAllSelectMode() { return workerUris } else if config.IsOnlyFirstSelectMode() || config.IsOnlyOnceSelectMode() { firstIdx := atomic.AddUint32(&workerLoadBalanceIndex, 1) firstIdx = firstIdx % uint32(len(workerUris)) newSeq := []string{workerUris[firstIdx]} beforeWorkers := workerUris[0:firstIdx] afterWorkers := workerUris[firstIdx+1:] newSeq = append(newSeq, beforeWorkers...) newSeq = append(newSeq, afterWorkers...) 
return newSeq } else { panic("not supported config select_worker_mode") return nil } } // TODO: use jsonrpcmethods whitelist if enabled // TODO: fault handler // TODO: rate limit // TODO: workers health check func StartServer(config *Config) { if config.LogPath == "" { config.LogPath = "./query_api_proxy.log" } logger, err := NewLogger(config.LogPath) if err != nil { panic("error happened when opening log: " + err.Error()) return } defer logger.Close() proxyHandlerFunc := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { // only POST JSON-RPC is supported for now writeErrorToJSONRpcResponse(w, 1, JSONRPC_PARSE_ERROR_CODE, "only POST JSON-RPC is supported now") return } defer r.Body.Close() reqBody, err := ioutil.ReadAll(r.Body) if err != nil { writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error()) return } var rpcReqId interface{} = 1 var rpcReqMethod string = "" reqBodyJSON, err := simplejson.NewJson(reqBody) if err == nil { tryGetReqId, err := reqBodyJSON.Get("id").Int() if err == nil { rpcReqId = tryGetReqId } else { tryGetReqId, err := reqBodyJSON.Get("id").String() if err == nil { rpcReqId = tryGetReqId } } method, err := reqBodyJSON.Get("method").String() if err == nil { rpcReqMethod = method } else { writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error()) return } } responsesChannel := make(chan *WorkerResponse, len(config.Workers)) workerUris := getWorkersSequenceBySelectMode(config, config.Workers) selectWorkersToProvideService(config, workerUris, responsesChannel, rpcReqMethod, reqBody) timeout := false breakIterWorkerResponses := false workerResponses := make([]*WorkerResponse, 0) for i := 0; i < len(workerUris); i++ { if timeout { break } select { case res := <-responsesChannel: workerResponses = append(workerResponses, res) if config.IsOnlyOnceSelectMode() { breakIterWorkerResponses = true break } if config.IsOnlyFirstSelectMode() && res.ResultJSON != nil { breakIterWorkerResponses = true break } if !config.IsMostOfAllSelectMode() && res.ResultJSON != nil { breakIterWorkerResponses = true } case <-time.After(time.Duration(config.RequestTimeoutSeconds) * time.Second): timeout = true } if breakIterWorkerResponses { break } } // compare workerResponses and select the most common response hasSomeErrorInWorkerResponses := false // in the query-all-workers modes, fewer responses than workers means some worker failed if (config.IsFirstOfAllSelectMode() || config.IsMostOfAllSelectMode()) && len(workerResponses) < len(config.Workers) { hasSomeErrorInWorkerResponses = true } if len(workerResponses) < 1 { hasSomeErrorInWorkerResponses = true } type WorkerResponseSameGroup struct { ResultJSON *simplejson.Json ResultBytes []byte Count int } if !config.IsMostOfAllSelectMode() && len(workerResponses) > 0 { // use the first non-empty result JSON as the final response for _, workerRes := range workerResponses { if workerRes.ResultJSON != nil { writeDirectlyToResponse(w, workerRes.Result) return } } } var sameWorkerResponseGroups = make(map[string]*WorkerResponseSameGroup, 0) var maxCountGroup *WorkerResponseSameGroup = nil for _, workerRes := range workerResponses { if workerRes.ResultJSON == nil { hasSomeErrorInWorkerResponses = true continue } resultJSONDigest := betterjson.FromNotEmptySimpleJson(workerRes.ResultJSON).DigestJSONForEqual() var group *WorkerResponseSameGroup var foundGroup bool if group, foundGroup = sameWorkerResponseGroups[resultJSONDigest]; foundGroup { group.Count += 1 } else { group = new(WorkerResponseSameGroup) group.ResultJSON = workerRes.ResultJSON group.ResultBytes = workerRes.Result 
group.Count = 1 sameWorkerResponseGroups[resultJSONDigest] = group } if maxCountGroup == nil { maxCountGroup = group } else { if group.Count > maxCountGroup.Count { maxCountGroup = group } } } if len(sameWorkerResponseGroups) < 1 || maxCountGroup == nil { hasSomeErrorInWorkerResponses = true errMsg := fmt.Sprintf("workers sent zero responses when dispatching request %s\n", string(reqBody)) logger.Print(errMsg) writeErrorToJSONRpcResponse(w, rpcReqId, JSONRPC_INTERNAL_ERROR_CODE, "no responses until timeout") return } if len(sameWorkerResponseGroups) > 1 { hasSomeErrorInWorkerResponses = true logger.Printf("workers sent distinct responses when dispatching request %s\n", string(reqBody)) } if hasSomeErrorInWorkerResponses { logger.Printf("some errors in worker responses when dispatching request %s\n", string(reqBody)) } writeDirectlyToResponse(w, maxCountGroup.ResultBytes) }) var logRequest = func(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { startTime := time.Now() logger.Printf("%s %s %s\n", r.RemoteAddr, r.Method, r.URL) handler.ServeHTTP(w, r) logger.Printf("using %.2f seconds\n", time.Since(startTime).Seconds()) }) } _ = logRequest s := &http.Server{ Addr: fmt.Sprintf("%s:%d", config.Host, config.Port), Handler: proxyHandlerFunc, // logRequest(proxyHandlerFunc), ReadTimeout: 50 * time.Second, WriteTimeout: 100 * time.Second, MaxHeaderBytes: 1 << 20, } s.SetKeepAlivesEnabled(false) logger.Printf("starting server at %s:%d\n", config.Host, config.Port) logger.Fatal(s.ListenAndServe()) }
useWorkerToProvideService
identifier_name
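
The rotation branch of getWorkersSequenceBySelectMode above implements a simple atomic round-robin: each request promotes a different worker to the front while keeping the relative order of the rest. A minimal, self-contained sketch of that idea (rrIndex and rotate are hypothetical names, not part of the proxy):

package main

import (
	"fmt"
	"sync/atomic"
)

// rrIndex stands in for workerLoadBalanceIndex; atomic.AddUint32 keeps the
// rotation safe when many requests run concurrently.
var rrIndex uint32

// rotate returns the workers reordered so a different one comes first on each
// call, preserving the relative order of the remaining workers.
func rotate(workers []string) []string {
	first := atomic.AddUint32(&rrIndex, 1) % uint32(len(workers))
	out := []string{workers[first]}
	out = append(out, workers[:first]...)   // workers before the chosen one
	out = append(out, workers[first+1:]...) // workers after the chosen one
	return out
}

func main() {
	workers := []string{"http://a:8545", "http://b:8545", "http://c:8545"}
	for i := 0; i < 4; i++ {
		fmt.Println(rotate(workers)) // each call starts at a different worker
	}
}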
server.go
package proxy import (
"bytes" "fmt" "io/ioutil" "net/http" "time" "github.com/zoowii/query_api_proxy/cache" "sync/atomic" "github.com/bitly/go-simplejson" "github.com/zoowii/betterjson" "gopkg.in/yaml.v2" ) func ReadConfigFromYaml(yamlConfigFilePath string) (*Config, error) { conf := NewConfig() yamlFile, err := ioutil.ReadFile(yamlConfigFilePath) if err != nil { return nil, err } err = yaml.Unmarshal(yamlFile, conf) if err != nil { return nil, err } return conf, nil } func writeErrorToJSONRpcResponse(w http.ResponseWriter, id interface{}, errorCode int, errMsg string) { resBytes, err := MakeJSONRpcErrorResponse(id, errorCode, errMsg, nil) if err != nil { w.Write([]byte(err.Error())) } else { w.Header().Set("Content-Type", "application/json") w.Write(resBytes) } } func writeResultToJSONRpcResponse(w http.ResponseWriter, id interface{}, result interface{}) { resBytes, err := MakeJSONRpcSuccessResponse(id, result) if err != nil { w.Write([]byte(err.Error())) } else { w.Header().Set("Content-Type", "application/json") w.Write(resBytes) } } func writeDirectlyToResponse(w http.ResponseWriter, data []byte) { w.Write(data) } func isNeedCacheMethod(config *Config, rpcReqMethod string) bool { if config.CacheAllJSONRpcMethods { return true } if config.CacheJSONRpcMethodsWithBlacklist { for _, m := range config.CacheJSONRpcMethodsBlacklist { if m == rpcReqMethod { return false } } return true } return false } func useWorkerToProvideService(config *Config, workerIndex int, workerUri string, rpcReqMethod string, reqBody []byte) *WorkerResponse { res := new(WorkerResponse) res.WorkerIndex = workerIndex res.WorkerUri = workerUri cache1Key := workerUri cache2Key := string(reqBody) // because of there will not be any '^' in workerUri, so join cache1Key and cache2Key by '^' cacheKey := cache1Key + "^" + cache2Key if isNeedCacheMethod(config, rpcReqMethod) { if cacheValue, ok := cache.Get(cacheKey); ok { resultBytes := cacheValue.([]byte) resultJSON, jsonErr := simplejson.NewJson(resultBytes) if jsonErr == nil { res.Result = resultBytes res.ResultJSON = resultJSON // TODO: digest result json and when got > 1/2 same results, just break the loop return res } } } workerHttpRes, workerResErr := http.Post(workerUri, "application/json", bytes.NewReader(reqBody)) if workerResErr != nil { res.Error = workerResErr } else { defer workerHttpRes.Body.Close() readBytes, readErr := ioutil.ReadAll(workerHttpRes.Body) if readErr != nil { res.Error = readErr } else { res.Result = readBytes resultJSON, jsonErr := simplejson.NewJson(readBytes) if jsonErr == nil { res.ResultJSON = resultJSON // TODO: digest result json and when got > 1/2 same results, just break the loop if isNeedCacheMethod(config, rpcReqMethod) || IsSuccessJSONRpcResponse(resultJSON) { cacheValue := readBytes cache.SetWithDefaultExpire(cacheKey, cacheValue) } } } } return res } // send request to workers one by one. 
func asyncTryWorkersUntilSuccess(config *Config, workerUris []string, startWorkerIndex int, responsesChannel chan *WorkerResponse, rpcReqMethod string, reqBody []byte) { if len(workerUris) <= startWorkerIndex { return } go func() { res := useWorkerToProvideService(config, startWorkerIndex, workerUris[startWorkerIndex], rpcReqMethod, reqBody) if startWorkerIndex == (len(workerUris) - 1) { responsesChannel <- res return } if res.IsValidJSONRpcResult() { responsesChannel <- res } else { asyncTryWorkersUntilSuccess(config, workerUris, startWorkerIndex+1, responsesChannel, rpcReqMethod, reqBody) } }() } func selectWorkersToProvideService(config *Config, workerUris []string, responsesChannel chan *WorkerResponse, rpcReqMethod string, reqBody []byte) { if config.IsOnlyFirstSelectMode() { asyncTryWorkersUntilSuccess(config, workerUris, 0, responsesChannel, rpcReqMethod, reqBody) } else { for workerIndex, workerUri := range workerUris { go func(workerIndex int, workerUri string) { res := useWorkerToProvideService(config, workerIndex, workerUri, rpcReqMethod, reqBody) responsesChannel <- res }(workerIndex, workerUri) if config.IsOnlyOnceSelectMode() { break } } } } var workerLoadBalanceIndex uint32 = 0 // returns the order of workers according to the mode in the configuration func getWorkersSequenceBySelectMode(config *Config, workerUris []string) []string { if config.IsMostOfAllSelectMode() || config.IsFirstOfAllSelectMode() { return workerUris } else if config.IsOnlyFirstSelectMode() || config.IsOnlyOnceSelectMode() { firstIdx := atomic.AddUint32(&workerLoadBalanceIndex, 1) firstIdx = firstIdx % uint32(len(workerUris)) newSeq := []string{workerUris[firstIdx]} beforeWorkers := workerUris[0:firstIdx] afterWorkers := workerUris[firstIdx+1:] newSeq = append(newSeq, beforeWorkers...) newSeq = append(newSeq, afterWorkers...) 
return newSeq } else { panic("not supported config select_worker_mode") return nil } } // TODO: use jsonrpcmethods whitelist if enabled // TODO: fault handler // TODO: rate limit // TODO: workers health check func StartServer(config *Config) { if config.LogPath == "" { config.LogPath = "./query_api_proxy.log" } logger, err := NewLogger(config.LogPath) if err != nil { panic("error happened when opening log: " + err.Error()) return } defer logger.Close() proxyHandlerFunc := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { // only POST JSON-RPC is supported for now writeErrorToJSONRpcResponse(w, 1, JSONRPC_PARSE_ERROR_CODE, "only POST JSON-RPC is supported now") return } defer r.Body.Close() reqBody, err := ioutil.ReadAll(r.Body) if err != nil { writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error()) return } var rpcReqId interface{} = 1 var rpcReqMethod string = "" reqBodyJSON, err := simplejson.NewJson(reqBody) if err == nil { tryGetReqId, err := reqBodyJSON.Get("id").Int() if err == nil { rpcReqId = tryGetReqId } else { tryGetReqId, err := reqBodyJSON.Get("id").String() if err == nil { rpcReqId = tryGetReqId } } method, err := reqBodyJSON.Get("method").String() if err == nil { rpcReqMethod = method } else { writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error()) return } } responsesChannel := make(chan *WorkerResponse, len(config.Workers)) workerUris := getWorkersSequenceBySelectMode(config, config.Workers) selectWorkersToProvideService(config, workerUris, responsesChannel, rpcReqMethod, reqBody) timeout := false breakIterWorkerResponses := false workerResponses := make([]*WorkerResponse, 0) for i := 0; i < len(workerUris); i++ { if timeout { break } select { case res := <-responsesChannel: workerResponses = append(workerResponses, res) if config.IsOnlyOnceSelectMode() { breakIterWorkerResponses = true break } if config.IsOnlyFirstSelectMode() && res.ResultJSON != nil { breakIterWorkerResponses = true break } if !config.IsMostOfAllSelectMode() && res.ResultJSON != nil { breakIterWorkerResponses = true } case <-time.After(time.Duration(config.RequestTimeoutSeconds) * time.Second): timeout = true } if breakIterWorkerResponses { break } } // compare workerResponses and select the most common response hasSomeErrorInWorkerResponses := false // in the query-all-workers modes, fewer responses than workers means some worker failed if (config.IsFirstOfAllSelectMode() || config.IsMostOfAllSelectMode()) && len(workerResponses) < len(config.Workers) { hasSomeErrorInWorkerResponses = true } if len(workerResponses) < 1 { hasSomeErrorInWorkerResponses = true } type WorkerResponseSameGroup struct { ResultJSON *simplejson.Json ResultBytes []byte Count int } if !config.IsMostOfAllSelectMode() && len(workerResponses) > 0 { // use the first non-empty result JSON as the final response for _, workerRes := range workerResponses { if workerRes.ResultJSON != nil { writeDirectlyToResponse(w, workerRes.Result) return } } } var sameWorkerResponseGroups = make(map[string]*WorkerResponseSameGroup, 0) var maxCountGroup *WorkerResponseSameGroup = nil for _, workerRes := range workerResponses { if workerRes.ResultJSON == nil { hasSomeErrorInWorkerResponses = true continue } resultJSONDigest := betterjson.FromNotEmptySimpleJson(workerRes.ResultJSON).DigestJSONForEqual() var group *WorkerResponseSameGroup var foundGroup bool if group, foundGroup = sameWorkerResponseGroups[resultJSONDigest]; foundGroup { group.Count += 1 } else { group = new(WorkerResponseSameGroup) group.ResultJSON = workerRes.ResultJSON group.ResultBytes = workerRes.Result 
group.Count = 1 sameWorkerResponseGroups[resultJSONDigest] = group } if maxCountGroup == nil { maxCountGroup = group } else { if group.Count > maxCountGroup.Count { maxCountGroup = group } } } if len(sameWorkerResponseGroups) < 1 || maxCountGroup == nil { hasSomeErrorInWorkerResponses = true errMsg := fmt.Sprintf("workers sent zero responses when dispatching request %s\n", string(reqBody)) logger.Print(errMsg) writeErrorToJSONRpcResponse(w, rpcReqId, JSONRPC_INTERNAL_ERROR_CODE, "no responses until timeout") return } if len(sameWorkerResponseGroups) > 1 { hasSomeErrorInWorkerResponses = true logger.Printf("workers sent distinct responses when dispatching request %s\n", string(reqBody)) } if hasSomeErrorInWorkerResponses { logger.Printf("some errors in worker responses when dispatching request %s\n", string(reqBody)) } writeDirectlyToResponse(w, maxCountGroup.ResultBytes) }) var logRequest = func(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { startTime := time.Now() logger.Printf("%s %s %s\n", r.RemoteAddr, r.Method, r.URL) handler.ServeHTTP(w, r) logger.Printf("using %.2f seconds\n", time.Since(startTime).Seconds()) }) } _ = logRequest s := &http.Server{ Addr: fmt.Sprintf("%s:%d", config.Host, config.Port), Handler: proxyHandlerFunc, // logRequest(proxyHandlerFunc), ReadTimeout: 50 * time.Second, WriteTimeout: 100 * time.Second, MaxHeaderBytes: 1 << 20, } s.SetKeepAlivesEnabled(false) logger.Printf("starting server at %s:%d\n", config.Host, config.Port) logger.Fatal(s.ListenAndServe()) }
random_line_split
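
StartServer above fans the request out to all workers and collects replies from a buffered channel; note that it re-arms time.After on every loop iteration, so the timeout is per awaited response rather than one overall deadline. A minimal sketch of the same collect loop using a single deadline instead (Result and the worker list are stand-ins for the proxy's types):

package main

import (
	"fmt"
	"time"
)

// Result is a stand-in for the proxy's *WorkerResponse.
type Result struct {
	Worker string
	Body   string
}

func main() {
	workers := []string{"a", "b", "c"}
	// buffered to len(workers), like responsesChannel, so late workers never block on send
	ch := make(chan Result, len(workers))

	for _, w := range workers {
		go func(w string) {
			time.Sleep(10 * time.Millisecond) // stand-in for the HTTP POST to a worker
			ch <- Result{Worker: w, Body: `{"jsonrpc":"2.0","id":1,"result":1}`}
		}(w)
	}

	deadline := time.After(1 * time.Second) // one overall deadline for all replies
	var got []Result
collect:
	for range workers {
		select {
		case r := <-ch:
			got = append(got, r)
		case <-deadline:
			break collect // keep whatever arrived before the deadline
		}
	}
	fmt.Printf("collected %d of %d responses\n", len(got), len(workers))
}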
server.go
package proxy import ( "bytes" "fmt" "io/ioutil" "net/http" "time" "github.com/zoowii/query_api_proxy/cache" "sync/atomic" "github.com/bitly/go-simplejson" "github.com/zoowii/betterjson" "gopkg.in/yaml.v2" ) func ReadConfigFromYaml(yamlConfigFilePath string) (*Config, error) { conf := NewConfig() yamlFile, err := ioutil.ReadFile(yamlConfigFilePath) if err != nil { return nil, err } err = yaml.Unmarshal(yamlFile, conf) if err != nil { return nil, err } return conf, nil } func writeErrorToJSONRpcResponse(w http.ResponseWriter, id interface{}, errorCode int, errMsg string) { resBytes, err := MakeJSONRpcErrorResponse(id, errorCode, errMsg, nil) if err != nil
else { w.Header().Set("Content-Type", "application/json") w.Write(resBytes) } } func writeResultToJSONRpcResponse(w http.ResponseWriter, id interface{}, result interface{}) { resBytes, err := MakeJSONRpcSuccessResponse(id, result) if err != nil { w.Write([]byte(err.Error())) } else { w.Header().Set("Content-Type", "application/json") w.Write(resBytes) } } func writeDirectlyToResponse(w http.ResponseWriter, data []byte) { w.Write(data) } func isNeedCacheMethod(config *Config, rpcReqMethod string) bool { if config.CacheAllJSONRpcMethods { return true } if config.CacheJSONRpcMethodsWithBlacklist { for _, m := range config.CacheJSONRpcMethodsBlacklist { if m == rpcReqMethod { return false } } return true } return false } func useWorkerToProvideService(config *Config, workerIndex int, workerUri string, rpcReqMethod string, reqBody []byte) *WorkerResponse { res := new(WorkerResponse) res.WorkerIndex = workerIndex res.WorkerUri = workerUri cache1Key := workerUri cache2Key := string(reqBody) // workerUri never contains '^', so cache1Key and cache2Key can safely be joined with '^' cacheKey := cache1Key + "^" + cache2Key if isNeedCacheMethod(config, rpcReqMethod) { if cacheValue, ok := cache.Get(cacheKey); ok { resultBytes := cacheValue.([]byte) resultJSON, jsonErr := simplejson.NewJson(resultBytes) if jsonErr == nil { res.Result = resultBytes res.ResultJSON = resultJSON // TODO: digest result json and when got > 1/2 same results, just break the loop return res } } } workerHttpRes, workerResErr := http.Post(workerUri, "application/json", bytes.NewReader(reqBody)) if workerResErr != nil { res.Error = workerResErr } else { defer workerHttpRes.Body.Close() readBytes, readErr := ioutil.ReadAll(workerHttpRes.Body) if readErr != nil { res.Error = readErr } else { res.Result = readBytes resultJSON, jsonErr := simplejson.NewJson(readBytes) if jsonErr == nil { res.ResultJSON = resultJSON // TODO: digest result json and when got > 1/2 same results, just break the loop // cache only successful responses of cacheable methods if isNeedCacheMethod(config, rpcReqMethod) && IsSuccessJSONRpcResponse(resultJSON) { cacheValue := readBytes cache.SetWithDefaultExpire(cacheKey, cacheValue) } } } } return res } // try workers one by one until one returns a valid JSON-RPC result 
func asyncTryWorkersUntilSuccess(config *Config, workerUris []string, startWorkerIndex int, responsesChannel chan *WorkerResponse, rpcReqMethod string, reqBody []byte) { if len(workerUris) <= startWorkerIndex { return } go func() { res := useWorkerToProvideService(config, startWorkerIndex, workerUris[startWorkerIndex], rpcReqMethod, reqBody) if startWorkerIndex == (len(workerUris) - 1) { responsesChannel <- res return } if res.IsValidJSONRpcResult() { responsesChannel <- res } else { asyncTryWorkersUntilSuccess(config, workerUris, startWorkerIndex+1, responsesChannel, rpcReqMethod, reqBody) } }() } func selectWorkersToProvideService(config *Config, workerUris []string, responsesChannel chan *WorkerResponse, rpcReqMethod string, reqBody []byte) { if config.IsOnlyFirstSelectMode() { asyncTryWorkersUntilSuccess(config, workerUris, 0, responsesChannel, rpcReqMethod, reqBody) } else { for workerIndex, workerUri := range workerUris { go func(workerIndex int, workerUri string) { res := useWorkerToProvideService(config, workerIndex, workerUri, rpcReqMethod, reqBody) responsesChannel <- res }(workerIndex, workerUri) if config.IsOnlyOnceSelectMode() { break } } } } var workerLoadBalanceIndex uint32 = 0 // returns the order of workers according to the mode in the configuration func getWorkersSequenceBySelectMode(config *Config, workerUris []string) []string { if config.IsMostOfAllSelectMode() || config.IsFirstOfAllSelectMode() { return workerUris } else if config.IsOnlyFirstSelectMode() || config.IsOnlyOnceSelectMode() { firstIdx := atomic.AddUint32(&workerLoadBalanceIndex, 1) firstIdx = firstIdx % uint32(len(workerUris)) newSeq := []string{workerUris[firstIdx]} beforeWorkers := workerUris[0:firstIdx] afterWorkers := workerUris[firstIdx+1:] newSeq = append(newSeq, beforeWorkers...) newSeq = append(newSeq, afterWorkers...) 
return newSeq } else { panic("not supported config select_worker_mode") return nil } } // TODO: use jsonrpcmethods whitelist if enabled // TODO: fault handler // TODO: rate limit // TODO: workers health check func StartServer(config *Config) { if config.LogPath == "" { config.LogPath = "./query_api_proxy.log" } logger, err := NewLogger(config.LogPath) if err != nil { panic("error happened when opening log: " + err.Error()) return } defer logger.Close() proxyHandlerFunc := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { // only POST JSON-RPC is supported for now writeErrorToJSONRpcResponse(w, 1, JSONRPC_PARSE_ERROR_CODE, "only POST JSON-RPC is supported now") return } defer r.Body.Close() reqBody, err := ioutil.ReadAll(r.Body) if err != nil { writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error()) return } var rpcReqId interface{} = 1 var rpcReqMethod string = "" reqBodyJSON, err := simplejson.NewJson(reqBody) if err == nil { tryGetReqId, err := reqBodyJSON.Get("id").Int() if err == nil { rpcReqId = tryGetReqId } else { tryGetReqId, err := reqBodyJSON.Get("id").String() if err == nil { rpcReqId = tryGetReqId } } method, err := reqBodyJSON.Get("method").String() if err == nil { rpcReqMethod = method } else { writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error()) return } } responsesChannel := make(chan *WorkerResponse, len(config.Workers)) workerUris := getWorkersSequenceBySelectMode(config, config.Workers) selectWorkersToProvideService(config, workerUris, responsesChannel, rpcReqMethod, reqBody) timeout := false breakIterWorkerResponses := false workerResponses := make([]*WorkerResponse, 0) for i := 0; i < len(workerUris); i++ { if timeout { break } select { case res := <-responsesChannel: workerResponses = append(workerResponses, res) if config.IsOnlyOnceSelectMode() { breakIterWorkerResponses = true break } if config.IsOnlyFirstSelectMode() && res.ResultJSON != nil { breakIterWorkerResponses = true break } if !config.IsMostOfAllSelectMode() && res.ResultJSON != nil { breakIterWorkerResponses = true } case <-time.After(time.Duration(config.RequestTimeoutSeconds) * time.Second): timeout = true } if breakIterWorkerResponses { break } } // compare workerResponses and select the most common response hasSomeErrorInWorkerResponses := false // in the query-all-workers modes, fewer responses than workers means some worker failed if (config.IsFirstOfAllSelectMode() || config.IsMostOfAllSelectMode()) && len(workerResponses) < len(config.Workers) { hasSomeErrorInWorkerResponses = true } if len(workerResponses) < 1 { hasSomeErrorInWorkerResponses = true } type WorkerResponseSameGroup struct { ResultJSON *simplejson.Json ResultBytes []byte Count int } if !config.IsMostOfAllSelectMode() && len(workerResponses) > 0 { // use the first non-empty result JSON as the final response for _, workerRes := range workerResponses { if workerRes.ResultJSON != nil { writeDirectlyToResponse(w, workerRes.Result) return } } } var sameWorkerResponseGroups = make(map[string]*WorkerResponseSameGroup, 0) var maxCountGroup *WorkerResponseSameGroup = nil for _, workerRes := range workerResponses { if workerRes.ResultJSON == nil { hasSomeErrorInWorkerResponses = true continue } resultJSONDigest := betterjson.FromNotEmptySimpleJson(workerRes.ResultJSON).DigestJSONForEqual() var group *WorkerResponseSameGroup var foundGroup bool if group, foundGroup = sameWorkerResponseGroups[resultJSONDigest]; foundGroup { group.Count += 1 } else { group = new(WorkerResponseSameGroup) group.ResultJSON = workerRes.ResultJSON group.ResultBytes = workerRes.Result 
group.Count = 1 sameWorkerResponseGroups[resultJSONDigest] = group } if maxCountGroup == nil { maxCountGroup = group } else { if group.Count > maxCountGroup.Count { maxCountGroup = group } } } if len(sameWorkerResponseGroups) < 1 || maxCountGroup == nil { hasSomeErrorInWorkerResponses = true errMsg := fmt.Sprintf("workers sent zero responses when dispatching request %s\n", string(reqBody)) logger.Print(errMsg) writeErrorToJSONRpcResponse(w, rpcReqId, JSONRPC_INTERNAL_ERROR_CODE, "no responses until timeout") return } if len(sameWorkerResponseGroups) > 1 { hasSomeErrorInWorkerResponses = true logger.Printf("workers sent distinct responses when dispatching request %s\n", string(reqBody)) } if hasSomeErrorInWorkerResponses { logger.Printf("some errors in worker responses when dispatching request %s\n", string(reqBody)) } writeDirectlyToResponse(w, maxCountGroup.ResultBytes) }) var logRequest = func(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { startTime := time.Now() logger.Printf("%s %s %s\n", r.RemoteAddr, r.Method, r.URL) handler.ServeHTTP(w, r) logger.Printf("using %.2f seconds\n", time.Since(startTime).Seconds()) }) } _ = logRequest s := &http.Server{ Addr: fmt.Sprintf("%s:%d", config.Host, config.Port), Handler: proxyHandlerFunc, // logRequest(proxyHandlerFunc), ReadTimeout: 50 * time.Second, WriteTimeout: 100 * time.Second, MaxHeaderBytes: 1 << 20, } s.SetKeepAlivesEnabled(false) logger.Printf("starting server at %s:%d\n", config.Host, config.Port) logger.Fatal(s.ListenAndServe()) }
{ w.Write([]byte(err.Error())) }
conditional_block
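
The sameWorkerResponseGroups map in the handler above is a majority vote: each response is keyed by a structural digest of its JSON, the groups are counted, and the largest group wins. A sketch of the same grouping, assuming a sha256 digest over the raw bytes in place of betterjson's DigestJSONForEqual (so responses only match when byte-identical):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// group mirrors WorkerResponseSameGroup, minus the parsed-JSON field.
type group struct {
	body  string
	count int
}

// digest stands in for DigestJSONForEqual; hashing raw bytes is a simplifying
// assumption and will split responses that differ only in key order.
func digest(body string) string {
	sum := sha256.Sum256([]byte(body))
	return hex.EncodeToString(sum[:])
}

func main() {
	responses := []string{`{"result":1}`, `{"result":1}`, `{"result":2}`}
	groups := make(map[string]*group)
	var best *group
	for _, body := range responses {
		key := digest(body)
		g, ok := groups[key]
		if !ok {
			g = &group{body: body}
			groups[key] = g
		}
		g.count++
		if best == nil || g.count > best.count {
			best = g // track the largest group, like maxCountGroup
		}
	}
	fmt.Printf("majority answer (%d/%d): %s\n", best.count, len(responses), best.body)
}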
lib.rs
//! Adler-32 checksum implementation. //! //! This implementation features: //! //! - Permissively licensed (0BSD) clean-room implementation. //! - Zero dependencies. //! - Decent performance (3-4 GB/s). //! - `#![no_std]` support (with `default-features = false`). #![doc(html_root_url = "https://docs.rs/adler/0.2.2")] // Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default #![doc(test(attr(deny(unused_imports, unused_must_use))))] #![cfg_attr(docsrs, feature(doc_cfg))] #![warn(missing_debug_implementations)] #![forbid(unsafe_code)] #![cfg_attr(not(feature = "std"), no_std)] #[cfg(not(feature = "std"))] extern crate core as std; use std::hash::Hasher; use std::ops::{AddAssign, MulAssign, RemAssign}; #[cfg(feature = "std")] use std::io::{self, BufRead}; /// Adler-32 checksum calculator. /// /// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default /// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via /// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`]. /// /// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any /// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although /// that is not recommended (while every checksum is a hash, they are not necessarily good at being /// one). /// /// [`new`]: #method.new /// [`from_checksum`]: #method.from_checksum /// [`checksum`]: #method.checksum #[derive(Debug, Copy, Clone)] pub struct Adler32 { a: u16, b: u16, } impl Adler32 { /// Creates a new Adler-32 instance with default state. #[inline] pub fn new() -> Self { Self::default() } /// Creates an `Adler32` instance from a precomputed Adler-32 checksum. /// /// This allows resuming checksum calculation without having to keep the `Adler32` instance /// around. /// /// # Example /// /// ``` /// # use adler::Adler32; /// let parts = [ /// "rust", /// "acean", /// ]; /// let whole = adler::adler32_slice(b"rustacean"); /// /// let mut sum = Adler32::new(); /// sum.write_slice(parts[0].as_bytes()); /// let partial = sum.checksum(); /// /// // ...later /// /// let mut sum = Adler32::from_checksum(partial); /// sum.write_slice(parts[1].as_bytes()); /// assert_eq!(sum.checksum(), whole); /// ``` #[inline] pub fn from_checksum(sum: u32) -> Self
/// Returns the calculated checksum at this point in time. #[inline] pub fn checksum(&self) -> u32 { (u32::from(self.b) << 16) | u32::from(self.a) } /// Adds `bytes` to the checksum calculation. /// /// If efficiency matters, this should be called with Byte slices that contain at least a few /// thousand Bytes. pub fn write_slice(&mut self, bytes: &[u8]) { // The basic algorithm is, for every byte: // a = (a + byte) % MOD // b = (b + a) % MOD // where MOD = 65521. // // For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows: // - Between calls to `write`, we ensure that a and b are always in range 0..MOD. // - We use 32-bit arithmetic in this function. // - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD` // operation. // // According to Wikipedia, b is calculated as follows for non-incremental checksumming: // b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521) // Where n is the number of bytes and Di is the i-th Byte. We need to change this to account // for the previous values of a and b, as well as treat every input Byte as being 255: // b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520 // Or in other words: // b_inc = n*65520 + n(n+1)/2*255 // The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521. // 2^32-65521 = n*65520 + n(n+1)/2*255 // Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552. // // On top of the optimization outlined above, the algorithm can also be parallelized with a // bit more work: // // Note that b is a linear combination of a vector of input bytes (D1, ..., Dn). // // If we fix some value k<N and rewrite indices 1, ..., N as // // 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k, // // then we can express a and b in terms of sums of smaller sequences kb and ka: // // ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k // kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k // // a = ka(1) + ka(2) + ... + ka(k) + 1 // b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N // // We use this insight to unroll the main loop and process k=4 bytes at a time. // The resulting code is highly amenable to SIMD acceleration, although the immediate speedups // stem from increased pipeline parallelism rather than auto-vectorization. 
// // This technique is described in-depth (here:)[https://software.intel.com/content/www/us/\ // en/develop/articles/fast-computation-of-fletcher-checksums.html] const MOD: u32 = 65521; const CHUNK_SIZE: usize = 5552 * 4; let mut a = u32::from(self.a); let mut b = u32::from(self.b); let mut a_vec = U32X4([0; 4]); let mut b_vec = a_vec; let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4); // iterate over 4 bytes at a time let chunk_iter = bytes.chunks_exact(CHUNK_SIZE); let remainder_chunk = chunk_iter.remainder(); for chunk in chunk_iter { for byte_vec in chunk.chunks_exact(4) { let val = U32X4::from(byte_vec); a_vec += val; b_vec += a_vec; } b += CHUNK_SIZE as u32 * a; a_vec %= MOD; b_vec %= MOD; b %= MOD; } // special-case the final chunk because it may be shorter than the rest for byte_vec in remainder_chunk.chunks_exact(4) { let val = U32X4::from(byte_vec); a_vec += val; b_vec += a_vec; } b += remainder_chunk.len() as u32 * a; a_vec %= MOD; b_vec %= MOD; b %= MOD; // combine the sub-sum results into the main sum b_vec *= 4; b_vec.0[1] += MOD - a_vec.0[1]; b_vec.0[2] += (MOD - a_vec.0[2]) * 2; b_vec.0[3] += (MOD - a_vec.0[3]) * 3; for &av in a_vec.0.iter() { a += av; } for &bv in b_vec.0.iter() { b += bv; } // iterate over the remaining few bytes in serial for &byte in remainder.iter() { a += u32::from(byte); b += a; } self.a = (a % MOD) as u16; self.b = (b % MOD) as u16; } } impl Default for Adler32 { #[inline] fn default() -> Self { Adler32 { a: 1, b: 0 } } } impl Hasher for Adler32 { #[inline] fn finish(&self) -> u64 { u64::from(self.checksum()) } fn write(&mut self, bytes: &[u8]) { self.write_slice(bytes); } } /// Calculates the Adler-32 checksum of a byte slice. pub fn adler32_slice(data: &[u8]) -> u32 { let mut h = Adler32::new(); h.write_slice(data); h.checksum() } #[derive(Copy, Clone)] struct U32X4([u32; 4]); impl U32X4 { fn from(bytes: &[u8]) -> Self { U32X4([ u32::from(bytes[0]), u32::from(bytes[1]), u32::from(bytes[2]), u32::from(bytes[3]), ]) } } impl AddAssign<Self> for U32X4 { fn add_assign(&mut self, other: Self) { for (s, o) in self.0.iter_mut().zip(other.0.iter()) { *s += o; } } } impl RemAssign<u32> for U32X4 { fn rem_assign(&mut self, quotient: u32) { for s in self.0.iter_mut() { *s %= quotient; } } } impl MulAssign<u32> for U32X4 { fn mul_assign(&mut self, rhs: u32) { for s in self.0.iter_mut() { *s *= rhs; } } } /// Calculates the Adler-32 checksum of a `BufRead`'s contents. /// /// The passed `BufRead` implementor will be read until it reaches EOF. /// /// If you only have a `Read` implementor, wrap it in `std::io::BufReader`. 
#[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub fn adler32_reader<R: BufRead>(reader: &mut R) -> io::Result<u32> { let mut h = Adler32::new(); loop { let len = { let buf = reader.fill_buf()?; if buf.is_empty() { return Ok(h.checksum()); } h.write_slice(buf); buf.len() }; reader.consume(len); } } #[cfg(test)] mod tests { use super::*; use std::io::BufReader; #[test] fn zeroes() { assert_eq!(adler32_slice(&[]), 1); assert_eq!(adler32_slice(&[0]), 1 | 1 << 16); assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16); assert_eq!(adler32_slice(&[0; 100]), 0x00640001); assert_eq!(adler32_slice(&[0; 1024]), 0x04000001); assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001); } #[test] fn ones() { assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e); assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11); } #[test] fn mixed() { assert_eq!(adler32_slice(&[1]), 2 | 2 << 16); assert_eq!(adler32_slice(&[40]), 41 | 41 << 16); assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1); } /// Example calculation from https://en.wikipedia.org/wiki/Adler-32. #[test] fn wiki() { assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398); } #[test] fn resume() { let mut adler = Adler32::new(); adler.write_slice(&[0xff; 1024]); let partial = adler.checksum(); assert_eq!(partial, 0x79a6fc2e); // from above adler.write_slice(&[0xff; 1024 * 1024 - 1024]); assert_eq!(adler.checksum(), 0x8e88ef11); // from above // Make sure that we can resume computing from the partial checksum via `from_checksum`. let mut adler = Adler32::from_checksum(partial); adler.write_slice(&[0xff; 1024 * 1024 - 1024]); assert_eq!(adler.checksum(), 0x8e88ef11); // from above } #[test] fn bufread() { fn test(data: &[u8], checksum: u32) { // `BufReader` uses an 8 KB buffer, so this will test buffer refilling. let mut buf = BufReader::new(data); let real_sum = adler32_reader(&mut buf).unwrap(); assert_eq!(checksum, real_sum); } test(&[], 1); test(&[0; 1024], 0x04000001); test(&[0; 1024 * 1024], 0x00f00001); test(&[0xA5; 1024 * 1024], 0xd5009ab1); } }
{ Adler32 { a: sum as u16, b: (sum >> 16) as u16, } }
identifier_body
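
The write_slice comments above start from the per-byte recurrence a = (a + byte) % 65521, b = (b + a) % 65521, with the checksum packed as (b << 16) | a. A naive version of exactly that recurrence, written in Go to match the earlier sketches (no deferred modulo or unrolling), reproduces the crate's `wiki` test vector:

package main

import "fmt"

const mod = 65521 // largest prime below 2^16, as in the Adler-32 definition

func adler32Naive(data []byte) uint32 {
	a, b := uint32(1), uint32(0)
	for _, c := range data {
		a = (a + uint32(c)) % mod // running byte sum (seeded with 1)
		b = (b + a) % mod         // running sum of all intermediate a values
	}
	return b<<16 | a // same packing as Adler32::checksum()
}

func main() {
	fmt.Printf("%#x\n", adler32Naive([]byte("Wikipedia"))) // prints 0x11e60398
}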
lib.rs
//! Adler-32 checksum implementation. //! //! This implementation features: //! //! - Permissively licensed (0BSD) clean-room implementation. //! - Zero dependencies. //! - Decent performance (3-4 GB/s). //! - `#![no_std]` support (with `default-features = false`). #![doc(html_root_url = "https://docs.rs/adler/0.2.2")] // Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default #![doc(test(attr(deny(unused_imports, unused_must_use))))] #![cfg_attr(docsrs, feature(doc_cfg))] #![warn(missing_debug_implementations)] #![forbid(unsafe_code)] #![cfg_attr(not(feature = "std"), no_std)] #[cfg(not(feature = "std"))] extern crate core as std; use std::hash::Hasher; use std::ops::{AddAssign, MulAssign, RemAssign}; #[cfg(feature = "std")] use std::io::{self, BufRead}; /// Adler-32 checksum calculator. /// /// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default /// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via /// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`]. /// /// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any /// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although /// that is not recommended (while every checksum is a hash, they are not necessarily good at being /// one). /// /// [`new`]: #method.new /// [`from_checksum`]: #method.from_checksum /// [`checksum`]: #method.checksum #[derive(Debug, Copy, Clone)] pub struct
{ a: u16, b: u16, } impl Adler32 { /// Creates a new Adler-32 instance with default state. #[inline] pub fn new() -> Self { Self::default() } /// Creates an `Adler32` instance from a precomputed Adler-32 checksum. /// /// This allows resuming checksum calculation without having to keep the `Adler32` instance /// around. /// /// # Example /// /// ``` /// # use adler::Adler32; /// let parts = [ /// "rust", /// "acean", /// ]; /// let whole = adler::adler32_slice(b"rustacean"); /// /// let mut sum = Adler32::new(); /// sum.write_slice(parts[0].as_bytes()); /// let partial = sum.checksum(); /// /// // ...later /// /// let mut sum = Adler32::from_checksum(partial); /// sum.write_slice(parts[1].as_bytes()); /// assert_eq!(sum.checksum(), whole); /// ``` #[inline] pub fn from_checksum(sum: u32) -> Self { Adler32 { a: sum as u16, b: (sum >> 16) as u16, } } /// Returns the calculated checksum at this point in time. #[inline] pub fn checksum(&self) -> u32 { (u32::from(self.b) << 16) | u32::from(self.a) } /// Adds `bytes` to the checksum calculation. /// /// If efficiency matters, this should be called with Byte slices that contain at least a few /// thousand Bytes. pub fn write_slice(&mut self, bytes: &[u8]) { // The basic algorithm is, for every byte: // a = (a + byte) % MOD // b = (b + a) % MOD // where MOD = 65521. // // For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows: // - Between calls to `write`, we ensure that a and b are always in range 0..MOD. // - We use 32-bit arithmetic in this function. // - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD` // operation. // // According to Wikipedia, b is calculated as follows for non-incremental checksumming: // b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521) // Where n is the number of bytes and Di is the i-th Byte. We need to change this to account // for the previous values of a and b, as well as treat every input Byte as being 255: // b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520 // Or in other words: // b_inc = n*65520 + n(n+1)/2*255 // The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521. // 2^32-65521 = n*65520 + n(n+1)/2*255 // Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552. // // On top of the optimization outlined above, the algorithm can also be parallelized with a // bit more work: // // Note that b is a linear combination of a vector of input bytes (D1, ..., Dn). // // If we fix some value k<N and rewrite indices 1, ..., N as // // 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k, // // then we can express a and b in terms of sums of smaller sequences kb and ka: // // ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k // kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k // // a = ka(1) + ka(2) + ... + ka(k) + 1 // b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N // // We use this insight to unroll the main loop and process k=4 bytes at a time. // The resulting code is highly amenable to SIMD acceleration, although the immediate speedups // stem from increased pipeline parallelism rather than auto-vectorization. 
// // This technique is described in-depth (here:)[https://software.intel.com/content/www/us/\ // en/develop/articles/fast-computation-of-fletcher-checksums.html] const MOD: u32 = 65521; const CHUNK_SIZE: usize = 5552 * 4; let mut a = u32::from(self.a); let mut b = u32::from(self.b); let mut a_vec = U32X4([0; 4]); let mut b_vec = a_vec; let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4); // iterate over 4 bytes at a time let chunk_iter = bytes.chunks_exact(CHUNK_SIZE); let remainder_chunk = chunk_iter.remainder(); for chunk in chunk_iter { for byte_vec in chunk.chunks_exact(4) { let val = U32X4::from(byte_vec); a_vec += val; b_vec += a_vec; } b += CHUNK_SIZE as u32 * a; a_vec %= MOD; b_vec %= MOD; b %= MOD; } // special-case the final chunk because it may be shorter than the rest for byte_vec in remainder_chunk.chunks_exact(4) { let val = U32X4::from(byte_vec); a_vec += val; b_vec += a_vec; } b += remainder_chunk.len() as u32 * a; a_vec %= MOD; b_vec %= MOD; b %= MOD; // combine the sub-sum results into the main sum b_vec *= 4; b_vec.0[1] += MOD - a_vec.0[1]; b_vec.0[2] += (MOD - a_vec.0[2]) * 2; b_vec.0[3] += (MOD - a_vec.0[3]) * 3; for &av in a_vec.0.iter() { a += av; } for &bv in b_vec.0.iter() { b += bv; } // iterate over the remaining few bytes in serial for &byte in remainder.iter() { a += u32::from(byte); b += a; } self.a = (a % MOD) as u16; self.b = (b % MOD) as u16; } } impl Default for Adler32 { #[inline] fn default() -> Self { Adler32 { a: 1, b: 0 } } } impl Hasher for Adler32 { #[inline] fn finish(&self) -> u64 { u64::from(self.checksum()) } fn write(&mut self, bytes: &[u8]) { self.write_slice(bytes); } } /// Calculates the Adler-32 checksum of a byte slice. pub fn adler32_slice(data: &[u8]) -> u32 { let mut h = Adler32::new(); h.write_slice(data); h.checksum() } #[derive(Copy, Clone)] struct U32X4([u32; 4]); impl U32X4 { fn from(bytes: &[u8]) -> Self { U32X4([ u32::from(bytes[0]), u32::from(bytes[1]), u32::from(bytes[2]), u32::from(bytes[3]), ]) } } impl AddAssign<Self> for U32X4 { fn add_assign(&mut self, other: Self) { for (s, o) in self.0.iter_mut().zip(other.0.iter()) { *s += o; } } } impl RemAssign<u32> for U32X4 { fn rem_assign(&mut self, quotient: u32) { for s in self.0.iter_mut() { *s %= quotient; } } } impl MulAssign<u32> for U32X4 { fn mul_assign(&mut self, rhs: u32) { for s in self.0.iter_mut() { *s *= rhs; } } } /// Calculates the Adler-32 checksum of a `BufRead`'s contents. /// /// The passed `BufRead` implementor will be read until it reaches EOF. /// /// If you only have a `Read` implementor, wrap it in `std::io::BufReader`. 
#[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub fn adler32_reader<R: BufRead>(reader: &mut R) -> io::Result<u32> { let mut h = Adler32::new(); loop { let len = { let buf = reader.fill_buf()?; if buf.is_empty() { return Ok(h.checksum()); } h.write_slice(buf); buf.len() }; reader.consume(len); } } #[cfg(test)] mod tests { use super::*; use std::io::BufReader; #[test] fn zeroes() { assert_eq!(adler32_slice(&[]), 1); assert_eq!(adler32_slice(&[0]), 1 | 1 << 16); assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16); assert_eq!(adler32_slice(&[0; 100]), 0x00640001); assert_eq!(adler32_slice(&[0; 1024]), 0x04000001); assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001); } #[test] fn ones() { assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e); assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11); } #[test] fn mixed() { assert_eq!(adler32_slice(&[1]), 2 | 2 << 16); assert_eq!(adler32_slice(&[40]), 41 | 41 << 16); assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1); } /// Example calculation from https://en.wikipedia.org/wiki/Adler-32. #[test] fn wiki() { assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398); } #[test] fn resume() { let mut adler = Adler32::new(); adler.write_slice(&[0xff; 1024]); let partial = adler.checksum(); assert_eq!(partial, 0x79a6fc2e); // from above adler.write_slice(&[0xff; 1024 * 1024 - 1024]); assert_eq!(adler.checksum(), 0x8e88ef11); // from above // Make sure that we can resume computing from the partial checksum via `from_checksum`. let mut adler = Adler32::from_checksum(partial); adler.write_slice(&[0xff; 1024 * 1024 - 1024]); assert_eq!(adler.checksum(), 0x8e88ef11); // from above } #[test] fn bufread() { fn test(data: &[u8], checksum: u32) { // `BufReader` uses an 8 KB buffer, so this will test buffer refilling. let mut buf = BufReader::new(data); let real_sum = adler32_reader(&mut buf).unwrap(); assert_eq!(checksum, real_sum); } test(&[], 1); test(&[0; 1024], 0x04000001); test(&[0; 1024 * 1024], 0x00f00001); test(&[0xA5; 1024 * 1024], 0xd5009ab1); } }
Adler32
identifier_name
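
The chunk-size constant in write_slice ("n = 5552.18..., so 5552") is the largest n for which the deferred sums cannot overflow a u32. Solving the overflow equation from the comment as a quadratic in n makes the quoted value explicit:

\[
2^{32} - 65521 \;=\; 65520\,n + \frac{n(n+1)}{2}\cdot 255
\;\Longleftrightarrow\;
255\,n^{2} + 131295\,n - 8589803550 = 0
\]
\[
n \;=\; \frac{-131295 + \sqrt{131295^{2} + 4\cdot 255\cdot 8589803550}}{2\cdot 255}
\;\approx\; 5552.18,
\qquad\text{so the code uses } n = 5552 .
\]

CHUNK_SIZE is 5552 * 4 because each of the four lanes sees 5552 of a chunk's bytes.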
lib.rs
//! Adler-32 checksum implementation. //!
//! - `#![no_std]` support (with `default-features = false`). #![doc(html_root_url = "https://docs.rs/adler/0.2.2")] // Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default #![doc(test(attr(deny(unused_imports, unused_must_use))))] #![cfg_attr(docsrs, feature(doc_cfg))] #![warn(missing_debug_implementations)] #![forbid(unsafe_code)] #![cfg_attr(not(feature = "std"), no_std)] #[cfg(not(feature = "std"))] extern crate core as std; use std::hash::Hasher; use std::ops::{AddAssign, MulAssign, RemAssign}; #[cfg(feature = "std")] use std::io::{self, BufRead}; /// Adler-32 checksum calculator. /// /// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default /// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via /// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`]. /// /// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any /// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although /// that is not recommended (while every checksum is a hash, they are not necessarily good at being /// one). /// /// [`new`]: #method.new /// [`from_checksum`]: #method.from_checksum /// [`checksum`]: #method.checksum #[derive(Debug, Copy, Clone)] pub struct Adler32 { a: u16, b: u16, } impl Adler32 { /// Creates a new Adler-32 instance with default state. #[inline] pub fn new() -> Self { Self::default() } /// Creates an `Adler32` instance from a precomputed Adler-32 checksum. /// /// This allows resuming checksum calculation without having to keep the `Adler32` instance /// around. /// /// # Example /// /// ``` /// # use adler::Adler32; /// let parts = [ /// "rust", /// "acean", /// ]; /// let whole = adler::adler32_slice(b"rustacean"); /// /// let mut sum = Adler32::new(); /// sum.write_slice(parts[0].as_bytes()); /// let partial = sum.checksum(); /// /// // ...later /// /// let mut sum = Adler32::from_checksum(partial); /// sum.write_slice(parts[1].as_bytes()); /// assert_eq!(sum.checksum(), whole); /// ``` #[inline] pub fn from_checksum(sum: u32) -> Self { Adler32 { a: sum as u16, b: (sum >> 16) as u16, } } /// Returns the calculated checksum at this point in time. #[inline] pub fn checksum(&self) -> u32 { (u32::from(self.b) << 16) | u32::from(self.a) } /// Adds `bytes` to the checksum calculation. /// /// If efficiency matters, this should be called with Byte slices that contain at least a few /// thousand Bytes. pub fn write_slice(&mut self, bytes: &[u8]) { // The basic algorithm is, for every byte: // a = (a + byte) % MOD // b = (b + a) % MOD // where MOD = 65521. // // For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows: // - Between calls to `write`, we ensure that a and b are always in range 0..MOD. // - We use 32-bit arithmetic in this function. // - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD` // operation. // // According to Wikipedia, b is calculated as follows for non-incremental checksumming: // b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521) // Where n is the number of bytes and Di is the i-th Byte. We need to change this to account // for the previous values of a and b, as well as treat every input Byte as being 255: // b_inc = n×255 + (n-1)×255 + ... 
+ 255 + n*65520 // Or in other words: // b_inc = n*65520 + n(n+1)/2*255 // The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521. // 2^32-65521 = n*65520 + n(n+1)/2*255 // Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552. // // On top of the optimization outlined above, the algorithm can also be parallelized with a // bit more work: // // Note that b is a linear combination of a vector of input bytes (D1, ..., Dn). // // If we fix some value k<N and rewrite indices 1, ..., N as // // 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k, // // then we can express a and b in terms of sums of smaller sequences kb and ka: // // ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k // kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k // // a = ka(1) + ka(2) + ... + ka(k) + 1 // b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N // // We use this insight to unroll the main loop and process k=4 bytes at a time. // The resulting code is highly amenable to SIMD acceleration, although the immediate speedups // stem from increased pipeline parallelism rather than auto-vectorization. // // This technique is described in-depth (here:)[https://software.intel.com/content/www/us/\ // en/develop/articles/fast-computation-of-fletcher-checksums.html] const MOD: u32 = 65521; const CHUNK_SIZE: usize = 5552 * 4; let mut a = u32::from(self.a); let mut b = u32::from(self.b); let mut a_vec = U32X4([0; 4]); let mut b_vec = a_vec; let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4); // iterate over 4 bytes at a time let chunk_iter = bytes.chunks_exact(CHUNK_SIZE); let remainder_chunk = chunk_iter.remainder(); for chunk in chunk_iter { for byte_vec in chunk.chunks_exact(4) { let val = U32X4::from(byte_vec); a_vec += val; b_vec += a_vec; } b += CHUNK_SIZE as u32 * a; a_vec %= MOD; b_vec %= MOD; b %= MOD; } // special-case the final chunk because it may be shorter than the rest for byte_vec in remainder_chunk.chunks_exact(4) { let val = U32X4::from(byte_vec); a_vec += val; b_vec += a_vec; } b += remainder_chunk.len() as u32 * a; a_vec %= MOD; b_vec %= MOD; b %= MOD; // combine the sub-sum results into the main sum b_vec *= 4; b_vec.0[1] += MOD - a_vec.0[1]; b_vec.0[2] += (MOD - a_vec.0[2]) * 2; b_vec.0[3] += (MOD - a_vec.0[3]) * 3; for &av in a_vec.0.iter() { a += av; } for &bv in b_vec.0.iter() { b += bv; } // iterate over the remaining few bytes in serial for &byte in remainder.iter() { a += u32::from(byte); b += a; } self.a = (a % MOD) as u16; self.b = (b % MOD) as u16; } } impl Default for Adler32 { #[inline] fn default() -> Self { Adler32 { a: 1, b: 0 } } } impl Hasher for Adler32 { #[inline] fn finish(&self) -> u64 { u64::from(self.checksum()) } fn write(&mut self, bytes: &[u8]) { self.write_slice(bytes); } } /// Calculates the Adler-32 checksum of a byte slice. 
pub fn adler32_slice(data: &[u8]) -> u32 { let mut h = Adler32::new(); h.write_slice(data); h.checksum() } #[derive(Copy, Clone)] struct U32X4([u32; 4]); impl U32X4 { fn from(bytes: &[u8]) -> Self { U32X4([ u32::from(bytes[0]), u32::from(bytes[1]), u32::from(bytes[2]), u32::from(bytes[3]), ]) } } impl AddAssign<Self> for U32X4 { fn add_assign(&mut self, other: Self) { for (s, o) in self.0.iter_mut().zip(other.0.iter()) { *s += o; } } } impl RemAssign<u32> for U32X4 { fn rem_assign(&mut self, quotient: u32) { for s in self.0.iter_mut() { *s %= quotient; } } } impl MulAssign<u32> for U32X4 { fn mul_assign(&mut self, rhs: u32) { for s in self.0.iter_mut() { *s *= rhs; } } } /// Calculates the Adler-32 checksum of a `BufRead`'s contents. /// /// The passed `BufRead` implementor will be read until it reaches EOF. /// /// If you only have a `Read` implementor, wrap it in `std::io::BufReader`. #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub fn adler32_reader<R: BufRead>(reader: &mut R) -> io::Result<u32> { let mut h = Adler32::new(); loop { let len = { let buf = reader.fill_buf()?; if buf.is_empty() { return Ok(h.checksum()); } h.write_slice(buf); buf.len() }; reader.consume(len); } } #[cfg(test)] mod tests { use super::*; use std::io::BufReader; #[test] fn zeroes() { assert_eq!(adler32_slice(&[]), 1); assert_eq!(adler32_slice(&[0]), 1 | 1 << 16); assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16); assert_eq!(adler32_slice(&[0; 100]), 0x00640001); assert_eq!(adler32_slice(&[0; 1024]), 0x04000001); assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001); } #[test] fn ones() { assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e); assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11); } #[test] fn mixed() { assert_eq!(adler32_slice(&[1]), 2 | 2 << 16); assert_eq!(adler32_slice(&[40]), 41 | 41 << 16); assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1); } /// Example calculation from https://en.wikipedia.org/wiki/Adler-32. #[test] fn wiki() { assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398); } #[test] fn resume() { let mut adler = Adler32::new(); adler.write_slice(&[0xff; 1024]); let partial = adler.checksum(); assert_eq!(partial, 0x79a6fc2e); // from above adler.write_slice(&[0xff; 1024 * 1024 - 1024]); assert_eq!(adler.checksum(), 0x8e88ef11); // from above // Make sure that we can resume computing from the partial checksum via `from_checksum`. let mut adler = Adler32::from_checksum(partial); adler.write_slice(&[0xff; 1024 * 1024 - 1024]); assert_eq!(adler.checksum(), 0x8e88ef11); // from above } #[test] fn bufread() { fn test(data: &[u8], checksum: u32) { // `BufReader` uses an 8 KB buffer, so this will test buffer refilling. let mut buf = BufReader::new(data); let real_sum = adler32_reader(&mut buf).unwrap(); assert_eq!(checksum, real_sum); } test(&[], 1); test(&[0; 1024], 0x04000001); test(&[0; 1024 * 1024], 0x00f00001); test(&[0xA5; 1024 * 1024], 0xd5009ab1); } }
//! This implementation features: //! //! - Permissively licensed (0BSD) clean-room implementation. //! - Zero dependencies. //! - Decent performance (3-4 GB/s).
random_line_split
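
The ka/kb decomposition in the same comment, written out: fixing a lane count k (k = 4 in the code) and re-indexing the N input bytes as D_{i_j} with lane j and step i, the lane sums recombine into a and b as

\[
ka(j) = \sum_{i=1}^{N/k} D_{i_j},
\qquad
kb(j) = \sum_{i=1}^{N/k} \Bigl(\tfrac{N}{k} - i + 1\Bigr) D_{i_j},
\qquad 1 \le j \le k,
\]
\[
a = 1 + \sum_{j=1}^{k} ka(j),
\qquad
b = N + k \sum_{j=1}^{k} kb(j) \;-\; \sum_{j=2}^{k} (j-1)\, ka(j),
\]

which is exactly the recombination step after the main loop: b_vec *= 4 followed by the (MOD - a_vec) corrections for the last three lanes (adding MOD - a is subtracting a modulo 65521).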
lib.rs
//! Adler-32 checksum implementation. //! //! This implementation features: //! //! - Permissively licensed (0BSD) clean-room implementation. //! - Zero dependencies. //! - Decent performance (3-4 GB/s). //! - `#![no_std]` support (with `default-features = false`). #![doc(html_root_url = "https://docs.rs/adler/0.2.2")] // Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default #![doc(test(attr(deny(unused_imports, unused_must_use))))] #![cfg_attr(docsrs, feature(doc_cfg))] #![warn(missing_debug_implementations)] #![forbid(unsafe_code)] #![cfg_attr(not(feature = "std"), no_std)] #[cfg(not(feature = "std"))] extern crate core as std; use std::hash::Hasher; use std::ops::{AddAssign, MulAssign, RemAssign}; #[cfg(feature = "std")] use std::io::{self, BufRead}; /// Adler-32 checksum calculator. /// /// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default /// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via /// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`]. /// /// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any /// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although /// that is not recommended (while every checksum is a hash, they are not necessarily good at being /// one). /// /// [`new`]: #method.new /// [`from_checksum`]: #method.from_checksum /// [`checksum`]: #method.checksum #[derive(Debug, Copy, Clone)] pub struct Adler32 { a: u16, b: u16, } impl Adler32 { /// Creates a new Adler-32 instance with default state. #[inline] pub fn new() -> Self { Self::default() } /// Creates an `Adler32` instance from a precomputed Adler-32 checksum. /// /// This allows resuming checksum calculation without having to keep the `Adler32` instance /// around. /// /// # Example /// /// ``` /// # use adler::Adler32; /// let parts = [ /// "rust", /// "acean", /// ]; /// let whole = adler::adler32_slice(b"rustacean"); /// /// let mut sum = Adler32::new(); /// sum.write_slice(parts[0].as_bytes()); /// let partial = sum.checksum(); /// /// // ...later /// /// let mut sum = Adler32::from_checksum(partial); /// sum.write_slice(parts[1].as_bytes()); /// assert_eq!(sum.checksum(), whole); /// ``` #[inline] pub fn from_checksum(sum: u32) -> Self { Adler32 { a: sum as u16, b: (sum >> 16) as u16, } } /// Returns the calculated checksum at this point in time. #[inline] pub fn checksum(&self) -> u32 { (u32::from(self.b) << 16) | u32::from(self.a) } /// Adds `bytes` to the checksum calculation. /// /// If efficiency matters, this should be called with Byte slices that contain at least a few /// thousand Bytes. pub fn write_slice(&mut self, bytes: &[u8]) { // The basic algorithm is, for every byte: // a = (a + byte) % MOD // b = (b + a) % MOD // where MOD = 65521. // // For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows: // - Between calls to `write`, we ensure that a and b are always in range 0..MOD. // - We use 32-bit arithmetic in this function. // - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD` // operation. // // According to Wikipedia, b is calculated as follows for non-incremental checksumming: // b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521) // Where n is the number of bytes and Di is the i-th Byte. 
We need to change this to account // for the previous values of a and b, as well as treat every input Byte as being 255: // b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520 // Or in other words: // b_inc = n*65520 + n(n+1)/2*255 // The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521. // 2^32-65521 = n*65520 + n(n+1)/2*255 // Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552. // // On top of the optimization outlined above, the algorithm can also be parallelized with a // bit more work: // // Note that b is a linear combination of a vector of input bytes (D1, ..., Dn). // // If we fix some value k<N and rewrite indices 1, ..., N as // // 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k, // // then we can express a and b in terms of sums of smaller sequences kb and ka: // // ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k // kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k // // a = ka(1) + ka(2) + ... + ka(k) + 1 // b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N // // We use this insight to unroll the main loop and process k=4 bytes at a time. // The resulting code is highly amenable to SIMD acceleration, although the immediate speedups // stem from increased pipeline parallelism rather than auto-vectorization. // // This technique is described in-depth (here:)[https://software.intel.com/content/www/us/\ // en/develop/articles/fast-computation-of-fletcher-checksums.html] const MOD: u32 = 65521; const CHUNK_SIZE: usize = 5552 * 4; let mut a = u32::from(self.a); let mut b = u32::from(self.b); let mut a_vec = U32X4([0; 4]); let mut b_vec = a_vec; let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4); // iterate over 4 bytes at a time let chunk_iter = bytes.chunks_exact(CHUNK_SIZE); let remainder_chunk = chunk_iter.remainder(); for chunk in chunk_iter { for byte_vec in chunk.chunks_exact(4) { let val = U32X4::from(byte_vec); a_vec += val; b_vec += a_vec; } b += CHUNK_SIZE as u32 * a; a_vec %= MOD; b_vec %= MOD; b %= MOD; } // special-case the final chunk because it may be shorter than the rest for byte_vec in remainder_chunk.chunks_exact(4) { let val = U32X4::from(byte_vec); a_vec += val; b_vec += a_vec; } b += remainder_chunk.len() as u32 * a; a_vec %= MOD; b_vec %= MOD; b %= MOD; // combine the sub-sum results into the main sum b_vec *= 4; b_vec.0[1] += MOD - a_vec.0[1]; b_vec.0[2] += (MOD - a_vec.0[2]) * 2; b_vec.0[3] += (MOD - a_vec.0[3]) * 3; for &av in a_vec.0.iter() { a += av; } for &bv in b_vec.0.iter() { b += bv; } // iterate over the remaining few bytes in serial for &byte in remainder.iter() { a += u32::from(byte); b += a; } self.a = (a % MOD) as u16; self.b = (b % MOD) as u16; } } impl Default for Adler32 { #[inline] fn default() -> Self { Adler32 { a: 1, b: 0 } } } impl Hasher for Adler32 { #[inline] fn finish(&self) -> u64 { u64::from(self.checksum()) } fn write(&mut self, bytes: &[u8]) { self.write_slice(bytes); } } /// Calculates the Adler-32 checksum of a byte slice. 
pub fn adler32_slice(data: &[u8]) -> u32 { let mut h = Adler32::new(); h.write_slice(data); h.checksum() } #[derive(Copy, Clone)] struct U32X4([u32; 4]); impl U32X4 { fn from(bytes: &[u8]) -> Self { U32X4([ u32::from(bytes[0]), u32::from(bytes[1]), u32::from(bytes[2]), u32::from(bytes[3]), ]) } } impl AddAssign<Self> for U32X4 { fn add_assign(&mut self, other: Self) { for (s, o) in self.0.iter_mut().zip(other.0.iter()) { *s += o; } } } impl RemAssign<u32> for U32X4 { fn rem_assign(&mut self, quotient: u32) { for s in self.0.iter_mut() { *s %= quotient; } } } impl MulAssign<u32> for U32X4 { fn mul_assign(&mut self, rhs: u32) { for s in self.0.iter_mut() { *s *= rhs; } } } /// Calculates the Adler-32 checksum of a `BufRead`'s contents. /// /// The passed `BufRead` implementor will be read until it reaches EOF. /// /// If you only have a `Read` implementor, wrap it in `std::io::BufReader`. #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub fn adler32_reader<R: BufRead>(reader: &mut R) -> io::Result<u32> { let mut h = Adler32::new(); loop { let len = { let buf = reader.fill_buf()?; if buf.is_empty() {
h.write_slice(buf); buf.len() }; reader.consume(len); } } #[cfg(test)] mod tests { use super::*; use std::io::BufReader; #[test] fn zeroes() { assert_eq!(adler32_slice(&[]), 1); assert_eq!(adler32_slice(&[0]), 1 | 1 << 16); assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16); assert_eq!(adler32_slice(&[0; 100]), 0x00640001); assert_eq!(adler32_slice(&[0; 1024]), 0x04000001); assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001); } #[test] fn ones() { assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e); assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11); } #[test] fn mixed() { assert_eq!(adler32_slice(&[1]), 2 | 2 << 16); assert_eq!(adler32_slice(&[40]), 41 | 41 << 16); assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1); } /// Example calculation from https://en.wikipedia.org/wiki/Adler-32. #[test] fn wiki() { assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398); } #[test] fn resume() { let mut adler = Adler32::new(); adler.write_slice(&[0xff; 1024]); let partial = adler.checksum(); assert_eq!(partial, 0x79a6fc2e); // from above adler.write_slice(&[0xff; 1024 * 1024 - 1024]); assert_eq!(adler.checksum(), 0x8e88ef11); // from above // Make sure that we can resume computing from the partial checksum via `from_checksum`. let mut adler = Adler32::from_checksum(partial); adler.write_slice(&[0xff; 1024 * 1024 - 1024]); assert_eq!(adler.checksum(), 0x8e88ef11); // from above } #[test] fn bufread() { fn test(data: &[u8], checksum: u32) { // `BufReader` uses an 8 KB buffer, so this will test buffer refilling. let mut buf = BufReader::new(data); let real_sum = adler32_reader(&mut buf).unwrap(); assert_eq!(checksum, real_sum); } test(&[], 1); test(&[0; 1024], 0x04000001); test(&[0; 1024 * 1024], 0x00f00001); test(&[0xA5; 1024 * 1024], 0xd5009ab1); } }
return Ok(h.checksum()); }
conditional_block
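The write_slice comment above pins the deferred-modulo chunk size at 5552 by solving n*65520 + n(n+1)/2*255 <= 2^32 - 65521 for the largest n. A minimal standalone sketch (illustrative, not part of the crate) that confirms the bound numerically:

fn main() {
    const MOD: u64 = 65521;
    let limit = (1u64 << 32) - MOD;
    // Worst case: a starts at MOD-1 and every input byte is 255, so b can
    // grow by n*(MOD-1) + n*(n+1)/2 * 255 over n bytes between reductions.
    let b_inc = |n: u64| n * (MOD - 1) + n * (n + 1) / 2 * 255;
    let max_n = (1u64..).take_while(|&n| b_inc(n) <= limit).last().unwrap();
    assert_eq!(max_n, 5552); // hence CHUNK_SIZE = 5552 * 4 with four lanes
    assert!(b_inc(5553) > limit);
}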
main.rs
extern crate rand; extern crate getopts; #[macro_use] extern crate serde_derive; extern crate serde; extern crate serde_json; extern crate bincode; extern crate rayon; pub mod aabb; pub mod background; pub mod bvh; pub mod camera; pub mod deserialize; pub mod dielectric; pub mod disc; pub mod emitter; pub mod hitable; pub mod hitable_list; pub mod lambertian; pub mod material; pub mod metal; pub mod mixture; // pub mod phong; pub mod plane; pub mod random; pub mod ray; pub mod rectangle; pub mod scene; pub mod sampling; pub mod sphere; pub mod sphere_geometry; pub mod triangle_mesh; pub mod vector; pub mod tests; pub mod ward; use aabb::AABB; use background::*; use bvh::BVH; use camera::Camera; use deserialize::*; use disc::*; use getopts::Options; use hitable::*; use rand::Rng; use random::*; use ray::Ray; use vector::Vec3; use std::cmp; use std::env; use std::fs::File; use std::io::BufReader; use std::io::BufWriter; use std::io::Write; use rayon::prelude::*; ////////////////////////////////////////////////////////////////////////////// fn color(ray: &Ray, world: &Hitable, background: &Background, lights: &Vec<AABB>) -> Vec3 where
////////////////////////////////////////////////////////////////////////////// // my own bastardized version of a float file format, horrendously inefficient fn write_image_to_file(image: &Vec<Vec<Vec3>>, samples_so_far: usize, subsample: usize, file_prefix: &String) { println!("Writing output to {}", format!("{}.linear_rgb", file_prefix)); let mut f = BufWriter::new(File::create(format!("{}.linear_rgb", file_prefix)).unwrap()); let ny = image.len()/subsample; let nx = image[0].len()/subsample; let ns = samples_so_far as f64; f.write_fmt(format_args!("{} {}\n", nx, ny)).unwrap(); for super_j in (0..ny).rev() { for super_i in 0..nx { let mut super_pixel = Vec3::zero(); let top = cmp::min(image.len(), (super_j+1)*subsample); let right = cmp::min(image[0].len(), (super_i+1)*subsample); let h = top - super_j*subsample; let w = right - super_i*subsample; for j in (super_j*subsample..top).rev() { for i in super_i*subsample..right { super_pixel = super_pixel + image[j][i]; } } let mut out_col = super_pixel / (ns * (w as f64) * (h as f64)); f.write_fmt(format_args!("{} {} {}\n", out_col[0], out_col[1], out_col[2])).unwrap(); } } } fn update_all_pixels(output_image: &mut Vec<Vec<Vec3>>, camera: &Camera, bvh_world: &Hitable, background: &Background, lights: &Vec<AABB>, nx: usize, ny: usize, rng: &mut rand::ThreadRng) { for j in (0..ny).rev() { for i in 0..nx { let u = ((i as f64) + rng.gen::<f64>()) / (nx as f64); let v = ((j as f64) + rng.gen::<f64>()) / (ny as f64); let r = camera.get_ray(u, v); output_image[j][i] = output_image[j][i] + color(&r, bvh_world, background, lights); } } } #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] struct ImageSummaries { w: usize, h: usize, s: usize, data: Vec<Vec<Vec3>> } fn combine_summaries(summary1: &ImageSummaries, summary2: &ImageSummaries) -> ImageSummaries { if summary1.w != summary2.w { panic!(format!("Need same widths ({} vs {})!", summary1.w, summary2.w)); } if summary1.h != summary2.h { panic!(format!("Need same heights ({} vs {})!", summary1.h, summary2.h)); } if summary1.data.len() != summary2.data.len() { panic!(format!("Inconsistent data lengths ({} {}) - upstream bug?", summary1.data.len(), summary2.data.len())); } let mut result = Vec::new(); for i in 0..summary1.data.len() { let l1 = summary1.data[i].len(); let l2 = summary2.data[i].len(); if l1 != l2 { panic!(format!( "Inconsistent row lengths (row {}: {} {}) - upstream bug?", i, l1, l2)); } let row1 = summary1.data[i].iter(); let row2 = summary2.data[i].iter(); result.push(row1.zip(row2).map(|(v1, v2)| *v1 + *v2).collect()) } ImageSummaries { w: summary1.w, h: summary1.h, s: summary1.s + summary2.s, data: result } } fn write_image(args: &Args) { let default_output_name = "out".to_string(); let output_name = &args.o.as_ref().unwrap_or(&default_output_name); let default_input_name = "/dev/stdin".to_string(); let input_name = &args.i.as_ref().unwrap_or(&default_input_name); let br = BufReader::new(File::open(input_name).unwrap()); let json_value = serde_json::from_reader(br).unwrap(); let scene = deserialize_scene(&json_value).unwrap(); let background = scene.background; let camera = scene.camera; let lights: Vec<_> = scene.object_list .iter() .map(|h| h.importance_distribution()) .filter(|h| h.is_some()) .map(|h| h.unwrap()) .collect(); let bvh_world = BVH::build(scene.object_list); let ny = args.h.unwrap_or(200); let nx = args.w.unwrap_or_else(|| ((ny as f64) * camera.params.aspect).round() as usize); let n_threads = args.n.unwrap_or(1); let ns = args.s.unwrap_or(100) / n_threads; 
let background_ref = &*background; let bvh_world_ref = &*bvh_world; println!("With {} threads", n_threads); let output_summaries: Vec<_> = (0..n_threads).into_par_iter().map(|i| { let mut output_image = Vec::<Vec<Vec3>>::new(); for _j in 0..ny { output_image.push(vec![Vec3::zero(); nx]); } let mut rng = rand::thread_rng(); for s in 1..ns+1 { update_all_pixels(&mut output_image, &camera, bvh_world_ref, background_ref, &lights, nx, ny, &mut rng); if i == 0 { eprint!("\r \r{} / {} done", s, ns); } } if i == 0 { eprintln!("\nFinished"); } ImageSummaries { w: nx, h: ny, s: ns, data: output_image } }).collect(); let mut summary = output_summaries[0].clone(); for new_summary in output_summaries.iter().skip(1) { summary = combine_summaries(&summary, &new_summary); } println!("Using {} samples", summary.s); write_image_to_file(&summary.data, summary.s, 1, &output_name); } ////////////////////////////////////////////////////////////////////////////// struct Args { pub w: Option<usize>, pub h: Option<usize>, pub s: Option<usize>, pub n: Option<usize>, pub o: Option<String>, pub i: Option<String>, pub parallel: bool } fn main() { random::init_rng(); let args: Vec<String> = env::args().collect(); let mut opts = Options::new(); opts.optopt("w", "width", "set image width in pixels", "NAME"); opts.optopt("h", "height", "set image height in pixels", "NAME"); opts.optopt("s", "samples", "set number of samples per pixel", "NAME"); opts.optopt("n", "nthreads", "number of threads, default 1", "NAME"); opts.optopt("o", "output", "set output file name", "NAME"); opts.optopt("i", "input", "set input file name", "NAME"); opts.optflag("p", "parallel", "write out pixel statistics, suited for parallel processing"); opts.optflag("?", "help", "print this help menu"); let matches = match opts.parse(&args[1..]) { Ok(m) => { m } Err(f) => { panic!(f.to_string()) } }; write_image(&(Args { w: matches.opt_str("w").and_then(|x| x.parse::<usize>().ok()), h: matches.opt_str("h").and_then(|x| x.parse::<usize>().ok()), s: matches.opt_str("s").and_then(|x| x.parse::<usize>().ok()), n: matches.opt_str("n").and_then(|x| x.parse::<usize>().ok()), o: matches.opt_str("o"), i: matches.opt_str("i"), parallel: matches.opt_present("p") })); }
{ let mut current_ray = *ray; let mut current_attenuation = Vec3::new(1.0, 1.0, 1.0); for _depth in 0..50 { if current_attenuation.length() < 1e-8 { return Vec3::new(0.0, 0.0, 0.0) } match world.hit(&current_ray, 0.00001, 1e20) { None => { let unit_direction = vector::unit_vector(&current_ray.direction()); return background.get_background(&unit_direction) * current_attenuation; }, Some(hr) => { if !hr.material.wants_importance_sampling() || lights.len() == 0 { match hr.material.scatter(&current_ray, &hr) { material::Scatter::Bounce(next_attenuation, scattered) => { current_attenuation = current_attenuation * next_attenuation; current_ray = scattered; }, material::Scatter::Emit(emission) => { // println!("Hit light!"); return emission * current_attenuation; }, material::Scatter::Absorb => { return Vec3::new(0.0, 0.0, 0.0) } } continue; } let this_hemi = Disc::new(hr.p, hr.normal, 1.0); let light = { let chosen_light = &lights[rand_range(0, lights.len())]; let chosen_disc = chosen_light.project_to_disc_on_sphere(&hr.p); // sample from that disc let gx_sample = this_hemi.hemi_disc_subtended_angle(&chosen_disc); let gx = gx_sample.0; let sample_direction = gx_sample.1; if gx == 0.0 { (0.0, sample_direction) } else { (2.0 * std::f64::consts::PI / gx_sample.0, sample_direction) } }; let scatter = { match hr.material.scatter(&current_ray, &hr) { material::Scatter::Bounce(_attenuation, scattered) => { (hr.material.bsdf(&current_ray, &scattered, &hr.normal), scattered.direction()) } material::Scatter::Emit(_emission) => { panic!("Whaaaaa emit?!") }, material::Scatter::Absorb => { panic!("Whaaaaa absorb?!") } } }; let light_p = if light.0 < 1e-4 { 0.0 } else { light.0 }; let light_d = light.1; let scatter_p = if scatter.0 < 1e-4 { 0.0 } else { scatter.0 }; let scatter_d = scatter.1; // Veach's balance heuristic for a one-sample MIS estimator // gives these weights: let s = light_p + scatter_p; let light_w = light_p / s; let scatter_w = scatter_p / s; // println!("{} {}", light_p, scatter_p); // the classic Veach one-sample MIS estimator is // (light_w / light_p) * light_f + (scatter_w / scatter_p) * scatter_f let next_values = if (light_p > 0.0) && rand_double() < 0.5 { // sample from lights ((light_w / light_p) * 2.0, Ray::new(hr.p, light_d)) } else if scatter_p > 0.0 { ((scatter_w / scatter_p) * 2.0, Ray::new(hr.p, scatter_d)) } else { return Vec3::new(0.0, 0.0, 0.0); }; let albedo = hr.material.albedo(&current_ray, &next_values.1, &hr.normal); current_ray = next_values.1; current_attenuation = current_attenuation * albedo * next_values.0; } } } current_attenuation }
identifier_body
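In the color function above, when importance sampling is active the renderer draws one sample, choosing the light or the BSDF strategy with probability 1/2 and scaling by (w / p) * 2.0. With balance-heuristic weights w_i = p_i / (light_p + scatter_p), that scale collapses to 2 / (light_p + scatter_p) whichever branch fired. A small sketch of just that arithmetic (the function name is illustrative):

fn one_sample_mis_weight(light_p: f64, scatter_p: f64, chose_light: bool) -> f64 {
    // Assumes light_p + scatter_p > 0; the caller returns black when both
    // pdfs vanish, mirroring the final `else` branch in `color`.
    let s = light_p + scatter_p;
    let (w, p) = if chose_light {
        (light_p / s, light_p)
    } else {
        (scatter_p / s, scatter_p)
    };
    // Balance heuristic: (w / p) * 2.0 simplifies to 2.0 / s either way.
    (w / p) * 2.0
}

This collapse is why the loop can fold the estimator into a single scalar applied to the chosen ray instead of evaluating both strategies.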
main.rs
extern crate rand; extern crate getopts; #[macro_use] extern crate serde_derive; extern crate serde; extern crate serde_json; extern crate bincode; extern crate rayon; pub mod aabb; pub mod background; pub mod bvh; pub mod camera; pub mod deserialize; pub mod dielectric; pub mod disc; pub mod emitter; pub mod hitable; pub mod hitable_list; pub mod lambertian; pub mod material; pub mod metal; pub mod mixture; // pub mod phong; pub mod plane; pub mod random; pub mod ray; pub mod rectangle; pub mod scene; pub mod sampling; pub mod sphere; pub mod sphere_geometry; pub mod triangle_mesh; pub mod vector; pub mod tests; pub mod ward; use aabb::AABB; use background::*; use bvh::BVH; use camera::Camera; use deserialize::*; use disc::*; use getopts::Options; use hitable::*; use rand::Rng; use random::*; use ray::Ray; use vector::Vec3; use std::cmp; use std::env; use std::fs::File; use std::io::BufReader; use std::io::BufWriter; use std::io::Write; use rayon::prelude::*; ////////////////////////////////////////////////////////////////////////////// fn color(ray: &Ray, world: &Hitable, background: &Background, lights: &Vec<AABB>) -> Vec3 where { let mut current_ray = *ray; let mut current_attenuation = Vec3::new(1.0, 1.0, 1.0); for _depth in 0..50 { if current_attenuation.length() < 1e-8 { return Vec3::new(0.0, 0.0, 0.0) } match world.hit(&current_ray, 0.00001, 1e20) { None => { let unit_direction = vector::unit_vector(&current_ray.direction()); return background.get_background(&unit_direction) * current_attenuation; }, Some(hr) => { if !hr.material.wants_importance_sampling() || lights.len() == 0 { match hr.material.scatter(&current_ray, &hr) { material::Scatter::Bounce(next_attenuation, scattered) => { current_attenuation = current_attenuation * next_attenuation; current_ray = scattered; }, material::Scatter::Emit(emission) => { // println!("Hit light!"); return emission * current_attenuation; }, material::Scatter::Absorb => { return Vec3::new(0.0, 0.0, 0.0) } } continue; } let this_hemi = Disc::new(hr.p, hr.normal, 1.0); let light = { let chosen_light = &lights[rand_range(0, lights.len())]; let chosen_disc = chosen_light.project_to_disc_on_sphere(&hr.p); // sample from that disc let gx_sample = this_hemi.hemi_disc_subtended_angle(&chosen_disc); let gx = gx_sample.0; let sample_direction = gx_sample.1; if gx == 0.0 { (0.0, sample_direction) } else { (2.0 * std::f64::consts::PI / gx_sample.0, sample_direction) } }; let scatter = { match hr.material.scatter(&current_ray, &hr) { material::Scatter::Bounce(_attenuation, scattered) => { (hr.material.bsdf(&current_ray, &scattered, &hr.normal), scattered.direction()) } material::Scatter::Emit(_emission) => { panic!("Whaaaaa emit?!") }, material::Scatter::Absorb => { panic!("Whaaaaa absorb?!") } } }; let light_p = if light.0 < 1e-4 { 0.0 } else { light.0 }; let light_d = light.1; let scatter_p = if scatter.0 < 1e-4 { 0.0 } else { scatter.0 }; let scatter_d = scatter.1; // Veach's balance heuristic for a one-sample MIS estimator // gives these weights: let s = light_p + scatter_p; let light_w = light_p / s; let scatter_w = scatter_p / s; // println!("{} {}", light_p, scatter_p); // the classic Veach one-sample MIS estimator is // (light_w / light_p) * light_f + (scatter_w / scatter_p) * scatter_f let next_values = if (light_p > 0.0) && rand_double() < 0.5 { // sample from lights ((light_w / light_p) * 2.0, Ray::new(hr.p, light_d)) } else if scatter_p > 0.0 { ((scatter_w / scatter_p) * 2.0, Ray::new(hr.p, scatter_d)) } else { return Vec3::new(0.0, 0.0, 0.0); 
}; let albedo = hr.material.albedo(&current_ray, &next_values.1, &hr.normal); current_ray = next_values.1; current_attenuation = current_attenuation * albedo * next_values.0; } } } current_attenuation } ////////////////////////////////////////////////////////////////////////////// // my own bastardized version of a float file format, horrendously inefficient fn write_image_to_file(image: &Vec<Vec<Vec3>>, samples_so_far: usize, subsample: usize, file_prefix: &String) { println!("Writing output to {}", format!("{}.linear_rgb", file_prefix)); let mut f = BufWriter::new(File::create(format!("{}.linear_rgb", file_prefix)).unwrap()); let ny = image.len()/subsample; let nx = image[0].len()/subsample; let ns = samples_so_far as f64; f.write_fmt(format_args!("{} {}\n", nx, ny)).unwrap(); for super_j in (0..ny).rev() { for super_i in 0..nx { let mut super_pixel = Vec3::zero(); let top = cmp::min(image.len(), (super_j+1)*subsample); let right = cmp::min(image[0].len(), (super_i+1)*subsample); let h = top - super_j*subsample; let w = right - super_i*subsample; for j in (super_j*subsample..top).rev() { for i in super_i*subsample..right { super_pixel = super_pixel + image[j][i]; } } let mut out_col = super_pixel / (ns * (w as f64) * (h as f64)); f.write_fmt(format_args!("{} {} {}\n", out_col[0], out_col[1], out_col[2])).unwrap(); } } } fn update_all_pixels(output_image: &mut Vec<Vec<Vec3>>, camera: &Camera, bvh_world: &Hitable, background: &Background, lights: &Vec<AABB>, nx: usize, ny: usize, rng: &mut rand::ThreadRng) { for j in (0..ny).rev() { for i in 0..nx { let u = ((i as f64) + rng.gen::<f64>()) / (nx as f64); let v = ((j as f64) + rng.gen::<f64>()) / (ny as f64); let r = camera.get_ray(u, v); output_image[j][i] = output_image[j][i] + color(&r, bvh_world, background, lights); } } } #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] struct ImageSummaries { w: usize, h: usize, s: usize, data: Vec<Vec<Vec3>> } fn combine_summaries(summary1: &ImageSummaries, summary2: &ImageSummaries) -> ImageSummaries { if summary1.w != summary2.w { panic!(format!("Need same widths ({} vs {})!", summary1.w, summary2.w)); } if summary1.h != summary2.h { panic!(format!("Need same heights ({} vs {})!", summary1.h, summary2.h)); } if summary1.data.len() != summary2.data.len() { panic!(format!("Inconsistent data lengths ({} {}) - upstream bug?", summary1.data.len(), summary2.data.len())); } let mut result = Vec::new(); for i in 0..summary1.data.len() { let l1 = summary1.data[i].len(); let l2 = summary2.data[i].len(); if l1 != l2 { panic!(format!( "Inconsistent row lengths (row {}: {} {}) - upstream bug?", i, l1, l2)); } let row1 = summary1.data[i].iter(); let row2 = summary2.data[i].iter(); result.push(row1.zip(row2).map(|(v1, v2)| *v1 + *v2).collect()) } ImageSummaries { w: summary1.w, h: summary1.h, s: summary1.s + summary2.s, data: result } } fn
(args: &Args) { let default_output_name = "out".to_string(); let output_name = &args.o.as_ref().unwrap_or(&default_output_name); let default_input_name = "/dev/stdin".to_string(); let input_name = &args.i.as_ref().unwrap_or(&default_input_name); let br = BufReader::new(File::open(input_name).unwrap()); let json_value = serde_json::from_reader(br).unwrap(); let scene = deserialize_scene(&json_value).unwrap(); let background = scene.background; let camera = scene.camera; let lights: Vec<_> = scene.object_list .iter() .map(|h| h.importance_distribution()) .filter(|h| h.is_some()) .map(|h| h.unwrap()) .collect(); let bvh_world = BVH::build(scene.object_list); let ny = args.h.unwrap_or(200); let nx = args.w.unwrap_or_else(|| ((ny as f64) * camera.params.aspect).round() as usize); let n_threads = args.n.unwrap_or(1); let ns = args.s.unwrap_or(100) / n_threads; let background_ref = &*background; let bvh_world_ref = &*bvh_world; println!("With {} threads", n_threads); let output_summaries: Vec<_> = (0..n_threads).into_par_iter().map(|i| { let mut output_image = Vec::<Vec<Vec3>>::new(); for _j in 0..ny { output_image.push(vec![Vec3::zero(); nx]); } let mut rng = rand::thread_rng(); for s in 1..ns+1 { update_all_pixels(&mut output_image, &camera, bvh_world_ref, background_ref, &lights, nx, ny, &mut rng); if i == 0 { eprint!("\r \r{} / {} done", s, ns); } } if i == 0 { eprintln!("\nFinished"); } ImageSummaries { w: nx, h: ny, s: ns, data: output_image } }).collect(); let mut summary = output_summaries[0].clone(); for new_summary in output_summaries.iter().skip(1) { summary = combine_summaries(&summary, &new_summary); } println!("Using {} samples", summary.s); write_image_to_file(&summary.data, summary.s, 1, &output_name); } ////////////////////////////////////////////////////////////////////////////// struct Args { pub w: Option<usize>, pub h: Option<usize>, pub s: Option<usize>, pub n: Option<usize>, pub o: Option<String>, pub i: Option<String>, pub parallel: bool } fn main() { random::init_rng(); let args: Vec<String> = env::args().collect(); let mut opts = Options::new(); opts.optopt("w", "width", "set image width in pixels", "NAME"); opts.optopt("h", "height", "set image height in pixels", "NAME"); opts.optopt("s", "samples", "set number of samples per pixel", "NAME"); opts.optopt("n", "nthreads", "number of threads, default 1", "NAME"); opts.optopt("o", "output", "set output file name", "NAME"); opts.optopt("i", "input", "set input file name", "NAME"); opts.optflag("p", "parallel", "write out pixel statistics, suited for parallel processing"); opts.optflag("?", "help", "print this help menu"); let matches = match opts.parse(&args[1..]) { Ok(m) => { m } Err(f) => { panic!(f.to_string()) } }; write_image(&(Args { w: matches.opt_str("w").and_then(|x| x.parse::<usize>().ok()), h: matches.opt_str("h").and_then(|x| x.parse::<usize>().ok()), s: matches.opt_str("s").and_then(|x| x.parse::<usize>().ok()), n: matches.opt_str("n").and_then(|x| x.parse::<usize>().ok()), o: matches.opt_str("o"), i: matches.opt_str("i"), parallel: matches.opt_present("p") })); }
write_image
identifier_name
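combine_summaries, shown in full above, panics on mismatched dimensions and otherwise adds the two accumulated images element-wise while summing the sample counts. A reduced sketch of the element-wise step, with Vec3 stood in by [f64; 3] (a hypothetical helper, not the renderer's API):

fn add_images(a: &[Vec<[f64; 3]>], b: &[Vec<[f64; 3]>]) -> Vec<Vec<[f64; 3]>> {
    assert_eq!(a.len(), b.len(), "image heights must match");
    a.iter()
        .zip(b)
        .map(|(ra, rb)| {
            assert_eq!(ra.len(), rb.len(), "row widths must match");
            ra.iter()
                .zip(rb)
                .map(|(pa, pb)| [pa[0] + pb[0], pa[1] + pb[1], pa[2] + pb[2]])
                .collect()
        })
        .collect()
}

Because this addition is associative, the serial fold over output_summaries could equally be expressed as a parallel reduce.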
main.rs
extern crate rand; extern crate getopts; #[macro_use] extern crate serde_derive; extern crate serde; extern crate serde_json; extern crate bincode; extern crate rayon; pub mod aabb; pub mod background; pub mod bvh; pub mod camera; pub mod deserialize; pub mod dielectric; pub mod disc; pub mod emitter; pub mod hitable; pub mod hitable_list; pub mod lambertian; pub mod material; pub mod metal; pub mod mixture; // pub mod phong; pub mod plane; pub mod random; pub mod ray; pub mod rectangle; pub mod scene; pub mod sampling; pub mod sphere; pub mod sphere_geometry; pub mod triangle_mesh; pub mod vector; pub mod tests; pub mod ward; use aabb::AABB; use background::*; use bvh::BVH; use camera::Camera; use deserialize::*; use disc::*; use getopts::Options; use hitable::*; use rand::Rng; use random::*; use ray::Ray; use vector::Vec3; use std::cmp; use std::env; use std::fs::File; use std::io::BufReader; use std::io::BufWriter; use std::io::Write; use rayon::prelude::*; ////////////////////////////////////////////////////////////////////////////// fn color(ray: &Ray, world: &Hitable, background: &Background, lights: &Vec<AABB>) -> Vec3 where { let mut current_ray = *ray; let mut current_attenuation = Vec3::new(1.0, 1.0, 1.0); for _depth in 0..50 { if current_attenuation.length() < 1e-8 { return Vec3::new(0.0, 0.0, 0.0) } match world.hit(&current_ray, 0.00001, 1e20) { None => { let unit_direction = vector::unit_vector(&current_ray.direction()); return background.get_background(&unit_direction) * current_attenuation; }, Some(hr) => { if !hr.material.wants_importance_sampling() || lights.len() == 0 { match hr.material.scatter(&current_ray, &hr) { material::Scatter::Bounce(next_attenuation, scattered) => { current_attenuation = current_attenuation * next_attenuation; current_ray = scattered; }, material::Scatter::Emit(emission) => { // println!("Hit light!"); return emission * current_attenuation; }, material::Scatter::Absorb => { return Vec3::new(0.0, 0.0, 0.0) } } continue; } let this_hemi = Disc::new(hr.p, hr.normal, 1.0); let light = { let chosen_light = &lights[rand_range(0, lights.len())]; let chosen_disc = chosen_light.project_to_disc_on_sphere(&hr.p); // sample from that disc let gx_sample = this_hemi.hemi_disc_subtended_angle(&chosen_disc); let gx = gx_sample.0; let sample_direction = gx_sample.1; if gx == 0.0 { (0.0, sample_direction) } else { (2.0 * std::f64::consts::PI / gx_sample.0, sample_direction) } }; let scatter = { match hr.material.scatter(&current_ray, &hr) { material::Scatter::Bounce(_attenuation, scattered) => { (hr.material.bsdf(&current_ray, &scattered, &hr.normal), scattered.direction()) } material::Scatter::Emit(_emission) => { panic!("Whaaaaa emit?!") }, material::Scatter::Absorb => { panic!("Whaaaaa absorb?!") } } }; let light_p = if light.0 < 1e-4 { 0.0 } else { light.0 }; let light_d = light.1; let scatter_p = if scatter.0 < 1e-4 { 0.0 } else { scatter.0 }; let scatter_d = scatter.1; // Veach's balance heuristic for a one-sample MIS estimator // gives these weights: let s = light_p + scatter_p; let light_w = light_p / s; let scatter_w = scatter_p / s; // println!("{} {}", light_p, scatter_p); // the classic Veach one-sample MIS estimator is // (light_w / light_p) * light_f + (scatter_w / scatter_p) * scatter_f let next_values = if (light_p > 0.0) && rand_double() < 0.5 { // sample from lights ((light_w / light_p) * 2.0, Ray::new(hr.p, light_d)) } else if scatter_p > 0.0 { ((scatter_w / scatter_p) * 2.0, Ray::new(hr.p, scatter_d)) } else { return Vec3::new(0.0, 0.0, 0.0); 
}; let albedo = hr.material.albedo(&current_ray, &next_values.1, &hr.normal); current_ray = next_values.1; current_attenuation = current_attenuation * albedo * next_values.0; } } } current_attenuation }
////////////////////////////////////////////////////////////////////////////// // my own bastardized version of a float file format, horrendously inefficient fn write_image_to_file(image: &Vec<Vec<Vec3>>, samples_so_far: usize, subsample: usize, file_prefix: &String) { println!("Writing output to {}", format!("{}.linear_rgb", file_prefix)); let mut f = BufWriter::new(File::create(format!("{}.linear_rgb", file_prefix)).unwrap()); let ny = image.len()/subsample; let nx = image[0].len()/subsample; let ns = samples_so_far as f64; f.write_fmt(format_args!("{} {}\n", nx, ny)).unwrap(); for super_j in (0..ny).rev() { for super_i in 0..nx { let mut super_pixel = Vec3::zero(); let top = cmp::min(image.len(), (super_j+1)*subsample); let right = cmp::min(image[0].len(), (super_i+1)*subsample); let h = top - super_j*subsample; let w = right - super_i*subsample; for j in (super_j*subsample..top).rev() { for i in super_i*subsample..right { super_pixel = super_pixel + image[j][i]; } } let mut out_col = super_pixel / (ns * (w as f64) * (h as f64)); f.write_fmt(format_args!("{} {} {}\n", out_col[0], out_col[1], out_col[2])).unwrap(); } } } fn update_all_pixels(output_image: &mut Vec<Vec<Vec3>>, camera: &Camera, bvh_world: &Hitable, background: &Background, lights: &Vec<AABB>, nx: usize, ny: usize, rng: &mut rand::ThreadRng) { for j in (0..ny).rev() { for i in 0..nx { let u = ((i as f64) + rng.gen::<f64>()) / (nx as f64); let v = ((j as f64) + rng.gen::<f64>()) / (ny as f64); let r = camera.get_ray(u, v); output_image[j][i] = output_image[j][i] + color(&r, bvh_world, background, lights); } } } #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] struct ImageSummaries { w: usize, h: usize, s: usize, data: Vec<Vec<Vec3>> } fn combine_summaries(summary1: &ImageSummaries, summary2: &ImageSummaries) -> ImageSummaries { if summary1.w != summary2.w { panic!(format!("Need same widths ({} vs {})!", summary1.w, summary2.w)); } if summary1.h != summary2.h { panic!(format!("Need same heights ({} vs {})!", summary1.h, summary2.h)); } if summary1.data.len() != summary2.data.len() { panic!(format!("Inconsistent data lengths ({} {}) - upstream bug?", summary1.data.len(), summary2.data.len())); } let mut result = Vec::new(); for i in 0..summary1.data.len() { let l1 = summary1.data[i].len(); let l2 = summary2.data[i].len(); if l1 != l2 { panic!(format!( "Inconsistent row lengths (row {}: {} {}) - upstream bug?", i, l1, l2)); } let row1 = summary1.data[i].iter(); let row2 = summary2.data[i].iter(); result.push(row1.zip(row2).map(|(v1, v2)| *v1 + *v2).collect()) } ImageSummaries { w: summary1.w, h: summary1.h, s: summary1.s + summary2.s, data: result } } fn write_image(args: &Args) { let default_output_name = "out".to_string(); let output_name = &args.o.as_ref().unwrap_or(&default_output_name); let default_input_name = "/dev/stdin".to_string(); let input_name = &args.i.as_ref().unwrap_or(&default_input_name); let br = BufReader::new(File::open(input_name).unwrap()); let json_value = serde_json::from_reader(br).unwrap(); let scene = deserialize_scene(&json_value).unwrap(); let background = scene.background; let camera = scene.camera; let lights: Vec<_> = scene.object_list .iter() .map(|h| h.importance_distribution()) .filter(|h| h.is_some()) .map(|h| h.unwrap()) .collect(); let bvh_world = BVH::build(scene.object_list); let ny = args.h.unwrap_or(200); let nx = args.w.unwrap_or_else(|| ((ny as f64) * camera.params.aspect).round() as usize); let n_threads = args.n.unwrap_or(1); let ns = args.s.unwrap_or(100) / n_threads; 
let background_ref = &*background; let bvh_world_ref = &*bvh_world; println!("With {} threads", n_threads); let output_summaries: Vec<_> = (0..n_threads).into_par_iter().map(|i| { let mut output_image = Vec::<Vec<Vec3>>::new(); for _j in 0..ny { output_image.push(vec![Vec3::zero(); nx]); } let mut rng = rand::thread_rng(); for s in 1..ns+1 { update_all_pixels(&mut output_image, &camera, bvh_world_ref, background_ref, &lights, nx, ny, &mut rng); if i == 0 { eprint!("\r \r{} / {} done", s, ns); } } if i == 0 { eprintln!("\nFinished"); } ImageSummaries { w: nx, h: ny, s: ns, data: output_image } }).collect(); let mut summary = output_summaries[0].clone(); for new_summary in output_summaries.iter().skip(1) { summary = combine_summaries(&summary, &new_summary); } println!("Using {} samples", summary.s); write_image_to_file(&summary.data, summary.s, 1, &output_name); } ////////////////////////////////////////////////////////////////////////////// struct Args { pub w: Option<usize>, pub h: Option<usize>, pub s: Option<usize>, pub n: Option<usize>, pub o: Option<String>, pub i: Option<String>, pub parallel: bool } fn main() { random::init_rng(); let args: Vec<String> = env::args().collect(); let mut opts = Options::new(); opts.optopt("w", "width", "set image width in pixels", "NAME"); opts.optopt("h", "height", "set image height in pixels", "NAME"); opts.optopt("s", "samples", "set number of samples per pixel", "NAME"); opts.optopt("n", "nthreads", "number of threads, default 1", "NAME"); opts.optopt("o", "output", "set output file name", "NAME"); opts.optopt("i", "input", "set input file name", "NAME"); opts.optflag("p", "parallel", "write out pixel statistics, suited for parallel processing"); opts.optflag("?", "help", "print this help menu"); let matches = match opts.parse(&args[1..]) { Ok(m) => { m } Err(f) => { panic!(f.to_string()) } }; write_image(&(Args { w: matches.opt_str("w").and_then(|x| x.parse::<usize>().ok()), h: matches.opt_str("h").and_then(|x| x.parse::<usize>().ok()), s: matches.opt_str("s").and_then(|x| x.parse::<usize>().ok()), n: matches.opt_str("n").and_then(|x| x.parse::<usize>().ok()), o: matches.opt_str("o"), i: matches.opt_str("i"), parallel: matches.opt_present("p") })); }
random_line_split
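write_image_to_file folds each subsample x subsample block into one output pixel by dividing the accumulated sum by samples times block area. A stripped-down sketch of that normalization (illustrative types; the real code additionally clamps blocks at the image edge):

fn average_block(block: &[[f64; 3]], samples_per_pixel: usize) -> [f64; 3] {
    let mut sum = [0.0f64; 3];
    for px in block {
        for c in 0..3 {
            sum[c] += px[c];
        }
    }
    // Matches the source's super_pixel / (ns * w * h) with block.len() = w*h.
    let norm = (samples_per_pixel * block.len()) as f64;
    [sum[0] / norm, sum[1] / norm, sum[2] / norm]
}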
main.rs
extern crate rand; extern crate getopts; #[macro_use] extern crate serde_derive; extern crate serde; extern crate serde_json; extern crate bincode; extern crate rayon; pub mod aabb; pub mod background; pub mod bvh; pub mod camera; pub mod deserialize; pub mod dielectric; pub mod disc; pub mod emitter; pub mod hitable; pub mod hitable_list; pub mod lambertian; pub mod material; pub mod metal; pub mod mixture; // pub mod phong; pub mod plane; pub mod random; pub mod ray; pub mod rectangle; pub mod scene; pub mod sampling; pub mod sphere; pub mod sphere_geometry; pub mod triangle_mesh; pub mod vector; pub mod tests; pub mod ward; use aabb::AABB; use background::*; use bvh::BVH; use camera::Camera; use deserialize::*; use disc::*; use getopts::Options; use hitable::*; use rand::Rng; use random::*; use ray::Ray; use vector::Vec3; use std::cmp; use std::env; use std::fs::File; use std::io::BufReader; use std::io::BufWriter; use std::io::Write; use rayon::prelude::*; ////////////////////////////////////////////////////////////////////////////// fn color(ray: &Ray, world: &Hitable, background: &Background, lights: &Vec<AABB>) -> Vec3 where { let mut current_ray = *ray; let mut current_attenuation = Vec3::new(1.0, 1.0, 1.0); for _depth in 0..50 { if current_attenuation.length() < 1e-8 { return Vec3::new(0.0, 0.0, 0.0) } match world.hit(&current_ray, 0.00001, 1e20) { None => { let unit_direction = vector::unit_vector(&current_ray.direction()); return background.get_background(&unit_direction) * current_attenuation; }, Some(hr) => { if !hr.material.wants_importance_sampling() || lights.len() == 0 { match hr.material.scatter(&current_ray, &hr) { material::Scatter::Bounce(next_attenuation, scattered) => { current_attenuation = current_attenuation * next_attenuation; current_ray = scattered; }, material::Scatter::Emit(emission) => { // println!("Hit light!"); return emission * current_attenuation; }, material::Scatter::Absorb => { return Vec3::new(0.0, 0.0, 0.0) } } continue; } let this_hemi = Disc::new(hr.p, hr.normal, 1.0); let light = { let chosen_light = &lights[rand_range(0, lights.len())]; let chosen_disc = chosen_light.project_to_disc_on_sphere(&hr.p); // sample from that disc let gx_sample = this_hemi.hemi_disc_subtended_angle(&chosen_disc); let gx = gx_sample.0; let sample_direction = gx_sample.1; if gx == 0.0 { (0.0, sample_direction) } else { (2.0 * std::f64::consts::PI / gx_sample.0, sample_direction) } }; let scatter = { match hr.material.scatter(&current_ray, &hr) { material::Scatter::Bounce(_attenuation, scattered) => { (hr.material.bsdf(&current_ray, &scattered, &hr.normal), scattered.direction()) } material::Scatter::Emit(_emission) => { panic!("Whaaaaa emit?!") }, material::Scatter::Absorb => { panic!("Whaaaaa absorb?!") } } }; let light_p = if light.0 < 1e-4 { 0.0 } else { light.0 }; let light_d = light.1; let scatter_p = if scatter.0 < 1e-4 { 0.0 } else { scatter.0 }; let scatter_d = scatter.1; // Veach's balance heuristic for a one-sample MIS estimator // gives these weights: let s = light_p + scatter_p; let light_w = light_p / s; let scatter_w = scatter_p / s; // println!("{} {}", light_p, scatter_p); // the classic Veach one-sample MIS estimator is // (light_w / light_p) * light_f + (scatter_w / scatter_p) * scatter_f let next_values = if (light_p > 0.0) && rand_double() < 0.5 { // sample from lights ((light_w / light_p) * 2.0, Ray::new(hr.p, light_d)) } else if scatter_p > 0.0 { ((scatter_w / scatter_p) * 2.0, Ray::new(hr.p, scatter_d)) } else
; let albedo = hr.material.albedo(&current_ray, &next_values.1, &hr.normal); current_ray = next_values.1; current_attenuation = current_attenuation * albedo * next_values.0; } } } current_attenuation } ////////////////////////////////////////////////////////////////////////////// // my own bastardized version of a float file format, horrendously inefficient fn write_image_to_file(image: &Vec<Vec<Vec3>>, samples_so_far: usize, subsample: usize, file_prefix: &String) { println!("Writing output to {}", format!("{}.linear_rgb", file_prefix)); let mut f = BufWriter::new(File::create(format!("{}.linear_rgb", file_prefix)).unwrap()); let ny = image.len()/subsample; let nx = image[0].len()/subsample; let ns = samples_so_far as f64; f.write_fmt(format_args!("{} {}\n", nx, ny)).unwrap(); for super_j in (0..ny).rev() { for super_i in 0..nx { let mut super_pixel = Vec3::zero(); let top = cmp::min(image.len(), (super_j+1)*subsample); let right = cmp::min(image[0].len(), (super_i+1)*subsample); let h = top - super_j*subsample; let w = right - super_i*subsample; for j in (super_j*subsample..top).rev() { for i in super_i*subsample..right { super_pixel = super_pixel + image[j][i]; } } let mut out_col = super_pixel / (ns * (w as f64) * (h as f64)); f.write_fmt(format_args!("{} {} {}\n", out_col[0], out_col[1], out_col[2])).unwrap(); } } } fn update_all_pixels(output_image: &mut Vec<Vec<Vec3>>, camera: &Camera, bvh_world: &Hitable, background: &Background, lights: &Vec<AABB>, nx: usize, ny: usize, rng: &mut rand::ThreadRng) { for j in (0..ny).rev() { for i in 0..nx { let u = ((i as f64) + rng.gen::<f64>()) / (nx as f64); let v = ((j as f64) + rng.gen::<f64>()) / (ny as f64); let r = camera.get_ray(u, v); output_image[j][i] = output_image[j][i] + color(&r, bvh_world, background, lights); } } } #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] struct ImageSummaries { w: usize, h: usize, s: usize, data: Vec<Vec<Vec3>> } fn combine_summaries(summary1: &ImageSummaries, summary2: &ImageSummaries) -> ImageSummaries { if summary1.w != summary2.w { panic!(format!("Need same widths ({} vs {})!", summary1.w, summary2.w)); } if summary1.h != summary2.h { panic!(format!("Need same heights ({} vs {})!", summary1.h, summary2.h)); } if summary1.data.len() != summary2.data.len() { panic!(format!("Inconsistent data lengths ({} {}) - upstream bug?", summary1.data.len(), summary2.data.len())); } let mut result = Vec::new(); for i in 0..summary1.data.len() { let l1 = summary1.data[i].len(); let l2 = summary2.data[i].len(); if l1 != l2 { panic!(format!( "Inconsistent row lengths (row {}: {} {}) - upstream bug?", i, l1, l2)); } let row1 = summary1.data[i].iter(); let row2 = summary2.data[i].iter(); result.push(row1.zip(row2).map(|(v1, v2)| *v1 + *v2).collect()) } ImageSummaries { w: summary1.w, h: summary1.h, s: summary1.s + summary2.s, data: result } } fn write_image(args: &Args) { let default_output_name = "out".to_string(); let output_name = &args.o.as_ref().unwrap_or(&default_output_name); let default_input_name = "/dev/stdin".to_string(); let input_name = &args.i.as_ref().unwrap_or(&default_input_name); let br = BufReader::new(File::open(input_name).unwrap()); let json_value = serde_json::from_reader(br).unwrap(); let scene = deserialize_scene(&json_value).unwrap(); let background = scene.background; let camera = scene.camera; let lights: Vec<_> = scene.object_list .iter() .map(|h| h.importance_distribution()) .filter(|h| h.is_some()) .map(|h| h.unwrap()) .collect(); let bvh_world = BVH::build(scene.object_list); let 
ny = args.h.unwrap_or(200); let nx = args.w.unwrap_or_else(|| ((ny as f64) * camera.params.aspect).round() as usize); let n_threads = args.n.unwrap_or(1); let ns = args.s.unwrap_or(100) / n_threads; let background_ref = &*background; let bvh_world_ref = &*bvh_world; println!("With {} threads", n_threads); let output_summaries: Vec<_> = (0..n_threads).into_par_iter().map(|i| { let mut output_image = Vec::<Vec<Vec3>>::new(); for _j in 0..ny { output_image.push(vec![Vec3::zero(); nx]); } let mut rng = rand::thread_rng(); for s in 1..ns+1 { update_all_pixels(&mut output_image, &camera, bvh_world_ref, background_ref, &lights, nx, ny, &mut rng); if i == 0 { eprint!("\r \r{} / {} done", s, ns); } } if i == 0 { eprintln!("\nFinished"); } ImageSummaries { w: nx, h: ny, s: ns, data: output_image } }).collect(); let mut summary = output_summaries[0].clone(); for new_summary in output_summaries.iter().skip(1) { summary = combine_summaries(&summary, &new_summary); } println!("Using {} samples", summary.s); write_image_to_file(&summary.data, summary.s, 1, &output_name); } ////////////////////////////////////////////////////////////////////////////// struct Args { pub w: Option<usize>, pub h: Option<usize>, pub s: Option<usize>, pub n: Option<usize>, pub o: Option<String>, pub i: Option<String>, pub parallel: bool } fn main() { random::init_rng(); let args: Vec<String> = env::args().collect(); let mut opts = Options::new(); opts.optopt("w", "width", "set image width in pixels", "NAME"); opts.optopt("h", "height", "set image height in pixels", "NAME"); opts.optopt("s", "samples", "set number of samples per pixel", "NAME"); opts.optopt("n", "nthreads", "number of threads, default 1", "NAME"); opts.optopt("o", "output", "set output file name", "NAME"); opts.optopt("i", "input", "set input file name", "NAME"); opts.optflag("p", "parallel", "write out pixel statistics, suited for parallel processing"); opts.optflag("?", "help", "print this help menu"); let matches = match opts.parse(&args[1..]) { Ok(m) => { m } Err(f) => { panic!(f.to_string()) } }; write_image(&(Args { w: matches.opt_str("w").and_then(|x| x.parse::<usize>().ok()), h: matches.opt_str("h").and_then(|x| x.parse::<usize>().ok()), s: matches.opt_str("s").and_then(|x| x.parse::<usize>().ok()), n: matches.opt_str("n").and_then(|x| x.parse::<usize>().ok()), o: matches.opt_str("o"), i: matches.opt_str("i"), parallel: matches.opt_present("p") })); }
{ return Vec3::new(0.0, 0.0, 0.0); }
conditional_block
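A side note on the lights extraction in write_image: the map / filter(is_some) / map(unwrap) chain is equivalent to a single filter_map, which drops the unwrap. A sketch under that assumption (collect_lights is a hypothetical name, with T standing in for AABB):

fn collect_lights<T>(candidates: impl IntoIterator<Item = Option<T>>) -> Vec<T> {
    // filter_map(|x| x) keeps the Some values and unwraps them in one pass;
    // .flatten() would be equivalent here.
    candidates.into_iter().filter_map(|x| x).collect()
}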
mod.rs
use crate::css::{is_not, CallArgs, CssString, Value}; use crate::error::Error; use crate::output::{Format, Formatted}; use crate::parser::SourcePos; use crate::sass::{FormalArgs, Name}; use crate::value::Numeric; use crate::{sass, Scope, ScopeRef}; use lazy_static::lazy_static; use std::collections::BTreeMap; use std::sync::Arc; use std::{cmp, fmt}; #[macro_use] mod macros; mod color; mod list; mod map; mod math; mod meta; mod selector; mod string; type BuiltinFn = dyn Fn(&ScopeRef) -> Result<Value, Error> + Send + Sync; /// A function that can be called from a sass value. /// /// The function can be either "builtin" (implemented in rust) or /// "user defined" (implemented in scss). #[derive(Clone, Debug, PartialEq, Eq, PartialOrd)] pub struct Function { args: FormalArgs, pos: SourcePos, body: FuncImpl, } #[derive(Clone)] pub enum FuncImpl { Builtin(Arc<BuiltinFn>), /// A user-defined function is really a closure, it has a scope /// where it is defined and a body of items. UserDefined(ScopeRef, Vec<sass::Item>), } impl PartialOrd for FuncImpl { fn partial_cmp(&self, rhs: &Self) -> Option<cmp::Ordering> { match (self, rhs) { (&FuncImpl::Builtin(..), &FuncImpl::Builtin(..)) => None, (&FuncImpl::Builtin(..), &FuncImpl::UserDefined(..)) => { Some(cmp::Ordering::Less) } (&FuncImpl::UserDefined(..), &FuncImpl::Builtin(..)) => { Some(cmp::Ordering::Greater) } ( &FuncImpl::UserDefined(ref _sa, ref a), &FuncImpl::UserDefined(ref _sb, ref b), ) => a.partial_cmp(b), } } } impl cmp::PartialEq for FuncImpl { fn eq(&self, rhs: &FuncImpl) -> bool { match (self, rhs) { ( &FuncImpl::UserDefined(ref sa, ref a), &FuncImpl::UserDefined(ref sb, ref b), ) => ScopeRef::is_same(sa, sb) && a == b, (&FuncImpl::Builtin(ref a), &FuncImpl::Builtin(ref b)) => { // Each builtin function is only created once, so this // should be ok. #[allow(clippy::vtable_address_comparisons)] Arc::ptr_eq(a, b) } _ => false, } } } impl cmp::Eq for FuncImpl {} impl fmt::Debug for FuncImpl { fn fmt(&self, out: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { FuncImpl::Builtin(_) => write!(out, "(builtin function)"), FuncImpl::UserDefined(..) => { write!(out, "(user-defined function)") } } } } trait Functions { fn builtin_fn( &mut self, name: Name, args: FormalArgs, body: Arc<BuiltinFn>, ); } impl Functions for Scope { fn builtin_fn( &mut self, name: Name, args: FormalArgs, body: Arc<BuiltinFn>, ) { let f = Function::builtin(&self.get_name(), &name, args, body); self.define_function(name, f); } } impl Function { /// Get a built-in function by name. pub fn get_builtin(name: &Name) -> Option<&'static Function> { FUNCTIONS.get(name) } /// Create a new `Function` from a rust implementation. /// /// Note: This does not expose the function in any scope, it just /// creates it. pub fn builtin( module: &str, name: &Name, args: FormalArgs, body: Arc<BuiltinFn>, ) -> Self { let pos = SourcePos::mock_function(name, &args, module); Function { args, pos, body: FuncImpl::Builtin(body), } } /// Create a new `Function` from a scss implementation. /// /// The scope is where the function is defined, used to bind any /// non-parameter names in the body. pub fn
( args: FormalArgs, pos: SourcePos, scope: ScopeRef, body: Vec<sass::Item>, ) -> Self { Function { args, pos, body: FuncImpl::UserDefined(scope, body), } } /// Call the function from a given scope and with a given set of /// arguments. pub fn call( &self, callscope: ScopeRef, args: CallArgs, ) -> Result<Value, Error> { let cs = "%%CALLING_SCOPE%%"; match self.body { FuncImpl::Builtin(ref body) => { let s = self.do_eval_args( ScopeRef::new_global(callscope.get_format()), args, )?; s.define_module(cs.into(), callscope); body(&s) } FuncImpl::UserDefined(ref defscope, ref body) => { let s = self.do_eval_args(defscope.clone(), args)?; s.define_module(cs.into(), callscope); Ok(s.eval_body(body)?.unwrap_or(Value::Null)) } } .map(Value::into_calculated) } fn do_eval_args( &self, def: ScopeRef, args: CallArgs, ) -> Result<ScopeRef, Error> { self.args.eval(def, args).map_err(|e| match e { sass::ArgsError::Eval(e) => e, ae => Error::BadArguments(ae.to_string(), self.pos.clone()), }) } } lazy_static! { static ref MODULES: BTreeMap<&'static str, Scope> = { let mut modules = BTreeMap::new(); modules.insert("sass:color", color::create_module()); modules.insert("sass:list", list::create_module()); modules.insert("sass:map", map::create_module()); modules.insert("sass:math", math::create_module()); modules.insert("sass:meta", meta::create_module()); modules.insert("sass:selector", selector::create_module()); modules.insert("sass:string", string::create_module()); modules }; } /// Get a global module (e.g. `sass:math`) by name. pub fn get_global_module(name: &str) -> Option<ScopeRef> { MODULES.get(name).map(ScopeRef::Builtin) } type FunctionMap = BTreeMap<Name, Function>; impl Functions for FunctionMap { fn builtin_fn( &mut self, name: Name, args: FormalArgs, body: Arc<BuiltinFn>, ) { let f = Function::builtin("", &name, args, body); self.insert(name, f); } } lazy_static! { static ref FUNCTIONS: FunctionMap = { let mut f = BTreeMap::new(); def!(f, if(condition, if_true, if_false), |s| { if s.get("condition")?.is_true() { Ok(s.get("if_true")?) } else { Ok(s.get("if_false")?) } }); color::expose(MODULES.get("sass:color").unwrap(), &mut f); list::expose(MODULES.get("sass:list").unwrap(), &mut f); map::expose(MODULES.get("sass:map").unwrap(), &mut f); math::expose(MODULES.get("sass:math").unwrap(), &mut f); meta::expose(MODULES.get("sass:meta").unwrap(), &mut f); selector::expose(MODULES.get("sass:selector").unwrap(), &mut f); string::expose(MODULES.get("sass:string").unwrap(), &mut f); f }; } // argument helpers for the actual functions trait CheckedArg<T> { fn named(self, name: Name) -> Result<T, Error>; } impl<T> CheckedArg<T> for Result<T, String> { fn named(self, name: Name) -> Result<T, Error> { self.map_err(|e| Error::BadArgument(name, e)) } } fn get_checked<T, F>(s: &Scope, name: Name, check: F) -> Result<T, Error> where F: Fn(Value) -> Result<T, String>, { check(s.get(name.as_ref())?).named(name) } fn get_opt_check<T, F>( s: &Scope, name: Name, check: F, ) -> Result<Option<T>, Error> where F: Fn(Value) -> Result<T, String>, { match s.get(name.as_ref())? 
{ Value::Null => Ok(None), v => check(v).named(name).map(Some), } } fn get_numeric(s: &Scope, name: &str) -> Result<Numeric, Error> { get_checked(s, name.into(), check::numeric) } fn get_integer(s: &Scope, name: Name) -> Result<i64, Error> { get_checked(s, name, check::unitless_int) } fn get_string(s: &Scope, name: &'static str) -> Result<CssString, Error> { get_checked(s, name.into(), check::string) } fn get_va_list(s: &Scope, name: Name) -> Result<Vec<Value>, Error> { get_checked(s, name, check::va_list) } fn expected_to<'a, T>(value: &'a T, cond: &str) -> String where Formatted<'a, T>: std::fmt::Display, { format!( "Expected {} to {}.", Formatted { value, format: Format::introspect() }, cond, ) } mod check { use super::{expected_to, is_not}; use crate::css::{CssString, Value}; use crate::value::{ListSeparator, Number, Numeric}; pub fn numeric(v: Value) -> Result<Numeric, String> { v.numeric_value().map_err(|v| is_not(&v, "a number")) } pub fn int(v: Value) -> Result<i64, String> { numeric(v)? .value .into_integer() .map_err(|v| is_not(&v, "an int")) } pub fn unitless(v: Value) -> Result<Number, String> { let val = numeric(v)?; if val.is_no_unit() { Ok(val.value) } else { Err(expected_to(&val, "have no units")) } } pub fn unitless_int(v: Value) -> Result<i64, String> { unitless(v)? .into_integer() .map_err(|v| is_not(&v, "an int")) } pub fn string(v: Value) -> Result<CssString, String> { match v { Value::Literal(s) => Ok(s), Value::Call(name, args) => { Ok(format!("{}({})", name, args).into()) } v => Err(is_not(&v, "a string")), } } pub fn va_list(v: Value) -> Result<Vec<Value>, String> { match v { Value::ArgList(args) => { args.check_no_named().map_err(|e| e.to_string())?; Ok(args.positional) } Value::List(v, Some(ListSeparator::Comma), _) => Ok(v), single => Ok(vec![single]), } } pub fn va_list_nonempty(v: Value) -> Result<Vec<Value>, String> { let result = va_list(v)?; if result.is_empty() { // TODO: Parameterize "selector"? Or rename fn va_selectors? Err("At least one selector must be passed.".into()) } else { Ok(result) } } } #[test] fn test_rgb() -> Result<(), Box<dyn std::error::Error>> { use crate::parser::code_span; use crate::parser::formalargs::call_args; use crate::value::Rgba; let scope = ScopeRef::new_global(Default::default()); assert_eq!( FUNCTIONS.get(&name!(rgb)).unwrap().call( scope.clone(), call_args(code_span(b"(17, 0, 225)"))?.1.evaluate(scope)? )?, Value::Color(Rgba::from_rgb(17, 0, 225).into(), None) ); Ok(()) } #[test] fn test_nth() { assert_eq!("foo", do_evaluate(&[("x", "foo, bar")], b"nth($x, 1);")) } #[cfg(test)] use crate::variablescope::test::do_evaluate;
closure
identifier_name
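The Function type above dispatches on FuncImpl::Builtin, an Arc'd closure that is created exactly once per builtin, which is what makes Arc::ptr_eq an acceptable equality test in its PartialEq impl. A simplified model of that registry pattern (all types here are stand-ins, not rsass's own):

use std::collections::BTreeMap;
use std::sync::Arc;

type Builtin = Arc<dyn Fn(i64) -> i64 + Send + Sync>;

fn main() {
    let mut fns: BTreeMap<&'static str, Builtin> = BTreeMap::new();
    fns.insert("double", Arc::new(|x| x * 2));
    let double = fns.get("double").unwrap();
    assert_eq!((double.as_ref())(21), 42);
    // Registered once, so pointer identity is a sound equality test
    // (modulo the vtable-address caveat the source itself notes).
    assert!(Arc::ptr_eq(double, fns.get("double").unwrap()));
}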
mod.rs
use crate::css::{is_not, CallArgs, CssString, Value}; use crate::error::Error; use crate::output::{Format, Formatted}; use crate::parser::SourcePos; use crate::sass::{FormalArgs, Name}; use crate::value::Numeric; use crate::{sass, Scope, ScopeRef}; use lazy_static::lazy_static; use std::collections::BTreeMap; use std::sync::Arc; use std::{cmp, fmt}; #[macro_use] mod macros; mod color; mod list; mod map; mod math; mod meta; mod selector; mod string; type BuiltinFn = dyn Fn(&ScopeRef) -> Result<Value, Error> + Send + Sync; /// A function that can be called from a sass value. /// /// The function can be either "builtin" (implemented in rust) or /// "user defined" (implemented in scss). #[derive(Clone, Debug, PartialEq, Eq, PartialOrd)] pub struct Function { args: FormalArgs, pos: SourcePos, body: FuncImpl, } #[derive(Clone)] pub enum FuncImpl { Builtin(Arc<BuiltinFn>), /// A user-defined function is really a closure, it has a scope /// where it is defined and a body of items. UserDefined(ScopeRef, Vec<sass::Item>), } impl PartialOrd for FuncImpl { fn partial_cmp(&self, rhs: &Self) -> Option<cmp::Ordering> { match (self, rhs) { (&FuncImpl::Builtin(..), &FuncImpl::Builtin(..)) => None, (&FuncImpl::Builtin(..), &FuncImpl::UserDefined(..)) => { Some(cmp::Ordering::Less) } (&FuncImpl::UserDefined(..), &FuncImpl::Builtin(..)) => { Some(cmp::Ordering::Greater) } ( &FuncImpl::UserDefined(ref _sa, ref a), &FuncImpl::UserDefined(ref _sb, ref b), ) => a.partial_cmp(b), } } } impl cmp::PartialEq for FuncImpl { fn eq(&self, rhs: &FuncImpl) -> bool { match (self, rhs) { ( &FuncImpl::UserDefined(ref sa, ref a), &FuncImpl::UserDefined(ref sb, ref b), ) => ScopeRef::is_same(sa, sb) && a == b, (&FuncImpl::Builtin(ref a), &FuncImpl::Builtin(ref b)) => { // Each builtin function is only created once, so this // should be ok. #[allow(clippy::vtable_address_comparisons)] Arc::ptr_eq(a, b) } _ => false, } } } impl cmp::Eq for FuncImpl {} impl fmt::Debug for FuncImpl { fn fmt(&self, out: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { FuncImpl::Builtin(_) => write!(out, "(builtin function)"), FuncImpl::UserDefined(..) => { write!(out, "(user-defined function)") } } } } trait Functions { fn builtin_fn( &mut self, name: Name, args: FormalArgs, body: Arc<BuiltinFn>, ); } impl Functions for Scope { fn builtin_fn( &mut self, name: Name, args: FormalArgs, body: Arc<BuiltinFn>, ) { let f = Function::builtin(&self.get_name(), &name, args, body); self.define_function(name, f); } } impl Function { /// Get a built-in function by name. pub fn get_builtin(name: &Name) -> Option<&'static Function> { FUNCTIONS.get(name) } /// Create a new `Function` from a rust implementation. /// /// Note: This does not expose the function in any scope, it just /// creates it. pub fn builtin( module: &str, name: &Name, args: FormalArgs, body: Arc<BuiltinFn>, ) -> Self { let pos = SourcePos::mock_function(name, &args, module); Function { args, pos, body: FuncImpl::Builtin(body), } } /// Create a new `Function` from a scss implementation. /// /// The scope is where the function is defined, used to bind any /// non-parameter names in the body. pub fn closure( args: FormalArgs, pos: SourcePos, scope: ScopeRef, body: Vec<sass::Item>, ) -> Self { Function { args, pos, body: FuncImpl::UserDefined(scope, body), } } /// Call the function from a given scope and with a given set of /// arguments. 
pub fn call( &self, callscope: ScopeRef, args: CallArgs, ) -> Result<Value, Error> { let cs = "%%CALLING_SCOPE%%"; match self.body { FuncImpl::Builtin(ref body) => { let s = self.do_eval_args( ScopeRef::new_global(callscope.get_format()), args, )?; s.define_module(cs.into(), callscope); body(&s) } FuncImpl::UserDefined(ref defscope, ref body) => { let s = self.do_eval_args(defscope.clone(), args)?; s.define_module(cs.into(), callscope); Ok(s.eval_body(body)?.unwrap_or(Value::Null)) } } .map(Value::into_calculated) } fn do_eval_args( &self, def: ScopeRef, args: CallArgs, ) -> Result<ScopeRef, Error> { self.args.eval(def, args).map_err(|e| match e { sass::ArgsError::Eval(e) => e, ae => Error::BadArguments(ae.to_string(), self.pos.clone()), }) } } lazy_static! { static ref MODULES: BTreeMap<&'static str, Scope> = { let mut modules = BTreeMap::new(); modules.insert("sass:color", color::create_module()); modules.insert("sass:list", list::create_module()); modules.insert("sass:map", map::create_module()); modules.insert("sass:math", math::create_module()); modules.insert("sass:meta", meta::create_module()); modules.insert("sass:selector", selector::create_module()); modules.insert("sass:string", string::create_module()); modules }; } /// Get a global module (e.g. `sass:math`) by name. pub fn get_global_module(name: &str) -> Option<ScopeRef> { MODULES.get(name).map(ScopeRef::Builtin) } type FunctionMap = BTreeMap<Name, Function>; impl Functions for FunctionMap { fn builtin_fn( &mut self, name: Name, args: FormalArgs, body: Arc<BuiltinFn>, ) { let f = Function::builtin("", &name, args, body); self.insert(name, f); } } lazy_static! { static ref FUNCTIONS: FunctionMap = { let mut f = BTreeMap::new(); def!(f, if(condition, if_true, if_false), |s| { if s.get("condition")?.is_true() { Ok(s.get("if_true")?) } else { Ok(s.get("if_false")?) } }); color::expose(MODULES.get("sass:color").unwrap(), &mut f); list::expose(MODULES.get("sass:list").unwrap(), &mut f); map::expose(MODULES.get("sass:map").unwrap(), &mut f); math::expose(MODULES.get("sass:math").unwrap(), &mut f); meta::expose(MODULES.get("sass:meta").unwrap(), &mut f); selector::expose(MODULES.get("sass:selector").unwrap(), &mut f); string::expose(MODULES.get("sass:string").unwrap(), &mut f); f }; } // argument helpers for the actual functions trait CheckedArg<T> { fn named(self, name: Name) -> Result<T, Error>; } impl<T> CheckedArg<T> for Result<T, String> { fn named(self, name: Name) -> Result<T, Error> { self.map_err(|e| Error::BadArgument(name, e)) } } fn get_checked<T, F>(s: &Scope, name: Name, check: F) -> Result<T, Error> where F: Fn(Value) -> Result<T, String>, { check(s.get(name.as_ref())?).named(name) } fn get_opt_check<T, F>( s: &Scope, name: Name, check: F, ) -> Result<Option<T>, Error> where F: Fn(Value) -> Result<T, String>, { match s.get(name.as_ref())? { Value::Null => Ok(None), v => check(v).named(name).map(Some), } } fn get_numeric(s: &Scope, name: &str) -> Result<Numeric, Error> { get_checked(s, name.into(), check::numeric) } fn get_integer(s: &Scope, name: Name) -> Result<i64, Error> { get_checked(s, name, check::unitless_int) } fn get_string(s: &Scope, name: &'static str) -> Result<CssString, Error> {
fn get_va_list(s: &Scope, name: Name) -> Result<Vec<Value>, Error> { get_checked(s, name, check::va_list) } fn expected_to<'a, T>(value: &'a T, cond: &str) -> String where Formatted<'a, T>: std::fmt::Display, { format!( "Expected {} to {}.", Formatted { value, format: Format::introspect() }, cond, ) } mod check { use super::{expected_to, is_not}; use crate::css::{CssString, Value}; use crate::value::{ListSeparator, Number, Numeric}; pub fn numeric(v: Value) -> Result<Numeric, String> { v.numeric_value().map_err(|v| is_not(&v, "a number")) } pub fn int(v: Value) -> Result<i64, String> { numeric(v)? .value .into_integer() .map_err(|v| is_not(&v, "an int")) } pub fn unitless(v: Value) -> Result<Number, String> { let val = numeric(v)?; if val.is_no_unit() { Ok(val.value) } else { Err(expected_to(&val, "have no units")) } } pub fn unitless_int(v: Value) -> Result<i64, String> { unitless(v)? .into_integer() .map_err(|v| is_not(&v, "an int")) } pub fn string(v: Value) -> Result<CssString, String> { match v { Value::Literal(s) => Ok(s), Value::Call(name, args) => { Ok(format!("{}({})", name, args).into()) } v => Err(is_not(&v, "a string")), } } pub fn va_list(v: Value) -> Result<Vec<Value>, String> { match v { Value::ArgList(args) => { args.check_no_named().map_err(|e| e.to_string())?; Ok(args.positional) } Value::List(v, Some(ListSeparator::Comma), _) => Ok(v), single => Ok(vec![single]), } } pub fn va_list_nonempty(v: Value) -> Result<Vec<Value>, String> { let result = va_list(v)?; if result.is_empty() { // TODO: Parameterize "selector"? Or rename fn va_selectors? Err("At least one selector must be passed.".into()) } else { Ok(result) } } } #[test] fn test_rgb() -> Result<(), Box<dyn std::error::Error>> { use crate::parser::code_span; use crate::parser::formalargs::call_args; use crate::value::Rgba; let scope = ScopeRef::new_global(Default::default()); assert_eq!( FUNCTIONS.get(&name!(rgb)).unwrap().call( scope.clone(), call_args(code_span(b"(17, 0, 225)"))?.1.evaluate(scope)? )?, Value::Color(Rgba::from_rgb(17, 0, 225).into(), None) ); Ok(()) } #[test] fn test_nth() { assert_eq!("foo", do_evaluate(&[("x", "foo, bar")], b"nth($x, 1);")) } #[cfg(test)] use crate::variablescope::test::do_evaluate;
get_checked(s, name.into(), check::string) }
random_line_split
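Each row of this dump stores a file's prefix, suffix, middle, and fim_type (the same columns listed at the top of the file); the original source is recovered as prefix + middle + suffix. A minimal sketch of that reassembly, using abbreviated excerpts from the mod.rs row above (the string values here are illustrative fragments, not the full fields):

def reassemble(prefix: str, middle: str, suffix: str) -> str:
    # FIM rows store the pieces out of order (suffix before middle);
    # the source file is always prefix + middle + suffix.
    return prefix + middle + suffix

prefix = "fn get_string(s: &Scope, name: &'static str) -> Result<CssString, Error> {\n"
middle = "    get_checked(s, name.into(), check::string)\n}"
suffix = "\n\nfn get_va_list(s: &Scope, name: Name) -> Result<Vec<Value>, Error> {"
print(reassemble(prefix, middle, suffix))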
mod.rs
use crate::css::{is_not, CallArgs, CssString, Value}; use crate::error::Error; use crate::output::{Format, Formatted}; use crate::parser::SourcePos; use crate::sass::{FormalArgs, Name}; use crate::value::Numeric; use crate::{sass, Scope, ScopeRef}; use lazy_static::lazy_static; use std::collections::BTreeMap; use std::sync::Arc; use std::{cmp, fmt}; #[macro_use] mod macros; mod color; mod list; mod map; mod math; mod meta; mod selector; mod string; type BuiltinFn = dyn Fn(&ScopeRef) -> Result<Value, Error> + Send + Sync; /// A function that can be called from a sass value. /// /// The function can be either "builtin" (implemented in rust) or /// "user defined" (implemented in scss). #[derive(Clone, Debug, PartialEq, Eq, PartialOrd)] pub struct Function { args: FormalArgs, pos: SourcePos, body: FuncImpl, } #[derive(Clone)] pub enum FuncImpl { Builtin(Arc<BuiltinFn>), /// A user-defined function is really a closure, it has a scope /// where it is defined and a body of items. UserDefined(ScopeRef, Vec<sass::Item>), } impl PartialOrd for FuncImpl { fn partial_cmp(&self, rhs: &Self) -> Option<cmp::Ordering> { match (self, rhs) { (&FuncImpl::Builtin(..), &FuncImpl::Builtin(..)) => None, (&FuncImpl::Builtin(..), &FuncImpl::UserDefined(..)) => { Some(cmp::Ordering::Less) } (&FuncImpl::UserDefined(..), &FuncImpl::Builtin(..)) => { Some(cmp::Ordering::Greater) } ( &FuncImpl::UserDefined(ref _sa, ref a), &FuncImpl::UserDefined(ref _sb, ref b), ) => a.partial_cmp(b), } } } impl cmp::PartialEq for FuncImpl { fn eq(&self, rhs: &FuncImpl) -> bool { match (self, rhs) { ( &FuncImpl::UserDefined(ref sa, ref a), &FuncImpl::UserDefined(ref sb, ref b), ) => ScopeRef::is_same(sa, sb) && a == b, (&FuncImpl::Builtin(ref a), &FuncImpl::Builtin(ref b)) => { // Each builtin function is only created once, so this // should be ok. #[allow(clippy::vtable_address_comparisons)] Arc::ptr_eq(a, b) } _ => false, } } } impl cmp::Eq for FuncImpl {} impl fmt::Debug for FuncImpl { fn fmt(&self, out: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { FuncImpl::Builtin(_) => write!(out, "(builtin function)"), FuncImpl::UserDefined(..) => { write!(out, "(user-defined function)") } } } } trait Functions { fn builtin_fn( &mut self, name: Name, args: FormalArgs, body: Arc<BuiltinFn>, ); } impl Functions for Scope { fn builtin_fn( &mut self, name: Name, args: FormalArgs, body: Arc<BuiltinFn>, ) { let f = Function::builtin(&self.get_name(), &name, args, body); self.define_function(name, f); } } impl Function { /// Get a built-in function by name. pub fn get_builtin(name: &Name) -> Option<&'static Function> { FUNCTIONS.get(name) } /// Create a new `Function` from a rust implementation. /// /// Note: This does not expose the function in any scope, it just /// creates it. pub fn builtin( module: &str, name: &Name, args: FormalArgs, body: Arc<BuiltinFn>, ) -> Self { let pos = SourcePos::mock_function(name, &args, module); Function { args, pos, body: FuncImpl::Builtin(body), } } /// Create a new `Function` from a scss implementation. /// /// The scope is where the function is defined, used to bind any /// non-parameter names in the body. pub fn closure( args: FormalArgs, pos: SourcePos, scope: ScopeRef, body: Vec<sass::Item>, ) -> Self
/// Call the function from a given scope and with a given set of /// arguments. pub fn call( &self, callscope: ScopeRef, args: CallArgs, ) -> Result<Value, Error> { let cs = "%%CALLING_SCOPE%%"; match self.body { FuncImpl::Builtin(ref body) => { let s = self.do_eval_args( ScopeRef::new_global(callscope.get_format()), args, )?; s.define_module(cs.into(), callscope); body(&s) } FuncImpl::UserDefined(ref defscope, ref body) => { let s = self.do_eval_args(defscope.clone(), args)?; s.define_module(cs.into(), callscope); Ok(s.eval_body(body)?.unwrap_or(Value::Null)) } } .map(Value::into_calculated) } fn do_eval_args( &self, def: ScopeRef, args: CallArgs, ) -> Result<ScopeRef, Error> { self.args.eval(def, args).map_err(|e| match e { sass::ArgsError::Eval(e) => e, ae => Error::BadArguments(ae.to_string(), self.pos.clone()), }) } } lazy_static! { static ref MODULES: BTreeMap<&'static str, Scope> = { let mut modules = BTreeMap::new(); modules.insert("sass:color", color::create_module()); modules.insert("sass:list", list::create_module()); modules.insert("sass:map", map::create_module()); modules.insert("sass:math", math::create_module()); modules.insert("sass:meta", meta::create_module()); modules.insert("sass:selector", selector::create_module()); modules.insert("sass:string", string::create_module()); modules }; } /// Get a global module (e.g. `sass:math`) by name. pub fn get_global_module(name: &str) -> Option<ScopeRef> { MODULES.get(name).map(ScopeRef::Builtin) } type FunctionMap = BTreeMap<Name, Function>; impl Functions for FunctionMap { fn builtin_fn( &mut self, name: Name, args: FormalArgs, body: Arc<BuiltinFn>, ) { let f = Function::builtin("", &name, args, body); self.insert(name, f); } } lazy_static! { static ref FUNCTIONS: FunctionMap = { let mut f = BTreeMap::new(); def!(f, if(condition, if_true, if_false), |s| { if s.get("condition")?.is_true() { Ok(s.get("if_true")?) } else { Ok(s.get("if_false")?) } }); color::expose(MODULES.get("sass:color").unwrap(), &mut f); list::expose(MODULES.get("sass:list").unwrap(), &mut f); map::expose(MODULES.get("sass:map").unwrap(), &mut f); math::expose(MODULES.get("sass:math").unwrap(), &mut f); meta::expose(MODULES.get("sass:meta").unwrap(), &mut f); selector::expose(MODULES.get("sass:selector").unwrap(), &mut f); string::expose(MODULES.get("sass:string").unwrap(), &mut f); f }; } // argument helpers for the actual functions trait CheckedArg<T> { fn named(self, name: Name) -> Result<T, Error>; } impl<T> CheckedArg<T> for Result<T, String> { fn named(self, name: Name) -> Result<T, Error> { self.map_err(|e| Error::BadArgument(name, e)) } } fn get_checked<T, F>(s: &Scope, name: Name, check: F) -> Result<T, Error> where F: Fn(Value) -> Result<T, String>, { check(s.get(name.as_ref())?).named(name) } fn get_opt_check<T, F>( s: &Scope, name: Name, check: F, ) -> Result<Option<T>, Error> where F: Fn(Value) -> Result<T, String>, { match s.get(name.as_ref())? 
{ Value::Null => Ok(None), v => check(v).named(name).map(Some), } } fn get_numeric(s: &Scope, name: &str) -> Result<Numeric, Error> { get_checked(s, name.into(), check::numeric) } fn get_integer(s: &Scope, name: Name) -> Result<i64, Error> { get_checked(s, name, check::unitless_int) } fn get_string(s: &Scope, name: &'static str) -> Result<CssString, Error> { get_checked(s, name.into(), check::string) } fn get_va_list(s: &Scope, name: Name) -> Result<Vec<Value>, Error> { get_checked(s, name, check::va_list) } fn expected_to<'a, T>(value: &'a T, cond: &str) -> String where Formatted<'a, T>: std::fmt::Display, { format!( "Expected {} to {}.", Formatted { value, format: Format::introspect() }, cond, ) } mod check { use super::{expected_to, is_not}; use crate::css::{CssString, Value}; use crate::value::{ListSeparator, Number, Numeric}; pub fn numeric(v: Value) -> Result<Numeric, String> { v.numeric_value().map_err(|v| is_not(&v, "a number")) } pub fn int(v: Value) -> Result<i64, String> { numeric(v)? .value .into_integer() .map_err(|v| is_not(&v, "an int")) } pub fn unitless(v: Value) -> Result<Number, String> { let val = numeric(v)?; if val.is_no_unit() { Ok(val.value) } else { Err(expected_to(&val, "have no units")) } } pub fn unitless_int(v: Value) -> Result<i64, String> { unitless(v)? .into_integer() .map_err(|v| is_not(&v, "an int")) } pub fn string(v: Value) -> Result<CssString, String> { match v { Value::Literal(s) => Ok(s), Value::Call(name, args) => { Ok(format!("{}({})", name, args).into()) } v => Err(is_not(&v, "a string")), } } pub fn va_list(v: Value) -> Result<Vec<Value>, String> { match v { Value::ArgList(args) => { args.check_no_named().map_err(|e| e.to_string())?; Ok(args.positional) } Value::List(v, Some(ListSeparator::Comma), _) => Ok(v), single => Ok(vec![single]), } } pub fn va_list_nonempty(v: Value) -> Result<Vec<Value>, String> { let result = va_list(v)?; if result.is_empty() { // TODO: Parameterize "selector"? Or rename fn va_selectors? Err("At least one selector must be passed.".into()) } else { Ok(result) } } } #[test] fn test_rgb() -> Result<(), Box<dyn std::error::Error>> { use crate::parser::code_span; use crate::parser::formalargs::call_args; use crate::value::Rgba; let scope = ScopeRef::new_global(Default::default()); assert_eq!( FUNCTIONS.get(&name!(rgb)).unwrap().call( scope.clone(), call_args(code_span(b"(17, 0, 225)"))?.1.evaluate(scope)? )?, Value::Color(Rgba::from_rgb(17, 0, 225).into(), None) ); Ok(()) } #[test] fn test_nth() { assert_eq!("foo", do_evaluate(&[("x", "foo, bar")], b"nth($x, 1);")) } #[cfg(test)] use crate::variablescope::test::do_evaluate;
{ Function { args, pos, body: FuncImpl::UserDefined(scope, body), } }
identifier_body
tic_tac_toe.py
""" Tic Tac Toe TODO * Refactor code to make it more AI tournament-friendly * ai_move function must only take in game state next time ^ re factor as class? (keep track of game_tree and trash talk flag) """ import collections from functools import partial import logging import random import numpy as np try: import ujson as json except ImportError: import json GAME_TREE_FILE = "tic-tac-toe_game_tree.json" class TicTacToe(object): """TicTacToe object representing a game state Notes: * game states are encoded as 9-character strings * valid characters are "1", "2", and "_" """ def __init__(self, state, symbols=("O", "X")): """ PARAMETERS state (str): 9 character string representing state symbols (tuple): 2-element tuple representing game symbols * tuple in first position goes first; vice versa """ assert len(symbols) == 2, "`symbols` must have exactly 2 elements" self.state = state self.counter = collections.Counter(state) self.validate_state(state) self.symbols = symbols self.data = np.array(list(state)) self.board = self.data.reshape((3, 3)) self.game_status = self.check_game_status() self.next_moves = self.find_next_states() self.next_states = list(self.next_moves.values()) def validate_state(self, state): """Check if a state is valid""" assert isinstance(state, str), "`state` must be of type str" assert len(state) == 9, "`state` must have exactly 9 elements" state = state.upper() assert set(state) <= { "1", "2", "_", }, "`state` elements must be in {'1', '2', '_'}" assert ( 0 <= self.counter["1"] - self.counter["2"] <= 1 ), "Invalid state: Players must alternate." def check_game_status(self): """Check status of game (turn, tie, win)""" for player in ("1", "2"): row_win = np.apply_along_axis( lambda x: set(x) == {player}, 1, self.board ).any() col_win = np.apply_along_axis( lambda x: set(x) == {player}, 0, self.board ).any() d1_win = set(self.data[[0, 4, 8]]) == {player} d2_win = set(self.data[[2, 4, 6]]) == {player} if any([row_win, col_win, d1_win, d2_win]): return ("win", player) if self.counter["_"] == 0: return ("tie", None) else: return ("turn", "1" if self.counter["1"] == self.counter["2"] else "2") def find_next_states(self): """Determine possible next moves. Returns a dict {index: new_state}""" status, player = self.game_status moves = {} if status == "turn": for idx in np.where(self.data == "_")[0]: new_move = self.data.copy() new_move[idx] = player moves[idx] = "".join(new_move) return moves def printable_board(self, indent_char="\t", legend_hint=True, symbols=None):
def gen_game_tree(state_init): """Generate full game tree from initial state""" current_path = [state_init] game_tree = {} while current_path: cur_state = current_path[-1] if cur_state not in game_tree: ttt = TicTacToe(cur_state) game_tree[cur_state] = { "unexplored": ttt.next_states, "explored": [], "status": ttt.game_status, } if game_tree[cur_state]["unexplored"]: current_path.append(game_tree[cur_state]["unexplored"].pop(0)) else: explored = current_path.pop(-1) if explored != state_init: game_tree[current_path[-1]]["explored"].append(explored) status, player = game_tree[cur_state]["status"] if status == "tie": value = 0 outcomes = {0: 1} elif status == "win": value = -1 if player == "1" else 1 outcomes = {value: 1} else: value = (min if player == "1" else max)( [ game_tree[state]["value"] for state in game_tree[cur_state]["explored"] ] ) outcomes = {} for state in game_tree[cur_state]["explored"]: for res, res_ct in game_tree[state]["outcomes"].items(): outcomes[res] = outcomes.get(res, 0) + res_ct game_tree[cur_state]["value"] = value game_tree[cur_state]["outcomes"] = outcomes return game_tree def answer_exercise(): """Function to answer exercise in Chapter 2 section III""" state = "1212__21_" ttt = TicTacToe(state) game_tree = gen_game_tree(state) print( f"The value of game state:\n{ttt.printable_board(' ', legend_hint=False)}\n" f"is {game_tree[state]['value']}" ) return game_tree[state]["value"] def learn(state="_________"): """Build game tree and export given initial state""" game_tree = gen_game_tree(state) with open(GAME_TREE_FILE, "w") as gt_file: json.dump(game_tree, gt_file, indent=4) def human(gstate: TicTacToe, *args): """Function for a human player""" return input_with_validation("Please enter move.", list(gstate.next_moves.keys())) def ai_w_level(gstate: TicTacToe, game_tree, level=3): """AI with levels Level Descriptions * 0 = stupid * 1 = easy * 2 = medium * 3 = hard * 4 = unfair """ assert isinstance(level, int), "`level` must be `int`" assert 0 <= level <= 4, "level values must be from 0 to 4" seed = random.random() logging.debug(f"seed value: {seed:.3f}") if level == 0: ai_func = ai_derp elif level == 1: ai_func = ai_derp if seed <= 0.3 else ai_strategy1 elif level == 2: ai_func = ai_derp if seed <= 0.2 else ai_strategy2 elif level == 3: ai_func = ai_derp if seed <= 0.1 else ai_strategy3 elif level == 4: ai_func = ai_strategy3 return ai_func(gstate, game_tree) def ai_derp(gstate: TicTacToe, *args): """AI that randomly picks the next move""" return random.choice(list(gstate.next_moves.keys())) def ai_strategy1(gstate: TicTacToe, game_tree): """Strategy assuming opponent plays optimally""" status, player = gstate.game_status if status != "turn": logging.warning("Game status = %s. No move needed.", status) return None mod = -1 if player == "1" else 1 next_move_vals = { idx: mod * game_tree[state]["value"] for idx, state in gstate.next_moves.items() } max_val = max(next_move_vals.values()) moves = [idx for idx, val in next_move_vals.items() if val == max_val] logging.debug("moves: %s; value: %i", moves, max_val) move = random.choice(moves) return move def ai_strategy2(gstate: TicTacToe, game_tree): """Strategy maximizing the number of paths to winning end states""" status, player = gstate.game_status if status != "turn": logging.warning("Game status = %s. 
No move needed.", status) return None win, lose = (-1, 1) if player == "1" else (1, -1) next_move_vals = { idx: win * game_tree[state]["value"] for idx, state in gstate.next_moves.items() } max_val = max(next_move_vals.values()) moves = { idx: ( game_tree[gstate.next_moves[idx]]["outcomes"].get(str(win), 0), game_tree[gstate.next_moves[idx]]["outcomes"].get(str(0), 0), game_tree[gstate.next_moves[idx]]["outcomes"].get(str(lose), 0) ) for idx, val in next_move_vals.items() if val == max_val } win_ct = {idx: vals[0] for idx, vals in moves.items()} win_pct = {idx: vals[0] / sum(vals) for idx, vals in moves.items()} lose_pct = {idx: vals[2] / sum(vals) for idx, vals in moves.items()} wl_ratio = {idx: vals[0] / max(vals[2], 0.5) for idx, vals in moves.items()} # criteria, agg_func = lose_pct, min # criteria, agg_func = win_pct, max criteria, agg_func = wl_ratio, max if max_val == 1 and 1 in win_ct.values(): move = [idx for idx, val in win_ct.items() if val == 1][0] else: move = random.choice( [idx for idx, val in criteria.items() if val == agg_func(criteria.values())] ) logging.debug("move: %i; value: %i, win paths %%: %.1f%%, lose paths %%: %.1f%%, moves: %s\n", move, max_val, win_pct[move] * 100, lose_pct[move] * 100, moves) # trash talk if max_val == 1: print( "*beep* *boop* *beep*" " -=[ I calculate chances of winning to be 100% ]=- " "*beep* *boop* *beep*" ) return move def ai_strategy3(gstate: TicTacToe, game_tree): """AI strategy that maximizes the opponent's losing moves in the next turn""" status, player = gstate.game_status if status != "turn": logging.warning("Game status = %s. No move needed.", status) return None win, lose = (-1, 1) if player == "1" else (1, -1) move_vals = { move: win * game_tree[state]["value"] for move, state in gstate.next_moves.items() } max_val = max(move_vals.values()) if max_val == 1: # ai_strategy2 can handle "won" states return ai_strategy2(gstate, game_tree) else: ok_moves = [move for move, val in move_vals.items() if val == max_val] move_vals = { move: collections.Counter( [ game_tree[state2]["value"] for state2 in game_tree[gstate.next_moves[move]]["explored"] ] ) for move in ok_moves } move_eval = { move: val_ctr.get(win, 0) / val_ctr.get(lose, 0.5) for move, val_ctr in move_vals.items() } max_win_pct = max(move_eval.values()) good_moves = [move for move, win_pct in move_eval.items() if win_pct == max_win_pct] move = random.choice(good_moves) all_ct = sum(move_vals[move].values()) win_ct = move_vals[move].get(win, 0) logging.debug( "move: %i; value: %i, win %%: %.1f%%, moves: %s\n", move, win * game_tree[gstate.state]["value"], win_ct / max(all_ct, 0.1) * 100, move_vals ) return move def input_with_validation(text, choices): """Take input with validation""" choice_vals = set(map(str, choices)) while True: val = input(f"{text} | choices={choices}: ") if val in choice_vals: return val else: print(f"{val} is not a valid value. 
Please choose from: {choices}") def start_game(player1, player2, symbols=("O", "X")): """Starts a command line tic tac toe game between 2 players (bot or human)""" assert len(symbols) == 2, "`symbols` must have exactly 2 elements" gstate = TicTacToe("_________", symbols=symbols) with open(GAME_TREE_FILE, "r") as gt_file: game_tree = json.load(gt_file) while True: status, player = gstate.game_status if status == "turn": print(f"\n=== Player {player}'s turn:\n\n") print(gstate.printable_board(legend_hint=True)) print("\n") if player == "1": p_move = player1(gstate, game_tree) else: p_move = player2(gstate, game_tree) print(f"\n>>> Player {player} has chosen: {p_move}") new_state = gstate.data.copy() new_state[int(p_move)] = player gstate = TicTacToe("".join(new_state), symbols=symbols) else: print('\n') print(gstate.printable_board(legend_hint=True)) if status == "win": print(f"\n~~~~~ Player {player} wins! ~~~~~\n") else: print("\n~~~~~ It's a Tie! ~~~~~\n") return gstate.game_status def menu(n_player=None, symbols=("O", "X"), ai_1=None, ai_2=None): """start CLI based game menu PARAMETERS n_player (int): number of players * 0 = ai_1 vs ai_2; order decided at random * 1 = human vs ai_1; order decided by player * 2 = human vs human symbols (tuple): 2 element tuple for board symbols (in order) ai_1, ai_2 (callable): ai function that takes the following as parameters: * gstate (TicTacToe): game state * game_tree (dict): game tree JSON * TODO: in the future, each AI class should store its own game tree """ assert len(symbols) == 2, "`symbols` must have exactly 2 elements" ai_1 = ai_1 or ai_strategy3 ai_2 = ai_2 or ai_derp print("\n*** Let's play TIC TAC TOE! ***\n") n_player = n_player if n_player is not None else int( input_with_validation("Choose number of players.", ["0", "1", "2"]) ) if n_player < 2: if n_player == 1: player_symbol = input_with_validation( f"Choose symbol ({symbols[0]} goes first).", symbols) player_order = symbols.index(player_symbol) player1, player2 = [human, ai_1][::(1 - player_order * 2)] else: if random.random() > 0.5: print("\n\t* O: ai_2, X: ai_1...") player1, player2 = ai_2, ai_1 else: print("\n\t* O: ai_1, X: ai_2...") player1, player2 = ai_1, ai_2 else: player1, player2 = human, human start_game(player1, player2, symbols=symbols) if __name__ == "__main__": logging.basicConfig(level=logging.INFO) LEVEL_DESCS = ["stupid", "easy", "normal", "hard", "unfair"] LEVEL = 2 print(f"\n[ You're playing with an AI at level {LEVEL} ({LEVEL_DESCS[LEVEL]}) ]\n") menu(n_player=1, ai_1=partial(ai_w_level, level=LEVEL), symbols=("O","X"))
"""Returns a string representing the game board for printing""" symbols = symbols or self.symbols assert len(symbols) == 2, "`symbols` must have exactly 2 elements" data_symbols = self.data.copy() for orig, new in zip(("1", "2"), symbols): data_symbols[data_symbols == orig] = new board_symbols = data_symbols.reshape((3, 3)) if legend_hint: legend_board = np.where( self.data == "_", range(9), " ").reshape((3, 3)) return "\n".join( [indent_char + "GAME | INDEX"] + [indent_char + "===== | ====="] + [ indent_char + " ".join(b_row) + " | " + " ".join(l_row) for b_row, l_row in zip(board_symbols, legend_board) ] ) else: return "\n".join([indent_char + " ".join(row) for row in board_symbols])
identifier_body
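gen_game_tree in the row above assigns every state a minimax value: player "1" minimizes, player "2" maximizes, a player-1 win scores -1, a player-2 win scores +1, and a tie scores 0. The same rule written as a short recursive sketch that reuses the TicTacToe class from this file (memoized, since the full game has only a few thousand reachable states):

def state_value(state, cache=None):
    # Recursive restatement of the value rule used by gen_game_tree.
    cache = {} if cache is None else cache
    if state in cache:
        return cache[state]
    ttt = TicTacToe(state)
    status, player = ttt.game_status
    if status == "tie":
        value = 0
    elif status == "win":
        value = -1 if player == "1" else 1
    else:
        agg = min if player == "1" else max
        value = agg(state_value(s, cache) for s in ttt.next_states)
    cache[state] = value
    return value

# Perfect play from the empty board is a tie:
assert state_value("_________") == 0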
tic_tac_toe.py
""" Tic Tac Toe TODO * Refactor code to make it more AI tournament-friendly * ai_move function must only take in game state next time ^ re factor as class? (keep track of game_tree and trash talk flag) """ import collections from functools import partial import logging import random import numpy as np try: import ujson as json except ImportError: import json GAME_TREE_FILE = "tic-tac-toe_game_tree.json" class TicTacToe(object): """TicTacToe object representing a game state Notes: * game states are encoded as 9-character strings * valid characters are "1", "2", and "_" """ def __init__(self, state, symbols=("O", "X")): """ PARAMETERS state (str): 9 character string representing state symbols (tuple): 2-element tuple representing game symbols * tuple in first position goes first; vice versa """ assert len(symbols) == 2, "`symbols` must have exactly 2 elements" self.state = state self.counter = collections.Counter(state) self.validate_state(state) self.symbols = symbols self.data = np.array(list(state)) self.board = self.data.reshape((3, 3)) self.game_status = self.check_game_status() self.next_moves = self.find_next_states() self.next_states = list(self.next_moves.values()) def validate_state(self, state): """Check if a state is valid""" assert isinstance(state, str), "`state` must be of type str" assert len(state) == 9, "`state` must have exactly 9 elements" state = state.upper() assert set(state) <= { "1", "2", "_", }, "`state` elements must be in {'1', '2', '_'}" assert ( 0 <= self.counter["1"] - self.counter["2"] <= 1 ), "Invalid state: Players must alternate." def check_game_status(self): """Check status of game (turn, tie, win)""" for player in ("1", "2"): row_win = np.apply_along_axis( lambda x: set(x) == {player}, 1, self.board ).any() col_win = np.apply_along_axis( lambda x: set(x) == {player}, 0, self.board ).any() d1_win = set(self.data[[0, 4, 8]]) == {player} d2_win = set(self.data[[2, 4, 6]]) == {player} if any([row_win, col_win, d1_win, d2_win]): return ("win", player) if self.counter["_"] == 0: return ("tie", None) else: return ("turn", "1" if self.counter["1"] == self.counter["2"] else "2") def
(self): """Determine possible next moves. Returns a dict {index: new_state}""" status, player = self.game_status moves = {} if status == "turn": for idx in np.where(self.data == "_")[0]: new_move = self.data.copy() new_move[idx] = player moves[idx] = "".join(new_move) return moves def printable_board(self, indent_char="\t", legend_hint=True, symbols=None): """Returns a string representing the game board for printing""" symbols = symbols or self.symbols assert len(symbols) == 2, "`symbols` must have exactly 2 elements" data_symbols = self.data.copy() for orig, new in zip(("1", "2"), symbols): data_symbols[data_symbols == orig] = new board_symbols = data_symbols.reshape((3, 3)) if legend_hint: legend_board = np.where( self.data == "_", range(9), " ").reshape((3, 3)) return "\n".join( [indent_char + "GAME | INDEX"] + [indent_char + "===== | ====="] + [ indent_char + " ".join(b_row) + " | " + " ".join(l_row) for b_row, l_row in zip(board_symbols, legend_board) ] ) else: return "\n".join([indent_char + " ".join(row) for row in board_symbols]) def gen_game_tree(state_init): """Generate full game tree from initial state""" current_path = [state_init] game_tree = {} while current_path: cur_state = current_path[-1] if cur_state not in game_tree: ttt = TicTacToe(cur_state) game_tree[cur_state] = { "unexplored": ttt.next_states, "explored": [], "status": ttt.game_status, } if game_tree[cur_state]["unexplored"]: current_path.append(game_tree[cur_state]["unexplored"].pop(0)) else: explored = current_path.pop(-1) if explored != state_init: game_tree[current_path[-1]]["explored"].append(explored) status, player = game_tree[cur_state]["status"] if status == "tie": value = 0 outcomes = {0: 1} elif status == "win": value = -1 if player == "1" else 1 outcomes = {value: 1} else: value = (min if player == "1" else max)( [ game_tree[state]["value"] for state in game_tree[cur_state]["explored"] ] ) outcomes = {} for state in game_tree[cur_state]["explored"]: for res, res_ct in game_tree[state]["outcomes"].items(): outcomes[res] = outcomes.get(res, 0) + res_ct game_tree[cur_state]["value"] = value game_tree[cur_state]["outcomes"] = outcomes return game_tree def answer_exercise(): """Function to answer exercise in Chapter 2 section III""" state = "1212__21_" ttt = TicTacToe(state) game_tree = gen_game_tree(state) print( f"The value of game state:\n{ttt.printable_board(' ', legend_hint=False)}\n" f"is {game_tree[state]['value']}" ) return game_tree[state]["value"] def learn(state="_________"): """Build game tree and export given initial state""" game_tree = gen_game_tree(state) with open(GAME_TREE_FILE, "w") as gt_file: json.dump(game_tree, gt_file, indent=4) def human(gstate: TicTacToe, *args): """Function for a human player""" return input_with_validation("Please enter move.", list(gstate.next_moves.keys())) def ai_w_level(gstate: TicTacToe, game_tree, level=3): """AI with levels Level Descriptions * 0 = stupid * 1 = easy * 2 = medium * 3 = hard * 4 = unfair """ assert isinstance(level, int), "`level` must be `int`" assert 0 <= level <= 4, "level values must be from 0 to 4" seed = random.random() logging.debug(f"seed value: {seed:.3f}") if level == 0: ai_func = ai_derp elif level == 1: ai_func = ai_derp if seed <= 0.3 else ai_strategy1 elif level == 2: ai_func = ai_derp if seed <= 0.2 else ai_strategy2 elif level == 3: ai_func = ai_derp if seed <= 0.1 else ai_strategy3 elif level == 4: ai_func = ai_strategy3 return ai_func(gstate, game_tree) def ai_derp(gstate: TicTacToe, *args): """AI that randomly picks the 
next move""" return random.choice(list(gstate.next_moves.keys())) def ai_strategy1(gstate: TicTacToe, game_tree): """Strategy assuming opponent plays optimally""" status, player = gstate.game_status if status != "turn": logging.warning("Game status = %s. No move needed.", status) return None mod = -1 if player == "1" else 1 next_move_vals = { idx: mod * game_tree[state]["value"] for idx, state in gstate.next_moves.items() } max_val = max(next_move_vals.values()) moves = [idx for idx, val in next_move_vals.items() if val == max_val] logging.debug("moves: %s; value: %i", moves, max_val) move = random.choice(moves) return move def ai_strategy2(gstate: TicTacToe, game_tree): """Strategy maximizing the number of paths to winning end states""" status, player = gstate.game_status if status != "turn": logging.warning("Game status = %s. No move needed.", status) return None win, lose = (-1, 1) if player == "1" else (1, -1) next_move_vals = { idx: win * game_tree[state]["value"] for idx, state in gstate.next_moves.items() } max_val = max(next_move_vals.values()) moves = { idx: ( game_tree[gstate.next_moves[idx]]["outcomes"].get(str(win), 0), game_tree[gstate.next_moves[idx]]["outcomes"].get(str(0), 0), game_tree[gstate.next_moves[idx]]["outcomes"].get(str(lose), 0) ) for idx, val in next_move_vals.items() if val == max_val } win_ct = {idx: vals[0] for idx, vals in moves.items()} win_pct = {idx: vals[0] / sum(vals) for idx, vals in moves.items()} lose_pct = {idx: vals[2] / sum(vals) for idx, vals in moves.items()} wl_ratio = {idx: vals[0] / max(vals[2], 0.5) for idx, vals in moves.items()} # criteria, agg_func = lose_pct, min # criteria, agg_func = win_pct, max criteria, agg_func = wl_ratio, max if max_val == 1 and 1 in win_ct.values(): move = [idx for idx, val in win_ct.items() if val == 1][0] else: move = random.choice( [idx for idx, val in criteria.items() if val == agg_func(criteria.values())] ) logging.debug("move: %i; value: %i, win paths %%: %.1f%%, lose paths %%: %.1f%%, moves: %s\n", move, max_val, win_pct[move] * 100, lose_pct[move] * 100, moves) # trash talk if max_val == 1: print( "*beep* *boop* *beep*" " -=[ I calculate chances of winning to be 100% ]=- " "*beep* *boop* *beep*" ) return move def ai_strategy3(gstate: TicTacToe, game_tree): """AI strategy that maximizes the opponent's losing moves in the next turn""" status, player = gstate.game_status if status != "turn": logging.warning("Game status = %s. 
No move needed.", status) return None win, lose = (-1, 1) if player == "1" else (1, -1) move_vals = { move: win * game_tree[state]["value"] for move, state in gstate.next_moves.items() } max_val = max(move_vals.values()) if max_val == 1: # ai_strategy2 can handle "won" states return ai_strategy2(gstate, game_tree) else: ok_moves = [move for move, val in move_vals.items() if val == max_val] move_vals = { move: collections.Counter( [ game_tree[state2]["value"] for state2 in game_tree[gstate.next_moves[move]]["explored"] ] ) for move in ok_moves } move_eval = { move: val_ctr.get(win, 0) / val_ctr.get(lose, 0.5) for move, val_ctr in move_vals.items() } max_win_pct = max(move_eval.values()) good_moves = [move for move, win_pct in move_eval.items() if win_pct == max_win_pct] move = random.choice(good_moves) all_ct = sum(move_vals[move].values()) win_ct = move_vals[move].get(win, 0) logging.debug( "move: %i; value: %i, win %%: %.1f%%, moves: %s\n", move, win * game_tree[gstate.state]["value"], win_ct / max(all_ct, 0.1) * 100, move_vals ) return move def input_with_validation(text, choices): """Take input with validation""" choice_vals = set(map(str, choices)) while True: val = input(f"{text} | choices={choices}: ") if val in choice_vals: return val else: print(f"{val} is not a valid value. Please choose from: {choices}") def start_game(player1, player2, symbols=("O", "X")): """Starts a command line tic tac toe game between 2 players (bot or human)""" assert len(symbols) == 2, "`symbols` must have exactly 2 elements" gstate = TicTacToe("_________", symbols=symbols) with open(GAME_TREE_FILE, "r") as gt_file: game_tree = json.load(gt_file) while True: status, player = gstate.game_status if status == "turn": print(f"\n=== Player {player}'s turn:\n\n") print(gstate.printable_board(legend_hint=True)) print("\n") if player == "1": p_move = player1(gstate, game_tree) else: p_move = player2(gstate, game_tree) print(f"\n>>> Player {player} has chosen: {p_move}") new_state = gstate.data.copy() new_state[int(p_move)] = player gstate = TicTacToe("".join(new_state), symbols=symbols) else: print('\n') print(gstate.printable_board(legend_hint=True)) if status == "win": print(f"\n~~~~~ Player {player} wins! ~~~~~\n") else: print("\n~~~~~ It's a Tie! ~~~~~\n") return gstate.game_status def menu(n_player=None, symbols=("O", "X"), ai_1=None, ai_2=None): """start CLI based game menu PARAMETERS n_player (int): number of players * 0 = ai_1 vs ai_2; order decided at random * 1 = human vs ai_1; order decided by player * 2 = human vs human symbols (tuple): 2 element tuple for board symbols (in order) ai_1, ai_2 (callable): ai function that takes the following as parameters: * gstate (TicTacToe): game state * game_tree (dict): game tree JSON * TODO: in the future, each AI class should store its own game tree """ assert len(symbols) == 2, "`symbols` must have exactly 2 elements" ai_1 = ai_1 or ai_strategy3 ai_2 = ai_2 or ai_derp print("\n*** Let's play TIC TAC TOE! 
***\n") n_player = n_player if n_player is not None else int( input_with_validation("Choose number of players.", ["0", "1", "2"]) ) if n_player < 2: if n_player == 1: player_symbol = input_with_validation( f"Choose symbol ({symbols[0]} goes first).", symbols) player_order = symbols.index(player_symbol) player1, player2 = [human, ai_1][::(1 - player_order * 2)] else: if random.random() > 0.5: print("\n\t* O: ai_2, X: ai_1...") player1, player2 = ai_2, ai_1 else: print("\n\t* O: ai_1, X: ai_2...") player1, player2 = ai_1, ai_2 else: player1, player2 = human, human start_game(player1, player2, symbols=symbols) if __name__ == "__main__": logging.basicConfig(level=logging.INFO) LEVEL_DESCS = ["stupid", "easy", "normal", "hard", "unfair"] LEVEL = 2 print(f"\n[ You're playing with an AI at level {LEVEL} ({LEVEL_DESCS[LEVEL]}) ]\n") menu(n_player=1, ai_1=partial(ai_w_level, level=LEVEL), symbols=("O","X"))
find_next_states
identifier_name
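The masked identifier in the row above is find_next_states, whose body locates the empty cells with np.where on the flat board array. That lookup in isolation:

import numpy as np

data = np.array(list("12_1_2___"))
# Indices of the empty cells; find_next_states fills each one in turn
# with the current player's mark to enumerate the candidate moves.
print(np.where(data == "_")[0])  # [2 4 6 7 8]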
tic_tac_toe.py
""" Tic Tac Toe TODO * Refactor code to make it more AI tournament-friendly * ai_move function must only take in game state next time ^ re factor as class? (keep track of game_tree and trash talk flag) """ import collections from functools import partial import logging import random import numpy as np try: import ujson as json except ImportError: import json GAME_TREE_FILE = "tic-tac-toe_game_tree.json" class TicTacToe(object): """TicTacToe object representing a game state Notes: * game states are encoded as 9-character strings * valid characters are "1", "2", and "_" """ def __init__(self, state, symbols=("O", "X")): """ PARAMETERS state (str): 9 character string representing state symbols (tuple): 2-element tuple representing game symbols * tuple in first position goes first; vice versa """ assert len(symbols) == 2, "`symbols` must have exactly 2 elements" self.state = state self.counter = collections.Counter(state) self.validate_state(state) self.symbols = symbols self.data = np.array(list(state)) self.board = self.data.reshape((3, 3)) self.game_status = self.check_game_status() self.next_moves = self.find_next_states() self.next_states = list(self.next_moves.values()) def validate_state(self, state): """Check if a state is valid""" assert isinstance(state, str), "`state` must be of type str" assert len(state) == 9, "`state` must have exactly 9 elements" state = state.upper() assert set(state) <= { "1", "2", "_", }, "`state` elements must be in {'1', '2', '_'}" assert ( 0 <= self.counter["1"] - self.counter["2"] <= 1 ), "Invalid state: Players must alternate." def check_game_status(self): """Check status of game (turn, tie, win)""" for player in ("1", "2"): row_win = np.apply_along_axis( lambda x: set(x) == {player}, 1, self.board ).any() col_win = np.apply_along_axis( lambda x: set(x) == {player}, 0, self.board ).any() d1_win = set(self.data[[0, 4, 8]]) == {player} d2_win = set(self.data[[2, 4, 6]]) == {player} if any([row_win, col_win, d1_win, d2_win]): return ("win", player) if self.counter["_"] == 0: return ("tie", None) else: return ("turn", "1" if self.counter["1"] == self.counter["2"] else "2") def find_next_states(self): """Determine possible next moves. Returns a dict {index: new_state}""" status, player = self.game_status moves = {} if status == "turn": for idx in np.where(self.data == "_")[0]: new_move = self.data.copy() new_move[idx] = player moves[idx] = "".join(new_move) return moves def printable_board(self, indent_char="\t", legend_hint=True, symbols=None): """Returns a string representing the game board for printing""" symbols = symbols or self.symbols assert len(symbols) == 2, "`symbols` must have exactly 2 elements" data_symbols = self.data.copy() for orig, new in zip(("1", "2"), symbols):
board_symbols = data_symbols.reshape((3, 3)) if legend_hint: legend_board = np.where( self.data == "_", range(9), " ").reshape((3, 3)) return "\n".join( [indent_char + "GAME | INDEX"] + [indent_char + "===== | ====="] + [ indent_char + " ".join(b_row) + " | " + " ".join(l_row) for b_row, l_row in zip(board_symbols, legend_board) ] ) else: return "\n".join([indent_char + " ".join(row) for row in board_symbols]) def gen_game_tree(state_init): """Generate full game tree from initial state""" current_path = [state_init] game_tree = {} while current_path: cur_state = current_path[-1] if cur_state not in game_tree: ttt = TicTacToe(cur_state) game_tree[cur_state] = { "unexplored": ttt.next_states, "explored": [], "status": ttt.game_status, } if game_tree[cur_state]["unexplored"]: current_path.append(game_tree[cur_state]["unexplored"].pop(0)) else: explored = current_path.pop(-1) if explored != state_init: game_tree[current_path[-1]]["explored"].append(explored) status, player = game_tree[cur_state]["status"] if status == "tie": value = 0 outcomes = {0: 1} elif status == "win": value = -1 if player == "1" else 1 outcomes = {value: 1} else: value = (min if player == "1" else max)( [ game_tree[state]["value"] for state in game_tree[cur_state]["explored"] ] ) outcomes = {} for state in game_tree[cur_state]["explored"]: for res, res_ct in game_tree[state]["outcomes"].items(): outcomes[res] = outcomes.get(res, 0) + res_ct game_tree[cur_state]["value"] = value game_tree[cur_state]["outcomes"] = outcomes return game_tree def answer_exercise(): """Function to answer exercise in Chapter 2 section III""" state = "1212__21_" ttt = TicTacToe(state) game_tree = gen_game_tree(state) print( f"The value of game state:\n{ttt.printable_board(' ', legend_hint=False)}\n" f"is {game_tree[state]['value']}" ) return game_tree[state]["value"] def learn(state="_________"): """Build game tree and export given initial state""" game_tree = gen_game_tree(state) with open(GAME_TREE_FILE, "w") as gt_file: json.dump(game_tree, gt_file, indent=4) def human(gstate: TicTacToe, *args): """Function for a human player""" return input_with_validation("Please enter move.", list(gstate.next_moves.keys())) def ai_w_level(gstate: TicTacToe, game_tree, level=3): """AI with levels Level Descriptions * 0 = stupid * 1 = easy * 2 = medium * 3 = hard * 4 = unfair """ assert isinstance(level, int), "`level` must be `int`" assert 0 <= level <= 4, "level values must be from 0 to 4" seed = random.random() logging.debug(f"seed value: {seed:.3f}") if level == 0: ai_func = ai_derp elif level == 1: ai_func = ai_derp if seed <= 0.3 else ai_strategy1 elif level == 2: ai_func = ai_derp if seed <= 0.2 else ai_strategy2 elif level == 3: ai_func = ai_derp if seed <= 0.1 else ai_strategy3 elif level == 4: ai_func = ai_strategy3 return ai_func(gstate, game_tree) def ai_derp(gstate: TicTacToe, *args): """AI that randomly picks the next move""" return random.choice(list(gstate.next_moves.keys())) def ai_strategy1(gstate: TicTacToe, game_tree): """Strategy assuming opponent plays optimally""" status, player = gstate.game_status if status != "turn": logging.warning("Game status = %s. 
No move needed.", status) return None mod = -1 if player == "1" else 1 next_move_vals = { idx: mod * game_tree[state]["value"] for idx, state in gstate.next_moves.items() } max_val = max(next_move_vals.values()) moves = [idx for idx, val in next_move_vals.items() if val == max_val] logging.debug("moves: %s; value: %i", moves, max_val) move = random.choice(moves) return move def ai_strategy2(gstate: TicTacToe, game_tree): """Strategy maximizing the number of paths to winning end states""" status, player = gstate.game_status if status != "turn": logging.warning("Game status = %s. No move needed.", status) return None win, lose = (-1, 1) if player == "1" else (1, -1) next_move_vals = { idx: win * game_tree[state]["value"] for idx, state in gstate.next_moves.items() } max_val = max(next_move_vals.values()) moves = { idx: ( game_tree[gstate.next_moves[idx]]["outcomes"].get(str(win), 0), game_tree[gstate.next_moves[idx]]["outcomes"].get(str(0), 0), game_tree[gstate.next_moves[idx]]["outcomes"].get(str(lose), 0) ) for idx, val in next_move_vals.items() if val == max_val } win_ct = {idx: vals[0] for idx, vals in moves.items()} win_pct = {idx: vals[0] / sum(vals) for idx, vals in moves.items()} lose_pct = {idx: vals[2] / sum(vals) for idx, vals in moves.items()} wl_ratio = {idx: vals[0] / max(vals[2], 0.5) for idx, vals in moves.items()} # criteria, agg_func = lose_pct, min # criteria, agg_func = win_pct, max criteria, agg_func = wl_ratio, max if max_val == 1 and 1 in win_ct.values(): move = [idx for idx, val in win_ct.items() if val == 1][0] else: move = random.choice( [idx for idx, val in criteria.items() if val == agg_func(criteria.values())] ) logging.debug("move: %i; value: %i, win paths %%: %.1f%%, lose paths %%: %.1f%%, moves: %s\n", move, max_val, win_pct[move] * 100, lose_pct[move] * 100, moves) # trash talk if max_val == 1: print( "*beep* *boop* *beep*" " -=[ I calculate chances of winning to be 100% ]=- " "*beep* *boop* *beep*" ) return move def ai_strategy3(gstate: TicTacToe, game_tree): """AI strategy that maximizes the opponent's losing moves in the next turn""" status, player = gstate.game_status if status != "turn": logging.warning("Game status = %s. No move needed.", status) return None win, lose = (-1, 1) if player == "1" else (1, -1) move_vals = { move: win * game_tree[state]["value"] for move, state in gstate.next_moves.items() } max_val = max(move_vals.values()) if max_val == 1: # ai_strategy2 can handle "won" states return ai_strategy2(gstate, game_tree) else: ok_moves = [move for move, val in move_vals.items() if val == max_val] move_vals = { move: collections.Counter( [ game_tree[state2]["value"] for state2 in game_tree[gstate.next_moves[move]]["explored"] ] ) for move in ok_moves } move_eval = { move: val_ctr.get(win, 0) / val_ctr.get(lose, 0.5) for move, val_ctr in move_vals.items() } max_win_pct = max(move_eval.values()) good_moves = [move for move, win_pct in move_eval.items() if win_pct == max_win_pct] move = random.choice(good_moves) all_ct = sum(move_vals[move].values()) win_ct = move_vals[move].get(win, 0) logging.debug( "move: %i; value: %i, win %%: %.1f%%, moves: %s\n", move, win * game_tree[gstate.state]["value"], win_ct / max(all_ct, 0.1) * 100, move_vals ) return move def input_with_validation(text, choices): """Take input with validation""" choice_vals = set(map(str, choices)) while True: val = input(f"{text} | choices={choices}: ") if val in choice_vals: return val else: print(f"{val} is not a valid value. 
Please choose from: {choices}") def start_game(player1, player2, symbols=("O", "X")): """Starts a command line tic tac toe game between 2 players (bot or human)""" assert len(symbols) == 2, "`symbols` must have exactly 2 elements" gstate = TicTacToe("_________", symbols=symbols) with open(GAME_TREE_FILE, "r") as gt_file: game_tree = json.load(gt_file) while True: status, player = gstate.game_status if status == "turn": print(f"\n=== Player {player}'s turn:\n\n") print(gstate.printable_board(legend_hint=True)) print("\n") if player == "1": p_move = player1(gstate, game_tree) else: p_move = player2(gstate, game_tree) print(f"\n>>> Player {player} has chosen: {p_move}") new_state = gstate.data.copy() new_state[int(p_move)] = player gstate = TicTacToe("".join(new_state), symbols=symbols) else: print('\n') print(gstate.printable_board(legend_hint=True)) if status == "win": print(f"\n~~~~~ Player {player} wins! ~~~~~\n") else: print("\n~~~~~ It's a Tie! ~~~~~\n") return gstate.game_status def menu(n_player=None, symbols=("O", "X"), ai_1=None, ai_2=None): """start CLI based game menu PARAMETERS n_player (int): number of players * 0 = ai_1 vs ai_2; order decided at random * 1 = human vs ai_1; order decided by player * 2 = human vs human symbols (tuple): 2 element tuple for board symbols (in order) ai_1, ai_2 (callable): ai function that takes the following as parameters: * gstate (TicTacToe): game state * game_tree (dict): game tree JSON * TODO: in the future, each AI class should store its own game tree """ assert len(symbols) == 2, "`symbols` must have exactly 2 elements" ai_1 = ai_1 or ai_strategy3 ai_2 = ai_2 or ai_derp print("\n*** Let's play TIC TAC TOE! ***\n") n_player = n_player if n_player is not None else int( input_with_validation("Choose number of players.", ["0", "1", "2"]) ) if n_player < 2: if n_player == 1: player_symbol = input_with_validation( f"Choose symbol ({symbols[0]} goes first).", symbols) player_order = symbols.index(player_symbol) player1, player2 = [human, ai_1][::(1 - player_order * 2)] else: if random.random() > 0.5: print("\n\t* O: ai_2, X: ai_1...") player1, player2 = ai_2, ai_1 else: print("\n\t* O: ai_1, X: ai_2...") player1, player2 = ai_1, ai_2 else: player1, player2 = human, human start_game(player1, player2, symbols=symbols) if __name__ == "__main__": logging.basicConfig(level=logging.INFO) LEVEL_DESCS = ["stupid", "easy", "normal", "hard", "unfair"] LEVEL = 2 print(f"\n[ You're playing with an AI at level {LEVEL} ({LEVEL_DESCS[LEVEL]}) ]\n") menu(n_player=1, ai_1=partial(ai_w_level, level=LEVEL), symbols=("O","X"))
data_symbols[data_symbols == orig] = new
random_line_split
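The middle of the row above is a vectorized symbol swap: each boolean-mask assignment selects every cell holding an internal mark ("1" or "2") and rewrites it with its display symbol in a single pass. Stand-alone:

import numpy as np

data_symbols = np.array(list("12_1_2__1"))
for orig, new in zip(("1", "2"), ("O", "X")):
    # The comparison yields a boolean mask; assignment through the mask
    # replaces all matching cells at once.
    data_symbols[data_symbols == orig] = new
print("".join(data_symbols))  # OX_O_X__O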
tic_tac_toe.py
""" Tic Tac Toe TODO * Refactor code to make it more AI tournament-friendly * ai_move function must only take in game state next time ^ re factor as class? (keep track of game_tree and trash talk flag) """ import collections from functools import partial import logging import random import numpy as np try: import ujson as json except ImportError: import json GAME_TREE_FILE = "tic-tac-toe_game_tree.json" class TicTacToe(object): """TicTacToe object representing a game state Notes: * game states are encoded as 9-character strings * valid characters are "1", "2", and "_" """ def __init__(self, state, symbols=("O", "X")): """ PARAMETERS state (str): 9 character string representing state symbols (tuple): 2-element tuple representing game symbols * tuple in first position goes first; vice versa """ assert len(symbols) == 2, "`symbols` must have exactly 2 elements" self.state = state self.counter = collections.Counter(state) self.validate_state(state) self.symbols = symbols self.data = np.array(list(state)) self.board = self.data.reshape((3, 3)) self.game_status = self.check_game_status() self.next_moves = self.find_next_states() self.next_states = list(self.next_moves.values()) def validate_state(self, state): """Check if a state is valid""" assert isinstance(state, str), "`state` must be of type str" assert len(state) == 9, "`state` must have exactly 9 elements" state = state.upper() assert set(state) <= { "1", "2", "_", }, "`state` elements must be in {'1', '2', '_'}" assert ( 0 <= self.counter["1"] - self.counter["2"] <= 1 ), "Invalid state: Players must alternate." def check_game_status(self): """Check status of game (turn, tie, win)""" for player in ("1", "2"): row_win = np.apply_along_axis( lambda x: set(x) == {player}, 1, self.board ).any() col_win = np.apply_along_axis( lambda x: set(x) == {player}, 0, self.board ).any() d1_win = set(self.data[[0, 4, 8]]) == {player} d2_win = set(self.data[[2, 4, 6]]) == {player} if any([row_win, col_win, d1_win, d2_win]): return ("win", player) if self.counter["_"] == 0: return ("tie", None) else: return ("turn", "1" if self.counter["1"] == self.counter["2"] else "2") def find_next_states(self): """Determine possible next moves. 
Returns a dict {index: new_state}""" status, player = self.game_status moves = {} if status == "turn": for idx in np.where(self.data == "_")[0]: new_move = self.data.copy() new_move[idx] = player moves[idx] = "".join(new_move) return moves def printable_board(self, indent_char="\t", legend_hint=True, symbols=None): """Returns a string representing the game board for printing""" symbols = symbols or self.symbols assert len(symbols) == 2, "`symbols` must have exactly 2 elements" data_symbols = self.data.copy() for orig, new in zip(("1", "2"), symbols): data_symbols[data_symbols == orig] = new board_symbols = data_symbols.reshape((3, 3)) if legend_hint: legend_board = np.where( self.data == "_", range(9), " ").reshape((3, 3)) return "\n".join( [indent_char + "GAME | INDEX"] + [indent_char + "===== | ====="] + [ indent_char + " ".join(b_row) + " | " + " ".join(l_row) for b_row, l_row in zip(board_symbols, legend_board) ] ) else: return "\n".join([indent_char + " ".join(row) for row in board_symbols]) def gen_game_tree(state_init): """Generate full game tree from initial state""" current_path = [state_init] game_tree = {} while current_path: cur_state = current_path[-1] if cur_state not in game_tree: ttt = TicTacToe(cur_state) game_tree[cur_state] = { "unexplored": ttt.next_states, "explored": [], "status": ttt.game_status, } if game_tree[cur_state]["unexplored"]: current_path.append(game_tree[cur_state]["unexplored"].pop(0)) else: explored = current_path.pop(-1) if explored != state_init: game_tree[current_path[-1]]["explored"].append(explored) status, player = game_tree[cur_state]["status"] if status == "tie": value = 0 outcomes = {0: 1} elif status == "win": value = -1 if player == "1" else 1 outcomes = {value: 1} else: value = (min if player == "1" else max)( [ game_tree[state]["value"] for state in game_tree[cur_state]["explored"] ] ) outcomes = {} for state in game_tree[cur_state]["explored"]: for res, res_ct in game_tree[state]["outcomes"].items(): outcomes[res] = outcomes.get(res, 0) + res_ct game_tree[cur_state]["value"] = value game_tree[cur_state]["outcomes"] = outcomes return game_tree def answer_exercise(): """Function to answer exercise in Chapter 2 section III""" state = "1212__21_" ttt = TicTacToe(state) game_tree = gen_game_tree(state) print( f"The value of game state:\n{ttt.printable_board(' ', legend_hint=False)}\n" f"is {game_tree[state]['value']}" ) return game_tree[state]["value"] def learn(state="_________"): """Build game tree and export given initial state""" game_tree = gen_game_tree(state) with open(GAME_TREE_FILE, "w") as gt_file: json.dump(game_tree, gt_file, indent=4) def human(gstate: TicTacToe, *args): """Function for a human player""" return input_with_validation("Please enter move.", list(gstate.next_moves.keys())) def ai_w_level(gstate: TicTacToe, game_tree, level=3): """AI with levels Level Descriptions * 0 = stupid * 1 = easy * 2 = medium * 3 = hard * 4 = unfair """ assert isinstance(level, int), "`level` must be `int`" assert 0 <= level <= 4, "level values must be from 0 to 4" seed = random.random() logging.debug(f"seed value: {seed:.3f}") if level == 0: ai_func = ai_derp elif level == 1: ai_func = ai_derp if seed <= 0.3 else ai_strategy1 elif level == 2: ai_func = ai_derp if seed <= 0.2 else ai_strategy2 elif level == 3: ai_func = ai_derp if seed <= 0.1 else ai_strategy3 elif level == 4: ai_func = ai_strategy3 return ai_func(gstate, game_tree) def ai_derp(gstate: TicTacToe, *args): """AI that randomly picks the next move""" return 
random.choice(list(gstate.next_moves.keys())) def ai_strategy1(gstate: TicTacToe, game_tree): """Strategy assuming opponent plays optimally""" status, player = gstate.game_status if status != "turn": logging.warning("Game status = %s. No move needed.", status) return None mod = -1 if player == "1" else 1 next_move_vals = { idx: mod * game_tree[state]["value"] for idx, state in gstate.next_moves.items() } max_val = max(next_move_vals.values()) moves = [idx for idx, val in next_move_vals.items() if val == max_val] logging.debug("moves: %s; value: %i", moves, max_val) move = random.choice(moves) return move def ai_strategy2(gstate: TicTacToe, game_tree): """Strategy maximizing the number of paths to winning end states""" status, player = gstate.game_status if status != "turn": logging.warning("Game status = %s. No move needed.", status) return None win, lose = (-1, 1) if player == "1" else (1, -1) next_move_vals = { idx: win * game_tree[state]["value"] for idx, state in gstate.next_moves.items() } max_val = max(next_move_vals.values()) moves = { idx: ( game_tree[gstate.next_moves[idx]]["outcomes"].get(str(win), 0), game_tree[gstate.next_moves[idx]]["outcomes"].get(str(0), 0), game_tree[gstate.next_moves[idx]]["outcomes"].get(str(lose), 0) ) for idx, val in next_move_vals.items() if val == max_val } win_ct = {idx: vals[0] for idx, vals in moves.items()} win_pct = {idx: vals[0] / sum(vals) for idx, vals in moves.items()} lose_pct = {idx: vals[2] / sum(vals) for idx, vals in moves.items()} wl_ratio = {idx: vals[0] / max(vals[2], 0.5) for idx, vals in moves.items()} # criteria, agg_func = lose_pct, min # criteria, agg_func = win_pct, max criteria, agg_func = wl_ratio, max if max_val == 1 and 1 in win_ct.values(): move = [idx for idx, val in win_ct.items() if val == 1][0] else: move = random.choice( [idx for idx, val in criteria.items() if val == agg_func(criteria.values())] ) logging.debug("move: %i; value: %i, win paths %%: %.1f%%, lose paths %%: %.1f%%, moves: %s\n", move, max_val, win_pct[move] * 100, lose_pct[move] * 100, moves) # trash talk if max_val == 1: print( "*beep* *boop* *beep*" " -=[ I calculate chances of winning to be 100% ]=- " "*beep* *boop* *beep*" ) return move def ai_strategy3(gstate: TicTacToe, game_tree): """AI strategy that maximizes the opponent's losing moves in the next turn""" status, player = gstate.game_status if status != "turn": logging.warning("Game status = %s. 
No move needed.", status) return None win, lose = (-1, 1) if player == "1" else (1, -1) move_vals = { move: win * game_tree[state]["value"] for move, state in gstate.next_moves.items() } max_val = max(move_vals.values()) if max_val == 1: # ai_strategy2 can handle "won" states return ai_strategy2(gstate, game_tree) else: ok_moves = [move for move, val in move_vals.items() if val == max_val] move_vals = { move: collections.Counter( [ game_tree[state2]["value"] for state2 in game_tree[gstate.next_moves[move]]["explored"] ] ) for move in ok_moves } move_eval = { move: val_ctr.get(win, 0) / val_ctr.get(lose, 0.5) for move, val_ctr in move_vals.items() } max_win_pct = max(move_eval.values()) good_moves = [move for move, win_pct in move_eval.items() if win_pct == max_win_pct] move = random.choice(good_moves) all_ct = sum(move_vals[move].values()) win_ct = move_vals[move].get(win, 0) logging.debug( "move: %i; value: %i, win %%: %.1f%%, moves: %s\n", move, win * game_tree[gstate.state]["value"], win_ct / max(all_ct, 0.1) * 100, move_vals ) return move def input_with_validation(text, choices): """Take input with validation""" choice_vals = set(map(str, choices)) while True: val = input(f"{text} | choices={choices}: ") if val in choice_vals: return val else:
def start_game(player1, player2, symbols=("O", "X")): """Starts a command line tic tac toe game between 2 players (bot or human)""" assert len(symbols) == 2, "`symbols` must have exactly 2 elements" gstate = TicTacToe("_________", symbols=symbols) with open(GAME_TREE_FILE, "r") as gt_file: game_tree = json.load(gt_file) while True: status, player = gstate.game_status if status == "turn": print(f"\n=== Player {player}'s turn:\n\n") print(gstate.printable_board(legend_hint=True)) print("\n") if player == "1": p_move = player1(gstate, game_tree) else: p_move = player2(gstate, game_tree) print(f"\n>>> Player {player} has chosen: {p_move}") new_state = gstate.data.copy() new_state[int(p_move)] = player gstate = TicTacToe("".join(new_state), symbols=symbols) else: print('\n') print(gstate.printable_board(legend_hint=True)) if status == "win": print(f"\n~~~~~ Player {player} wins! ~~~~~\n") else: print("\n~~~~~ It's a Tie! ~~~~~\n") return gstate.game_status def menu(n_player=None, symbols=("O", "X"), ai_1=None, ai_2=None): """start CLI based game menu PARAMETERS n_player (int): number of players * 0 = ai_1 vs ai_2; order decided at random * 1 = human vs ai_1; order decided by player * 2 = human vs human symbols (tuple): 2 element tuple for board symbols (in order) ai_1, ai_2 (callable): ai function that takes the following as parameters: * gstate (TicTacToe): game state * game_tree (dict): game tree JSON * TODO: in the future, each AI class should store its own game tree """ assert len(symbols) == 2, "`symbols` must have exactly 2 elements" ai_1 = ai_1 or ai_strategy3 ai_2 = ai_2 or ai_derp print("\n*** Let's play TIC TAC TOE! ***\n") n_player = n_player if n_player is not None else int( input_with_validation("Choose number of players.", ["0", "1", "2"]) ) if n_player < 2: if n_player == 1: player_symbol = input_with_validation( f"Choose symbol ({symbols[0]} goes first).", symbols) player_order = symbols.index(player_symbol) player1, player2 = [human, ai_1][::(1 - player_order * 2)] else: if random.random() > 0.5: print("\n\t* O: ai_2, X: ai_1...") player1, player2 = ai_2, ai_1 else: print("\n\t* O: ai_1, X: ai_2...") player1, player2 = ai_1, ai_2 else: player1, player2 = human, human start_game(player1, player2, symbols=symbols) if __name__ == "__main__": logging.basicConfig(level=logging.INFO) LEVEL_DESCS = ["stupid", "easy", "normal", "hard", "unfair"] LEVEL = 2 print(f"\n[ You're playing with an AI at level {LEVEL} ({LEVEL_DESCS[LEVEL]}) ]\n") menu(n_player=1, ai_1=partial(ai_w_level, level=LEVEL), symbols=("O","X"))
print(f"{val} is not a valid value. Please choose from: {choices}")
conditional_block
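The strategies above all read a precomputed game_tree in which each board string maps to a minimax-style "value", with the sign convention (inferred from the mod/win flips) that -1 favors player "1" and +1 favors player "2". A minimal, self-contained sketch of how such values can be derived bottom-up; the helper names (winner, value) are illustrative, not the author's actual tree builder:

from functools import lru_cache

LINES = [(0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)]

def winner(state):
    # return "1" or "2" if a line is complete, else None
    for a, b, c in LINES:
        if state[a] != "_" and state[a] == state[b] == state[c]:
            return state[a]
    return None

@lru_cache(maxsize=None)
def value(state):
    w = winner(state)
    if w is not None:
        return -1 if w == "1" else 1
    if "_" not in state:
        return 0  # draw
    # player "1" moves when the move counts are equal (assumed convention)
    player = "1" if state.count("1") == state.count("2") else "2"
    vals = [value(state[:i] + player + state[i + 1:])
            for i, c in enumerate(state) if c == "_"]
    # player "1" steers the value toward -1, player "2" toward +1
    return min(vals) if player == "1" else max(vals)

print(value("_________"))  # 0: perfect play from an empty board is a draw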
web-trader - console.py
# -*- coding: utf-8 -*- """ Web-Based Equities Trading System DATA 602 Assignment 2 / Prof. Jamiel Sheikh TBD Created October/November 2017 @author: Ilya Kats """ # Required libraries import urllib.request as req import pandas as pd from bs4 import BeautifulSoup from datetime import datetime from pymongo import MongoClient # Global variables symbols = ('AAPL','AMZN','MSFT','INTC','SNAP') menu = ['[B]uy','[S]ell','Show [P]/L','Show Blotte[r]','[Q]uit'] initial_cash = 10000000.00 cash = 0.00 blotter = pd.DataFrame(columns=['Side','Ticker','Volume','Price','Date','Cash']) pl = pd.DataFrame(columns=['Ticker','Position','WAP','RPL']) # Stock price is extracted from Yahoo! Finance page. # Rather than extracting display value from HTML, # the price is extracted from data stored in JavaScript code. def getPrice(symbol): ''' Gets current market price. Args: symbol: Ticker symbol. Returns: Current stock price for a symbol from Yahoo! Finance or -1 if price could not be extracted. ''' price = -1 url = 'https://finance.yahoo.com/quote/'+symbol page = req.urlopen(url).read() soup = BeautifulSoup(page, 'html.parser') scripts = soup.findAll('script') # Cycle through all script blocks to find the one with data # Ideally, data in JSON format should be properly converted and read # Here it is simply extracted with string functions for s in scripts: pos = s.text.find('currentPrice') if pos>0: sPrice = s.text[pos:s.text.find(',',pos)] try: price = float(sPrice[sPrice.rfind(':')+1:]) except ValueError: return -1 break return price def showBlotter(): '''Displays entire blotter''' print('\nCURRENT BLOTTER') print('{:<5s} {:<7s} {:>8s} {:>10s} {:>30s} {:>15s}'.format( 'Side','Ticker','Volume','Price','Date and Time','Cash') ) for index, row in blotter.iterrows(): print('{:<5s} {:<7s} {:>8d} {:>10.2f} {:>30s} {:>15.2f}'.format( row['Side'], row['Ticker'], row['Volume'], row['Price'], str(row['Date']), row['Cash'] )) if blotter.empty: print('[No Trades Recorded]') print('') # Using same function for calculating UPL and RPL since the formula is the same def getPL(position, price, wap): '''Calculates UPL or RPL based on position/volume, market/sell price and WAP.''' return (position*(price-wap)) def updateWAP(currentWAP, currentPosition, price, volume): '''Calculates new WAP based on previous WAP and new buy information.''' return ((currentWAP*currentPosition)+(price*volume))/(currentPosition+volume) # Display current P/L with refreshed market price def
(): '''Displays current P/L with updated market price.''' print('\nCURRENT P/L') print('{:<7s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format( 'Ticker','Position','Market','WAP','UPL','RPL') ) for index, row in pl.iterrows(): price = getPrice(row['Ticker']) print('{:<7s} {:>10d} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}'.format( row['Ticker'], row['Position'], price, row['WAP'], getPL(row['Position'], price, row['WAP']), row['RPL']) ) if pl.empty: print('[Holding no positions]') print('Cash: ${:,.2f}\n'.format(cash)) def getShares(side): '''Prompt for and return number of shares to buy or sell. Argument is either "buy" or "sell".''' shares = input('Enter number of shares to {:s}: '.format(side)) try: numShares = int(shares) except ValueError: print ('Invalid number of shares.\n') return -1 if numShares<0: print ('Invalid number of shares. Must be positive.\n') return -1 return numShares def getSymbol(side): '''Prompt for and return stock symbol to buy or sell. Argument is either "buy" or "sell".''' symbol = input('Enter stock symbol to {:s}: '.format(side)).upper() if symbol not in symbols: print ('Invalid symbol. Valid symbols:') for s in symbols: print(s, end=" ") print('\n') return '' return symbol def doBuy(symbol, volume): ''' Buys given amount of selected stock. Args: symbol: Stock to purchase. volume: Number of shares to purchase. Returns: TRUE if successful and FALSE otherwise. ''' global cash global blotter global pl global db # Check that it's a valid symbol if symbol not in symbols: print ('Buy unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol)) return False # Refresh price to get most up to date information price = getPrice(symbol) if price<0: print ('Buy unsuccessful. Could not get valid market price.\n') return False # Check that have enough cash if (volume*price)>cash: print ('Buy unsuccessful. Not enough cash.\n') return False # Perform buy - add to P/L and adjust cash position if symbol in pl.index: pl.at[symbol,'WAP'] = updateWAP(pl.loc[symbol]['WAP'], pl.loc[symbol]['Position'], price, volume) pl.at[symbol,'Position'] += volume else: entry = pd.DataFrame([[symbol,volume,updateWAP(0,0,price,volume),0]], columns=['Ticker','Position','WAP','RPL'], index=[symbol]) pl = pl.append(entry) savePL() cash -= volume*price saveCash() # Add to blotter entry = pd.DataFrame([['Buy',symbol,volume,price,datetime.now(),cash]], columns=['Side','Ticker','Volume','Price','Date','Cash']) blotter = blotter.append(entry, ignore_index=True) db.blotter.insert_one(entry.to_dict('records')[0]) # Output status print ('Buy successful. Purchased {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price)) return True def doSell(symbol, volume): ''' Sells given amount of selected stock. Args: symbol: Stock to sell. volume: Number of shares to sell. Returns: TRUE if successful and FALSE otherwise. ''' global cash global blotter global pl global db # Check that it's a valid symbol if symbol not in symbols: print ('Sell unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol)) return False # Check that have any shares if symbol not in pl.index: print ('Sell unsuccessful. Not holding ({:s}).\n'.format(symbol)) return False # Check that have enough shares if volume>pl.loc[symbol]['Position']: print ('Sell unsuccessful. Not enough shares.\n') return False # Refresh price to get most up to date information price = getPrice(symbol) if price<0: print ('Sell unsuccessful. 
Could not get valid market price.\n') return False # Perform sell pl.at[symbol,'RPL'] += getPL(volume, price, pl.loc[symbol]['WAP']) pl.at[symbol,'Position'] -= volume cash += volume*price saveCash() # Reset WAP if closing the position if pl.loc[symbol]['Position']==0: pl.at[symbol,'WAP']=0 savePL() # Add to blotter entry = pd.DataFrame([['Sell',symbol,volume,price,datetime.now(),cash]], columns=['Side','Ticker','Volume','Price','Date','Cash']) blotter = blotter.append(entry, ignore_index=True) db.blotter.insert_one(entry.to_dict('records')[0]) # Output status print ('Sell successful. Sold {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price)) return True def showMenu(): '''Displays main menu and prompts for choice. Returns valid choice.''' while True: print(' - ', end='') for i in menu: print(i, end=' - ') print('') option = input('Select option: ').upper() if option in ['1','2','3','4','5','B','S','P','R','Q','1929']: return option print('Invalid choice. Please try again.\n') def connectDB(): '''Connects to database.''' global db client = MongoClient("mongodb://trader:traderpw@data602-shard-00-00-thsko.mongodb.net:27017,data602-shard-00-01-thsko.mongodb.net:27017,data602-shard-00-02-thsko.mongodb.net:27017/test?ssl=true&replicaSet=Data602-shard-0&authSource=admin") db = client.web_trader def retrievePL(): '''Retrieves full P/L information from the database.''' global pl global db if db.pl.count()==0: initializePL() else: pl = pd.DataFrame(list(db.pl.find({}))) del pl['_id'] pl.index = pl['Ticker'] def savePL(): '''Saves full P/L information to the database.''' global pl global db if db.pl.count()>0: db.pl.delete_many({}) if not pl.empty: db.pl.insert_many(pl.to_dict('records')) def retrieveBlotter(): '''Retrieves full Blotter information from the database.''' global blotter global db if db.blotter.count()==0: blotter = blotter.iloc[0:0] else: blotter = pd.DataFrame(list(db.blotter.find({}))) del blotter['_id'] def initializePL(): '''Resets P/L.''' global pl pl = pl.iloc[0:0] def saveCash(): '''Saves cash value in the database.''' global cash global db if db.cash.count()==0: db.cash.insert_one(dict([('position',cash)])) else: db.cash.update_one({},{'$set':dict([('position',cash)])}, upsert=False) def retrieveCash(): '''Retrieves cash value from the database.''' global cash global db if db.cash.count()==0: cash = initial_cash saveCash() else: cash = db.cash.find_one()['position'] def main(): pass print('************************************************') print('* LITTLE WEB CONSOLE EQUITIES TRADING SYSTEM *') print('* Data 602 Ilya Kats *') print('************************************************') print('\nInformation is provided for illustrative purposes only.') print('Market price from Yahoo! Finance is delayed.\n') # Initialize environment global cash global blotter global pl # Connect to database and connectDB() retrievePL() retrieveBlotter() retrieveCash() choice = '' while choice!='5' and choice!='Q': choice = showMenu() if choice=='B' or choice=='1': symbol = getSymbol('buy') if symbol!='': shares = getShares('buy') if shares>0: doBuy(symbol, shares) elif choice=='S' or choice=='2': symbol = getSymbol('sell') if symbol!='': shares = getShares('sell') if shares>0: doSell(symbol, shares) elif choice=='P' or choice=='3': showPL() elif choice=='R' or choice=='4': showBlotter() print('\nThank you for using Little Console Equities Trading System!') if __name__ == '__main__': main()
showPL
identifier_name
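getPrice above slices currentPrice out of inline JavaScript with find/rfind, and its own comment concedes that parsing the embedded JSON would be more robust. Below is a sketch of a slightly sturdier extraction using a regular expression; the canned script_text stands in for a live Yahoo! Finance page, whose exact structure is an assumption here:

import re

# stand-in for the <script> contents of a quote page (structure assumed)
script_text = 'root.App.main = {"price":{"currentPrice":{"raw":172.45,"fmt":"172.45"}}};'

def extract_price(text):
    """Return the first currentPrice raw value in a script blob, or -1."""
    match = re.search(r'"currentPrice"\s*:\s*\{\s*"raw"\s*:\s*([0-9.]+)', text)
    return float(match.group(1)) if match else -1

print(extract_price(script_text))  # 172.45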
web-trader - console.py
# -*- coding: utf-8 -*- """ Web-Based Equities Trading System DATA 602 Assignment 2 / Prof. Jamiel Sheikh TBD Created October/November 2017 @author: Ilya Kats """ # Required libraries import urllib.request as req import pandas as pd from bs4 import BeautifulSoup from datetime import datetime from pymongo import MongoClient # Global variables symbols = ('AAPL','AMZN','MSFT','INTC','SNAP') menu = ['[B]uy','[S]ell','Show [P]/L','Show Blotte[r]','[Q]uit'] initial_cash = 10000000.00 cash = 0.00 blotter = pd.DataFrame(columns=['Side','Ticker','Volume','Price','Date','Cash']) pl = pd.DataFrame(columns=['Ticker','Position','WAP','RPL']) # Stock price is extracted from Yahoo! Finance page. # Rather than extracting display value from HTML, # the price is extracted from data stored in JavaScript code. def getPrice(symbol): ''' Gets current market price. Args: symbol: Ticker symbol. Returns: Current stock price for a symbol from Yahoo! Finance or -1 if price could not be extracted. ''' price = -1 url = 'https://finance.yahoo.com/quote/'+symbol page = req.urlopen(url).read() soup = BeautifulSoup(page, 'html.parser') scripts = soup.findAll('script') # Cycle through all script blocks to find the one with data # Ideally, data in JSON format should be properly converted and read # Here it is simply extracted with string functions for s in scripts: pos = s.text.find('currentPrice') if pos>0: sPrice = s.text[pos:s.text.find(',',pos)] try: price = float(sPrice[sPrice.rfind(':')+1:]) except ValueError: return -1 break return price def showBlotter(): '''Displays entire blotter''' print('\nCURRENT BLOTTER') print('{:<5s} {:<7s} {:>8s} {:>10s} {:>30s} {:>15s}'.format( 'Side','Ticker','Volume','Price','Date and Time','Cash') ) for index, row in blotter.iterrows(): print('{:<5s} {:<7s} {:>8d} {:>10.2f} {:>30s} {:>15.2f}'.format( row['Side'], row['Ticker'], row['Volume'], row['Price'], str(row['Date']), row['Cash'] )) if blotter.empty: print('[No Trades Recorded]') print('') # Using same function for calculating UPL and RPL since the formula is the same def getPL(position, price, wap): '''Calculates UPL or RPL based on position/volume, market/sell price and WAP.''' return (position*(price-wap)) def updateWAP(currentWAP, currentPosition, price, volume): '''Calculates new WAP based on previous WAP and new buy information.''' return ((currentWAP*currentPosition)+(price*volume))/(currentPosition+volume) # Display current P/L with refreshed market price def showPL(): '''Displays current P/L with updated market price.''' print('\nCURRENT P/L') print('{:<7s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format( 'Ticker','Position','Market','WAP','UPL','RPL') ) for index, row in pl.iterrows(): price = getPrice(row['Ticker']) print('{:<7s} {:>10d} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}'.format( row['Ticker'], row['Position'], price, row['WAP'], getPL(row['Position'], price, row['WAP']), row['RPL']) ) if pl.empty: print('[Holding no positions]') print('Cash: ${:,.2f}\n'.format(cash)) def getShares(side): '''Prompt for and return number of shares to buy or sell. Argument is either "buy" or "sell".''' shares = input('Enter number of shares to {:s}: '.format(side)) try: numShares = int(shares) except ValueError: print ('Invalid number of shares.\n') return -1 if numShares<0: print ('Invalid number of shares. Must be positive.\n') return -1 return numShares def getSymbol(side):
def doBuy(symbol, volume): ''' Buys given amount of selected stock. Args: symbol: Stock to purchase. volume: Number of shares to purchase. Returns: TRUE if successful and FALSE otherwise. ''' global cash global blotter global pl global db # Check that it's a valid symbol if symbol not in symbols: print ('Buy unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol)) return False # Refresh price to get most up to date information price = getPrice(symbol) if price<0: print ('Buy unsuccessful. Could not get valid market price.\n') return False # Check that have enough cash if (volume*price)>cash: print ('Buy unsuccessful. Not enough cash.\n') return False # Perform buy - add to P/L and adjust cash position if symbol in pl.index: pl.at[symbol,'WAP'] = updateWAP(pl.loc[symbol]['WAP'], pl.loc[symbol]['Position'], price, volume) pl.at[symbol,'Position'] += volume else: entry = pd.DataFrame([[symbol,volume,updateWAP(0,0,price,volume),0]], columns=['Ticker','Position','WAP','RPL'], index=[symbol]) pl = pl.append(entry) savePL() cash -= volume*price saveCash() # Add to blotter entry = pd.DataFrame([['Buy',symbol,volume,price,datetime.now(),cash]], columns=['Side','Ticker','Volume','Price','Date','Cash']) blotter = blotter.append(entry, ignore_index=True) db.blotter.insert_one(entry.to_dict('records')[0]) # Output status print ('Buy successful. Purchased {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price)) return True def doSell(symbol, volume): ''' Sells given amount of selected stock. Args: symbol: Stock to sell. volume: Number of shares to sell. Returns: TRUE if successful and FALSE otherwise. ''' global cash global blotter global pl global db # Check that it's a valid symbol if symbol not in symbols: print ('Sell unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol)) return False # Check that have any shares if symbol not in pl.index: print ('Sell unsuccessful. Not holding ({:s}).\n'.format(symbol)) return False # Check that have enough shares if volume>pl.loc[symbol]['Position']: print ('Sell unsuccessful. Not enough shares.\n') return False # Refresh price to get most up to date information price = getPrice(symbol) if price<0: print ('Sell unsuccessful. Could not get valid market price.\n') return False # Perform sell pl.at[symbol,'RPL'] += getPL(volume, price, pl.loc[symbol]['WAP']) pl.at[symbol,'Position'] -= volume cash += volume*price saveCash() # Reset WAP if closing the position if pl.loc[symbol]['Position']==0: pl.at[symbol,'WAP']=0 savePL() # Add to blotter entry = pd.DataFrame([['Sell',symbol,volume,price,datetime.now(),cash]], columns=['Side','Ticker','Volume','Price','Date','Cash']) blotter = blotter.append(entry, ignore_index=True) db.blotter.insert_one(entry.to_dict('records')[0]) # Output status print ('Sell successful. Sold {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price)) return True def showMenu(): '''Displays main menu and prompts for choice. Returns valid choice.''' while True: print(' - ', end='') for i in menu: print(i, end=' - ') print('') option = input('Select option: ').upper() if option in ['1','2','3','4','5','B','S','P','R','Q','1929']: return option print('Invalid choice. 
Please try again.\n') def connectDB(): '''Connects to database.''' global db client = MongoClient("mongodb://trader:traderpw@data602-shard-00-00-thsko.mongodb.net:27017,data602-shard-00-01-thsko.mongodb.net:27017,data602-shard-00-02-thsko.mongodb.net:27017/test?ssl=true&replicaSet=Data602-shard-0&authSource=admin") db = client.web_trader def retrievePL(): '''Retrieves full P/L information from the database.''' global pl global db if db.pl.count()==0: initializePL() else: pl = pd.DataFrame(list(db.pl.find({}))) del pl['_id'] pl.index = pl['Ticker'] def savePL(): '''Saves full P/L information to the database.''' global pl global db if db.pl.count()>0: db.pl.delete_many({}) if not pl.empty: db.pl.insert_many(pl.to_dict('records')) def retrieveBlotter(): '''Retrieves full Blotter information from the database.''' global blotter global db if db.blotter.count()==0: blotter = blotter.iloc[0:0] else: blotter = pd.DataFrame(list(db.blotter.find({}))) del blotter['_id'] def initializePL(): '''Resets P/L.''' global pl pl = pl.iloc[0:0] def saveCash(): '''Saves cash value in the database.''' global cash global db if db.cash.count()==0: db.cash.insert_one(dict([('position',cash)])) else: db.cash.update_one({},{'$set':dict([('position',cash)])}, upsert=False) def retrieveCash(): '''Retrieves cash value from the database.''' global cash global db if db.cash.count()==0: cash = initial_cash saveCash() else: cash = db.cash.find_one()['position'] def main(): pass print('************************************************') print('* LITTLE WEB CONSOLE EQUITIES TRADING SYSTEM *') print('* Data 602 Ilya Kats *') print('************************************************') print('\nInformation is provided for illustrative purposes only.') print('Market price from Yahoo! Finance is delayed.\n') # Initialize environment global cash global blotter global pl # Connect to database and connectDB() retrievePL() retrieveBlotter() retrieveCash() choice = '' while choice!='5' and choice!='Q': choice = showMenu() if choice=='B' or choice=='1': symbol = getSymbol('buy') if symbol!='': shares = getShares('buy') if shares>0: doBuy(symbol, shares) elif choice=='S' or choice=='2': symbol = getSymbol('sell') if symbol!='': shares = getShares('sell') if shares>0: doSell(symbol, shares) elif choice=='P' or choice=='3': showPL() elif choice=='R' or choice=='4': showBlotter() print('\nThank you for using Little Console Equities Trading System!') if __name__ == '__main__': main()
'''Prompt for and return stock symbol to buy or sell. Argument is either "buy" or "sell".''' symbol = input('Enter stock symbol to {:s}: '.format(side)).upper() if symbol not in symbols: print ('Invalid symbol. Valid symbols:') for s in symbols: print(s, end=" ") print('\n') return '' return symbol
identifier_body
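The updateWAP formula above is a plain volume-weighted average. A worked example: holding 100 shares at a WAP of 10.00 and buying 50 more at 13.00 moves the WAP to (100*10.00 + 50*13.00) / 150 = 11.00, and a first buy (position 0) simply yields the buy price:

def update_wap(current_wap, current_position, price, volume):
    # same expression as updateWAP above
    return (current_wap * current_position + price * volume) / (current_position + volume)

assert update_wap(10.00, 100, 13.00, 50) == 11.00
assert update_wap(0, 0, 13.00, 50) == 13.00  # how doBuy seeds a new position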
web-trader - console.py
# -*- coding: utf-8 -*- """ Web-Based Equities Trading System DATA 602 Assignment 2 / Prof. Jamiel Sheikh TBD Created October/November 2017 @author: Ilya Kats """ # Required libraries import urllib.request as req import pandas as pd from bs4 import BeautifulSoup
# Global variables symbols = ('AAPL','AMZN','MSFT','INTC','SNAP') menu = ['[B]uy','[S]ell','Show [P]/L','Show Blotte[r]','[Q]uit'] initial_cash = 10000000.00 cash = 0.00 blotter = pd.DataFrame(columns=['Side','Ticker','Volume','Price','Date','Cash']) pl = pd.DataFrame(columns=['Ticker','Position','WAP','RPL']) # Stock price is extracted from Yahoo! Finance page. # Rather than extracting display value from HTML, # the price is extracted from data stored in JavaScript code. def getPrice(symbol): ''' Gets current market price. Args: symbol: Ticker symbol. Returns: Current stock price for a symbol from Yahoo! Finance or -1 if price could not be extracted. ''' price = -1 url = 'https://finance.yahoo.com/quote/'+symbol page = req.urlopen(url).read() soup = BeautifulSoup(page, 'html.parser') scripts = soup.findAll('script') # Cycle through all script blocks to find the one with data # Ideally, data in JSON format should be properly converted and read # Here it is simply extracted with string functions for s in scripts: pos = s.text.find('currentPrice') if pos>0: sPrice = s.text[pos:s.text.find(',',pos)] try: price = float(sPrice[sPrice.rfind(':')+1:]) except ValueError: return -1 break return price def showBlotter(): '''Displays entire blotter''' print('\nCURRENT BLOTTER') print('{:<5s} {:<7s} {:>8s} {:>10s} {:>30s} {:>15s}'.format( 'Side','Ticker','Volume','Price','Date and Time','Cash') ) for index, row in blotter.iterrows(): print('{:<5s} {:<7s} {:>8d} {:>10.2f} {:>30s} {:>15.2f}'.format( row['Side'], row['Ticker'], row['Volume'], row['Price'], str(row['Date']), row['Cash'] )) if blotter.empty: print('[No Trades Recorded]') print('') # Using same function for calculating UPL and RPL since the formula is the same def getPL(position, price, wap): '''Calculates UPL or RPL based on position/volume, market/sell price and WAP.''' return (position*(price-wap)) def updateWAP(currentWAP, currentPosition, price, volume): '''Calculates new WAP based on previous WAP and new buy information.''' return ((currentWAP*currentPosition)+(price*volume))/(currentPosition+volume) # Display current P/L with refreshed market price def showPL(): '''Displays current P/L with updated market price.''' print('\nCURRENT P/L') print('{:<7s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format( 'Ticker','Position','Market','WAP','UPL','RPL') ) for index, row in pl.iterrows(): price = getPrice(row['Ticker']) print('{:<7s} {:>10d} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}'.format( row['Ticker'], row['Position'], price, row['WAP'], getPL(row['Position'], price, row['WAP']), row['RPL']) ) if pl.empty: print('[Holding no positions]') print('Cash: ${:,.2f}\n'.format(cash)) def getShares(side): '''Prompt for and return number of shares to buy or sell. Argument is either "buy" or "sell".''' shares = input('Enter number of shares to {:s}: '.format(side)) try: numShares = int(shares) except ValueError: print ('Invalid number of shares.\n') return -1 if numShares<0: print ('Invalid number of shares. Must be positive.\n') return -1 return numShares def getSymbol(side): '''Prompt for and return stock symbol to buy or sell. Argument is either "buy" or "sell".''' symbol = input('Enter stock symbol to {:s}: '.format(side)).upper() if symbol not in symbols: print ('Invalid symbol. Valid symbols:') for s in symbols: print(s, end=" ") print('\n') return '' return symbol def doBuy(symbol, volume): ''' Buys given amount of selected stock. Args: symbol: Stock to purchase. volume: Number of shares to purchase. 
Returns: TRUE if successful and FALSE otherwise. ''' global cash global blotter global pl global db # Check that it's a valid symbol if symbol not in symbols: print ('Buy unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol)) return False # Refresh price to get most up to date information price = getPrice(symbol) if price<0: print ('Buy unsuccessful. Could not get valid market price.\n') return False # Check that have enough cash if (volume*price)>cash: print ('Buy unsuccessful. Not enough cash.\n') return False # Perform buy - add to P/L and adjust cash position if symbol in pl.index: pl.at[symbol,'WAP'] = updateWAP(pl.loc[symbol]['WAP'], pl.loc[symbol]['Position'], price, volume) pl.at[symbol,'Position'] += volume else: entry = pd.DataFrame([[symbol,volume,updateWAP(0,0,price,volume),0]], columns=['Ticker','Position','WAP','RPL'], index=[symbol]) pl = pl.append(entry) savePL() cash -= volume*price saveCash() # Add to blotter entry = pd.DataFrame([['Buy',symbol,volume,price,datetime.now(),cash]], columns=['Side','Ticker','Volume','Price','Date','Cash']) blotter = blotter.append(entry, ignore_index=True) db.blotter.insert_one(entry.to_dict('records')[0]) # Output status print ('Buy successful. Purchased {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price)) return True def doSell(symbol, volume): ''' Sells given amount of selected stock. Args: symbol: Stock to sell. volume: Number of shares to sell. Returns: TRUE if successful and FALSE otherwise. ''' global cash global blotter global pl global db # Check that it's a valid symbol if symbol not in symbols: print ('Sell unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol)) return False # Check that have any shares if symbol not in pl.index: print ('Sell unsuccessful. Not holding ({:s}).\n'.format(symbol)) return False # Check that have enough shares if volume>pl.loc[symbol]['Position']: print ('Sell unsuccessful. Not enough shares.\n') return False # Refresh price to get most up to date information price = getPrice(symbol) if price<0: print ('Sell unsuccessful. Could not get valid market price.\n') return False # Perform sell pl.at[symbol,'RPL'] += getPL(volume, price, pl.loc[symbol]['WAP']) pl.at[symbol,'Position'] -= volume cash += volume*price saveCash() # Reset WAP if closing the position if pl.loc[symbol]['Position']==0: pl.at[symbol,'WAP']=0 savePL() # Add to blotter entry = pd.DataFrame([['Sell',symbol,volume,price,datetime.now(),cash]], columns=['Side','Ticker','Volume','Price','Date','Cash']) blotter = blotter.append(entry, ignore_index=True) db.blotter.insert_one(entry.to_dict('records')[0]) # Output status print ('Sell successful. Sold {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price)) return True def showMenu(): '''Displays main menu and prompts for choice. Returns valid choice.''' while True: print(' - ', end='') for i in menu: print(i, end=' - ') print('') option = input('Select option: ').upper() if option in ['1','2','3','4','5','B','S','P','R','Q','1929']: return option print('Invalid choice. 
Please try again.\n') def connectDB(): '''Connects to database.''' global db client = MongoClient("mongodb://trader:traderpw@data602-shard-00-00-thsko.mongodb.net:27017,data602-shard-00-01-thsko.mongodb.net:27017,data602-shard-00-02-thsko.mongodb.net:27017/test?ssl=true&replicaSet=Data602-shard-0&authSource=admin") db = client.web_trader def retrievePL(): '''Retrieves full P/L information from the database.''' global pl global db if db.pl.count()==0: initializePL() else: pl = pd.DataFrame(list(db.pl.find({}))) del pl['_id'] pl.index = pl['Ticker'] def savePL(): '''Saves full P/L information to the database.''' global pl global db if db.pl.count()>0: db.pl.delete_many({}) if not pl.empty: db.pl.insert_many(pl.to_dict('records')) def retrieveBlotter(): '''Retrieves full Blotter information from the database.''' global blotter global db if db.blotter.count()==0: blotter = blotter.iloc[0:0] else: blotter = pd.DataFrame(list(db.blotter.find({}))) del blotter['_id'] def initializePL(): '''Resets P/L.''' global pl pl = pl.iloc[0:0] def saveCash(): '''Saves cash value in the database.''' global cash global db if db.cash.count()==0: db.cash.insert_one(dict([('position',cash)])) else: db.cash.update_one({},{'$set':dict([('position',cash)])}, upsert=False) def retrieveCash(): '''Retrieves cash value from the database.''' global cash global db if db.cash.count()==0: cash = initial_cash saveCash() else: cash = db.cash.find_one()['position'] def main(): pass print('************************************************') print('* LITTLE WEB CONSOLE EQUITIES TRADING SYSTEM *') print('* Data 602 Ilya Kats *') print('************************************************') print('\nInformation is provided for illustrative purposes only.') print('Market price from Yahoo! Finance is delayed.\n') # Initialize environment global cash global blotter global pl # Connect to database and connectDB() retrievePL() retrieveBlotter() retrieveCash() choice = '' while choice!='5' and choice!='Q': choice = showMenu() if choice=='B' or choice=='1': symbol = getSymbol('buy') if symbol!='': shares = getShares('buy') if shares>0: doBuy(symbol, shares) elif choice=='S' or choice=='2': symbol = getSymbol('sell') if symbol!='': shares = getShares('sell') if shares>0: doSell(symbol, shares) elif choice=='P' or choice=='3': showPL() elif choice=='R' or choice=='4': showBlotter() print('\nThank you for using Little Console Equities Trading System!') if __name__ == '__main__': main()
from datetime import datetime from pymongo import MongoClient
random_line_split
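The persistence helpers in this file follow two patterns: savePL wipes and reinserts the whole pl collection, while cash lives in a single upserted document. The same patterns re-sketched with a current pymongo API (Collection.count(), used above, was deprecated and later removed; count_documents replaces it). The connection string and collection names are placeholders:

import pandas as pd
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # placeholder URI
db = client.web_trader

def save_pl(pl: pd.DataFrame):
    # rewrite-the-world persistence, as in savePL above
    if db.pl.count_documents({}) > 0:
        db.pl.delete_many({})
    if not pl.empty:
        db.pl.insert_many(pl.to_dict("records"))

def save_cash(cash: float):
    # one upsert replaces the insert/update branching in saveCash
    db.cash.update_one({}, {"$set": {"position": cash}}, upsert=True)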
web-trader - console.py
# -*- coding: utf-8 -*- """ Web-Based Equities Trading System DATA 602 Assignment 2 / Prof. Jamiel Sheikh TBD Created October/November 2017 @author: Ilya Kats """ # Required libraries import urllib.request as req import pandas as pd from bs4 import BeautifulSoup from datetime import datetime from pymongo import MongoClient # Global variables symbols = ('AAPL','AMZN','MSFT','INTC','SNAP') menu = ['[B]uy','[S]ell','Show [P]/L','Show Blotte[r]','[Q]uit'] initial_cash = 10000000.00 cash = 0.00 blotter = pd.DataFrame(columns=['Side','Ticker','Volume','Price','Date','Cash']) pl = pd.DataFrame(columns=['Ticker','Position','WAP','RPL']) # Stock price is extracted from Yahoo! Finance page. # Rather than extracting display value from HTML, # the price is extracted from data stored in JavaScript code. def getPrice(symbol): ''' Gets current market price. Args: symbol: Ticker symbol. Returns: Current stock price for a symbol from Yahoo! Finance or -1 if price could not be extracted. ''' price = -1 url = 'https://finance.yahoo.com/quote/'+symbol page = req.urlopen(url).read() soup = BeautifulSoup(page, 'html.parser') scripts = soup.findAll('script') # Cycle through all script blocks to find the one with data # Ideally, data in JSON format should be properly converted and read # Here it is simply extracted with string functions for s in scripts: pos = s.text.find('currentPrice') if pos>0: sPrice = s.text[pos:s.text.find(',',pos)] try: price = float(sPrice[sPrice.rfind(':')+1:]) except ValueError: return -1 break return price def showBlotter(): '''Displays entire blotter''' print('\nCURRENT BLOTTER') print('{:<5s} {:<7s} {:>8s} {:>10s} {:>30s} {:>15s}'.format( 'Side','Ticker','Volume','Price','Date and Time','Cash') ) for index, row in blotter.iterrows(): print('{:<5s} {:<7s} {:>8d} {:>10.2f} {:>30s} {:>15.2f}'.format( row['Side'], row['Ticker'], row['Volume'], row['Price'], str(row['Date']), row['Cash'] )) if blotter.empty: print('[No Trades Recorded]') print('') # Using same function for calculating UPL and RPL since the formula is the same def getPL(position, price, wap): '''Calculates UPL or RPL based on position/volume, market/sell price and WAP.''' return (position*(price-wap)) def updateWAP(currentWAP, currentPosition, price, volume): '''Calculates new WAP based on previous WAP and new buy information.''' return ((currentWAP*currentPosition)+(price*volume))/(currentPosition+volume) # Display current P/L with refreshed market price def showPL(): '''Displays current P/L with updated market price.''' print('\nCURRENT P/L') print('{:<7s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format( 'Ticker','Position','Market','WAP','UPL','RPL') ) for index, row in pl.iterrows(): price = getPrice(row['Ticker']) print('{:<7s} {:>10d} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}'.format( row['Ticker'], row['Position'], price, row['WAP'], getPL(row['Position'], price, row['WAP']), row['RPL']) ) if pl.empty: print('[Holding no positions]') print('Cash: ${:,.2f}\n'.format(cash)) def getShares(side): '''Prompt for and return number of shares to buy or sell. Argument is either "buy" or "sell".''' shares = input('Enter number of shares to {:s}: '.format(side)) try: numShares = int(shares) except ValueError: print ('Invalid number of shares.\n') return -1 if numShares<0: print ('Invalid number of shares. Must be positive.\n') return -1 return numShares def getSymbol(side): '''Prompt for and return stock symbol to buy or sell. 
Argument is either "buy" or "sell".''' symbol = input('Enter stock symbol to {:s}: '.format(side)).upper() if symbol not in symbols: print ('Invalid symbol. Valid symbols:') for s in symbols: print(s, end=" ") print('\n') return '' return symbol def doBuy(symbol, volume): ''' Buys given amount of selected stock. Args: symbol: Stock to purchase. volume: Number of shares to purchase. Returns: TRUE if successful and FALSE otherwise. ''' global cash global blotter global pl global db # Check that it's a valid symbol if symbol not in symbols: print ('Buy unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol)) return False # Refresh price to get most up to date information price = getPrice(symbol) if price<0: print ('Buy unsuccessful. Could not get valid market price.\n') return False # Check that have enough cash if (volume*price)>cash: print ('Buy unsuccessful. Not enough cash.\n') return False # Perform buy - add to P/L and adjust cash position if symbol in pl.index: pl.at[symbol,'WAP'] = updateWAP(pl.loc[symbol]['WAP'], pl.loc[symbol]['Position'], price, volume) pl.at[symbol,'Position'] += volume else: entry = pd.DataFrame([[symbol,volume,updateWAP(0,0,price,volume),0]], columns=['Ticker','Position','WAP','RPL'], index=[symbol]) pl = pl.append(entry) savePL() cash -= volume*price saveCash() # Add to blotter entry = pd.DataFrame([['Buy',symbol,volume,price,datetime.now(),cash]], columns=['Side','Ticker','Volume','Price','Date','Cash']) blotter = blotter.append(entry, ignore_index=True) db.blotter.insert_one(entry.to_dict('records')[0]) # Output status print ('Buy successful. Purchased {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price)) return True def doSell(symbol, volume): ''' Sells given amount of selected stock. Args: symbol: Stock to sell. volume: Number of shares to sell. Returns: TRUE if successful and FALSE otherwise. ''' global cash global blotter global pl global db # Check that it's a valid symbol if symbol not in symbols: print ('Sell unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol)) return False # Check that have any shares if symbol not in pl.index: print ('Sell unsuccessful. Not holding ({:s}).\n'.format(symbol)) return False # Check that have enough shares if volume>pl.loc[symbol]['Position']: print ('Sell unsuccessful. Not enough shares.\n') return False # Refresh price to get most up to date information price = getPrice(symbol) if price<0: print ('Sell unsuccessful. Could not get valid market price.\n') return False # Perform sell pl.at[symbol,'RPL'] += getPL(volume, price, pl.loc[symbol]['WAP']) pl.at[symbol,'Position'] -= volume cash += volume*price saveCash() # Reset WAP if closing the position if pl.loc[symbol]['Position']==0: pl.at[symbol,'WAP']=0 savePL() # Add to blotter entry = pd.DataFrame([['Sell',symbol,volume,price,datetime.now(),cash]], columns=['Side','Ticker','Volume','Price','Date','Cash']) blotter = blotter.append(entry, ignore_index=True) db.blotter.insert_one(entry.to_dict('records')[0]) # Output status print ('Sell successful. Sold {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price)) return True def showMenu(): '''Displays main menu and prompts for choice. Returns valid choice.''' while True: print(' - ', end='') for i in menu: print(i, end=' - ') print('') option = input('Select option: ').upper() if option in ['1','2','3','4','5','B','S','P','R','Q','1929']:
print('Invalid choice. Please try again.\n') def connectDB(): '''Connects to database.''' global db client = MongoClient("mongodb://trader:traderpw@data602-shard-00-00-thsko.mongodb.net:27017,data602-shard-00-01-thsko.mongodb.net:27017,data602-shard-00-02-thsko.mongodb.net:27017/test?ssl=true&replicaSet=Data602-shard-0&authSource=admin") db = client.web_trader def retrievePL(): '''Retrieves full P/L information from the database.''' global pl global db if db.pl.count()==0: initializePL() else: pl = pd.DataFrame(list(db.pl.find({}))) del pl['_id'] pl.index = pl['Ticker'] def savePL(): '''Saves full P/L information to the database.''' global pl global db if db.pl.count()>0: db.pl.delete_many({}) if not pl.empty: db.pl.insert_many(pl.to_dict('records')) def retrieveBlotter(): '''Retrieves full Blotter information from the database.''' global blotter global db if db.blotter.count()==0: blotter = blotter.iloc[0:0] else: blotter = pd.DataFrame(list(db.blotter.find({}))) del blotter['_id'] def initializePL(): '''Resets P/L.''' global pl pl = pl.iloc[0:0] def saveCash(): '''Saves cash value in the database.''' global cash global db if db.cash.count()==0: db.cash.insert_one(dict([('position',cash)])) else: db.cash.update_one({},{'$set':dict([('position',cash)])}, upsert=False) def retrieveCash(): '''Retrieves cash value from the database.''' global cash global db if db.cash.count()==0: cash = initial_cash saveCash() else: cash = db.cash.find_one()['position'] def main(): pass print('************************************************') print('* LITTLE WEB CONSOLE EQUITIES TRADING SYSTEM *') print('* Data 602 Ilya Kats *') print('************************************************') print('\nInformation is provided for illustrative purposes only.') print('Market price from Yahoo! Finance is delayed.\n') # Initialize environment global cash global blotter global pl # Connect to database and connectDB() retrievePL() retrieveBlotter() retrieveCash() choice = '' while choice!='5' and choice!='Q': choice = showMenu() if choice=='B' or choice=='1': symbol = getSymbol('buy') if symbol!='': shares = getShares('buy') if shares>0: doBuy(symbol, shares) elif choice=='S' or choice=='2': symbol = getSymbol('sell') if symbol!='': shares = getShares('sell') if shares>0: doSell(symbol, shares) elif choice=='P' or choice=='3': showPL() elif choice=='R' or choice=='4': showBlotter() print('\nThank you for using Little Console Equities Trading System!') if __name__ == '__main__': main()
return option
conditional_block
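getPL deliberately reuses one expression, position * (price - wap), for both unrealized and realized P/L. For example, selling 50 shares carried at a WAP of 11.00 for 12.50 realizes 50 * 1.50 = 75.00, while the same formula against the live market price values whatever is still held:

def get_pl(position, price, wap):
    return position * (price - wap)

print(get_pl(50, 12.50, 11.00))   # RPL on the sold lot: 75.0
print(get_pl(100, 12.50, 11.00))  # UPL on a remaining 100-share position: 150.0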
provider_test.go
package fs_test import ( "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "errors" "math/big" "github.com/sirupsen/logrus" logrustest "github.com/sirupsen/logrus/hooks/test" "golang.org/x/crypto/ssh" boshsysfakes "github.com/cloudfoundry/bosh-utils/system/fakes" . "github.com/dpb587/ssoca/certauth/fs" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("Provider", func() { var subject Provider var fs boshsysfakes.FakeFileSystem var logger logrus.FieldLogger // certstrap init --key-bits 1024 --common-name ssoca-test --passphrase '' var ca1crtStr = `-----BEGIN CERTIFICATE----- MIIB5TCCAU6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwpzc29j YS10ZXN0MB4XDTE3MDIxMzIwMzMwOFoXDTI3MDIxMzIwMzMwOFowFTETMBEGA1UE AxMKc3NvY2EtdGVzdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA6Td3vsA/ f9lHQPeIzJB9J1JTXaDVfqoClU2ZRlua7BmlfXOQngo/1OmetO3THEr+mxFMGgfJ Z6CujBRd3A7/3h+Iw72jKGBag4iEBI9uBcyeRgWdzcV7l7dzQT00XCBnkJJRJV4j oDIovquAz6iKf4Al4wyQ5k1RM9KAlUipIFkCAwEAAaNFMEMwDgYDVR0PAQH/BAQD AgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwHQYDVR0OBBYEFP8lIbNl3zZPEHF17cFU NFsK/0/oMA0GCSqGSIb3DQEBCwUAA4GBADMCd4nzc19voa60lNknhsihcfyNUeUt EEsLCceK+9F1u2Xdj+mTNOh3MI+5m7wmFLiHuUtQovHMJ4xUpoHa6Iznc+QCbow4 SMO3sf1847tASv3eUFwEUt9vv39vtey6C6ftiUUImzZYfx6FO/A62uGEg2w3IOJ+ 3cCXYiulfsyv -----END CERTIFICATE-----` var ca1keyStr = `-----BEGIN RSA PRIVATE KEY----- MIICXwIBAAKBgQDpN3e+wD9/2UdA94jMkH0nUlNdoNV+qgKVTZlGW5rsGaV9c5Ce Cj/U6Z607dMcSv6bEUwaB8lnoK6MFF3cDv/eH4jDvaMoYFqDiIQEj24FzJ5GBZ3N xXuXt3NBPTRcIGeQklElXiOgMii+q4DPqIp/gCXjDJDmTVEz0oCVSKkgWQIDAQAB AoGBANC3T3drXmjw74/4+Hj7Jsa2Kt20Pt1pEX7FP9Nz0CZUnYK0lkyaJ55IpjyO S00a4NmulUkGhv0zFINRBt8WnW1bjBxNmqyBYh2diO3vA/gk8U1gcifW1LQt8WmE ietvN3OFXI1a7FipchCZYQn5Rr8O3a/tjwohtWIDdaDltw+xAkEA7Ybxu8OXQnvy Y+fDISRGG5vDFGnNGe9KcREIxSF6LWJ7+ap5LmMxnhfag5qlrObQW3K2miTpGYkl CIRRNFMIvwJBAPtatE1evu25R3NSTU2YwQgkEymh40PW+lncYge6ZqZGfK7J5JBK wr1ug7KjTJgIfY2Sg2VHn56HAdA4RUl2xOcCQQDZqnTxpQ6DHYSFqwg04cHhYP8H QOF0Z8WnEX4g8Em/N2X26BK+wKXig2d6fIhghu/fLaNKZJK8FOK8CE1GDuWPAkEA wrP6Ysx3vZH+JPil5Ovk6zd2mJNMhmpqt10dmrrrdPW483R01sjynOaUobYZSNOa 3iWWHsgifxw5bV+JXGTiFQJBAKwh6Hvli5hcfoepPMz2RQnmU1NM8hJOHHeZh+eT z6hlMpOS9rSjABcBdXxXjFXtIEjWUG5Tj8yOYd735zY8Ny8= -----END RSA PRIVATE KEY-----` pemToCertificate := func(bytes []byte) x509.Certificate { pem, _ := pem.Decode(bytes) if pem == nil { panic("failed decoding PEM") } certificate, err := x509.ParseCertificate(pem.Bytes) if err != nil { panic(err) } return *certificate } BeforeEach(func() { fs = *boshsysfakes.NewFakeFileSystem() fs.WriteFileString("/path/ca1/certificate", ca1crtStr) fs.WriteFileString("/path/ca1/private_key", ca1keyStr) fs.WriteFileString("/path/ca2/certificate", "broken") fs.WriteFileString("/path/ca2/private_key", "broken") logger, _ = logrustest.NewNullLogger() }) Describe("SignCertificate", func() { var testKey *rsa.PrivateKey var template x509.Certificate BeforeEach(func() { var err error testKey, err = rsa.GenerateKey(rand.Reader, 1024) if err != nil { Fail("generating private key") } template = x509.Certificate{ SerialNumber: big.NewInt(12345), Subject: pkix.Name{ CommonName: "ssoca-fake1", }, } }) It("signs certificate", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca1/private_key", }, &fs, logger, ) bytes, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{}) Expect(err).ToNot(HaveOccurred()) Expect(len(bytes)).To(BeNumerically(">", 0)) certificate := pemToCertificate(bytes) 
Expect(certificate.SerialNumber).To(BeEquivalentTo(big.NewInt(12345))) Expect(certificate.Subject.CommonName).To(Equal("ssoca-fake1")) caCertificate := pemToCertificate([]byte(ca1crtStr)) err = certificate.CheckSignatureFrom(&caCertificate) Expect(err).ToNot(HaveOccurred()) }) Context("certificate/key errors", func() { It("errors on missing certificate", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca0/certificate", PrivateKeyPath: "/path/ca1/private_key", }, &fs, logger, ) _, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA certificate")) }) It("errors on missing private key", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca0/private_key", }, &fs, logger, ) _, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA private key")) }) It("errors on misconfigured private key", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca1/certificate", }, &fs, logger, ) _, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA private key")) }) It("errors on invalid private key", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca2/certificate", }, &fs, logger, ) _, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA private key")) }) }) }) Describe("SignSSHCertificate", func() { var testKey *rsa.PrivateKey var cert ssh.Certificate BeforeEach(func() { var err error testKey, err = rsa.GenerateKey(rand.Reader, 1024) if err != nil { Fail("generating private key") } publicKey, err := ssh.NewPublicKey(&testKey.PublicKey) if err != nil { Fail("parsing to public key") } cert = ssh.Certificate{ Nonce: []byte("ssoca-fake1"), Key: publicKey, CertType: ssh.UserCert, } }) It("signs certificate", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca1/private_key", }, &fs, logger, ) Expect(cert.Signature).To(BeNil()) err := subject.SignSSHCertificate(&cert, logrus.Fields{}) Expect(err).ToNot(HaveOccurred()) // @todo use Verify instead Expect(cert.Signature).ToNot(BeNil()) }) Context("certificate/key errors", func() { It("errors on missing private key", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca0/private_key", }, &fs, logger, ) err := subject.SignSSHCertificate(&cert, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA private key")) }) It("errors on misconfigured private key", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca1/certificate", }, &fs, logger, ) err := subject.SignSSHCertificate(&cert, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA private key")) }) It("errors on invalid private key", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca2/certificate", }, &fs, logger, ) err := 
subject.SignSSHCertificate(&cert, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA private key")) }) }) }) Describe("GetCertificate", func() { It("provides certificate", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca1/private_key", }, &fs, logger, ) crt, err := subject.GetCertificate() Expect(err).ToNot(HaveOccurred()) Expect(crt).To(BeAssignableToTypeOf(&x509.Certificate{})) Expect(crt.IsCA).To(BeTrue()) Expect(crt.Subject.CommonName).To(Equal("ssoca-test")) }) Context("filesystem errors", func() { It("errors", func() { subject = NewProvider(
Config{ CertificatePath: "/path/ca0/certificate", PrivateKeyPath: "/path/ca0/private_key", }, &fs, logger, ) _, err := subject.GetCertificate() Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("reading certificate")) }) }) Context("bad certificate contents", func() { It("errors", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca2/certificate", PrivateKeyPath: "/path/ca2/private_key", }, &fs, logger, ) _, err := subject.GetCertificate() Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("failed decoding certificate PEM")) }) }) Context("invalid certificate format", func() { It("errors", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/private_key", PrivateKeyPath: "/path/ca1/private_key", }, &fs, logger, ) _, err := subject.GetCertificate() Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("parsing certificate")) }) }) }) Describe("GetCertificatePEM", func() { var subject Provider BeforeEach(func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca1/private_key", }, &fs, logger, ) }) It("provides string", func() { crt, err := subject.GetCertificatePEM() Expect(err).ToNot(HaveOccurred()) Expect(crt).To(Equal(ca1crtStr)) }) Context("with filesystem error", func() { It("errors", func() { fs.RegisterReadFileError("/path/ca1/certificate", errors.New("fake-error1")) _, err := subject.GetCertificatePEM() Expect(err).To(HaveOccurred()) }) }) }) })
"name1",
random_line_split
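The assertion at the heart of the signing test above is certificate.CheckSignatureFrom(&caCertificate). The equivalent check sketched in Python with the cryptography package (the file paths are placeholders, and since the fixture CA is an RSA key, PKCS#1 v1.5 padding applies):

from cryptography import x509
from cryptography.hazmat.primitives.asymmetric import padding

with open("/path/ca1/certificate", "rb") as f:
    ca_cert = x509.load_pem_x509_certificate(f.read())
with open("/path/signed/certificate", "rb") as f:  # hypothetical leaf cert
    leaf = x509.load_pem_x509_certificate(f.read())

# raises cryptography.exceptions.InvalidSignature if the CA did not sign it
ca_cert.public_key().verify(
    leaf.signature,
    leaf.tbs_certificate_bytes,
    padding.PKCS1v15(),
    leaf.signature_hash_algorithm,
)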
provider_test.go
package fs_test import ( "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "errors" "math/big" "github.com/sirupsen/logrus" logrustest "github.com/sirupsen/logrus/hooks/test" "golang.org/x/crypto/ssh" boshsysfakes "github.com/cloudfoundry/bosh-utils/system/fakes" . "github.com/dpb587/ssoca/certauth/fs" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("Provider", func() { var subject Provider var fs boshsysfakes.FakeFileSystem var logger logrus.FieldLogger // certstrap init --key-bits 1024 --common-name ssoca-test --passphrase '' var ca1crtStr = `-----BEGIN CERTIFICATE----- MIIB5TCCAU6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwpzc29j YS10ZXN0MB4XDTE3MDIxMzIwMzMwOFoXDTI3MDIxMzIwMzMwOFowFTETMBEGA1UE AxMKc3NvY2EtdGVzdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA6Td3vsA/ f9lHQPeIzJB9J1JTXaDVfqoClU2ZRlua7BmlfXOQngo/1OmetO3THEr+mxFMGgfJ Z6CujBRd3A7/3h+Iw72jKGBag4iEBI9uBcyeRgWdzcV7l7dzQT00XCBnkJJRJV4j oDIovquAz6iKf4Al4wyQ5k1RM9KAlUipIFkCAwEAAaNFMEMwDgYDVR0PAQH/BAQD AgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwHQYDVR0OBBYEFP8lIbNl3zZPEHF17cFU NFsK/0/oMA0GCSqGSIb3DQEBCwUAA4GBADMCd4nzc19voa60lNknhsihcfyNUeUt EEsLCceK+9F1u2Xdj+mTNOh3MI+5m7wmFLiHuUtQovHMJ4xUpoHa6Iznc+QCbow4 SMO3sf1847tASv3eUFwEUt9vv39vtey6C6ftiUUImzZYfx6FO/A62uGEg2w3IOJ+ 3cCXYiulfsyv -----END CERTIFICATE-----` var ca1keyStr = `-----BEGIN RSA PRIVATE KEY----- MIICXwIBAAKBgQDpN3e+wD9/2UdA94jMkH0nUlNdoNV+qgKVTZlGW5rsGaV9c5Ce Cj/U6Z607dMcSv6bEUwaB8lnoK6MFF3cDv/eH4jDvaMoYFqDiIQEj24FzJ5GBZ3N xXuXt3NBPTRcIGeQklElXiOgMii+q4DPqIp/gCXjDJDmTVEz0oCVSKkgWQIDAQAB AoGBANC3T3drXmjw74/4+Hj7Jsa2Kt20Pt1pEX7FP9Nz0CZUnYK0lkyaJ55IpjyO S00a4NmulUkGhv0zFINRBt8WnW1bjBxNmqyBYh2diO3vA/gk8U1gcifW1LQt8WmE ietvN3OFXI1a7FipchCZYQn5Rr8O3a/tjwohtWIDdaDltw+xAkEA7Ybxu8OXQnvy Y+fDISRGG5vDFGnNGe9KcREIxSF6LWJ7+ap5LmMxnhfag5qlrObQW3K2miTpGYkl CIRRNFMIvwJBAPtatE1evu25R3NSTU2YwQgkEymh40PW+lncYge6ZqZGfK7J5JBK wr1ug7KjTJgIfY2Sg2VHn56HAdA4RUl2xOcCQQDZqnTxpQ6DHYSFqwg04cHhYP8H QOF0Z8WnEX4g8Em/N2X26BK+wKXig2d6fIhghu/fLaNKZJK8FOK8CE1GDuWPAkEA wrP6Ysx3vZH+JPil5Ovk6zd2mJNMhmpqt10dmrrrdPW483R01sjynOaUobYZSNOa 3iWWHsgifxw5bV+JXGTiFQJBAKwh6Hvli5hcfoepPMz2RQnmU1NM8hJOHHeZh+eT z6hlMpOS9rSjABcBdXxXjFXtIEjWUG5Tj8yOYd735zY8Ny8= -----END RSA PRIVATE KEY-----` pemToCertificate := func(bytes []byte) x509.Certificate { pem, _ := pem.Decode(bytes) if pem == nil { panic("failed decoding PEM") } certificate, err := x509.ParseCertificate(pem.Bytes) if err != nil { panic(err) } return *certificate } BeforeEach(func() { fs = *boshsysfakes.NewFakeFileSystem() fs.WriteFileString("/path/ca1/certificate", ca1crtStr) fs.WriteFileString("/path/ca1/private_key", ca1keyStr) fs.WriteFileString("/path/ca2/certificate", "broken") fs.WriteFileString("/path/ca2/private_key", "broken") logger, _ = logrustest.NewNullLogger() }) Describe("SignCertificate", func() { var testKey *rsa.PrivateKey var template x509.Certificate BeforeEach(func() { var err error testKey, err = rsa.GenerateKey(rand.Reader, 1024) if err != nil { Fail("generating private key") } template = x509.Certificate{ SerialNumber: big.NewInt(12345), Subject: pkix.Name{ CommonName: "ssoca-fake1", }, } }) It("signs certificate", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca1/private_key", }, &fs, logger, ) bytes, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{}) Expect(err).ToNot(HaveOccurred()) Expect(len(bytes)).To(BeNumerically(">", 0)) certificate := pemToCertificate(bytes) 
Expect(certificate.SerialNumber).To(BeEquivalentTo(big.NewInt(12345))) Expect(certificate.Subject.CommonName).To(Equal("ssoca-fake1")) caCertificate := pemToCertificate([]byte(ca1crtStr)) err = certificate.CheckSignatureFrom(&caCertificate) Expect(err).ToNot(HaveOccurred()) }) Context("certificate/key errors", func() { It("errors on missing certificate", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca0/certificate", PrivateKeyPath: "/path/ca1/private_key", }, &fs, logger, ) _, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA certificate")) }) It("errors on missing private key", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca0/private_key", }, &fs, logger, ) _, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA private key")) }) It("errors on misconfigured private key", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca1/certificate", }, &fs, logger, ) _, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA private key")) }) It("errors on invalid private key", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca2/certificate", }, &fs, logger, ) _, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA private key")) }) }) }) Describe("SignSSHCertificate", func() { var testKey *rsa.PrivateKey var cert ssh.Certificate BeforeEach(func() { var err error testKey, err = rsa.GenerateKey(rand.Reader, 1024) if err != nil { Fail("generating private key") } publicKey, err := ssh.NewPublicKey(&testKey.PublicKey) if err != nil
cert = ssh.Certificate{ Nonce: []byte("ssoca-fake1"), Key: publicKey, CertType: ssh.UserCert, } }) It("signs certificate", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca1/private_key", }, &fs, logger, ) Expect(cert.Signature).To(BeNil()) err := subject.SignSSHCertificate(&cert, logrus.Fields{}) Expect(err).ToNot(HaveOccurred()) // @todo use Verify instead Expect(cert.Signature).ToNot(BeNil()) }) Context("certificate/key errors", func() { It("errors on missing private key", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca0/private_key", }, &fs, logger, ) err := subject.SignSSHCertificate(&cert, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA private key")) }) It("errors on misconfigured private key", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca1/certificate", }, &fs, logger, ) err := subject.SignSSHCertificate(&cert, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA private key")) }) It("errors on invalid private key", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca2/certificate", }, &fs, logger, ) err := subject.SignSSHCertificate(&cert, logrus.Fields{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("getting CA private key")) }) }) }) Describe("GetCertificate", func() { It("provides certificate", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca1/private_key", }, &fs, logger, ) crt, err := subject.GetCertificate() Expect(err).ToNot(HaveOccurred()) Expect(crt).To(BeAssignableToTypeOf(&x509.Certificate{})) Expect(crt.IsCA).To(BeTrue()) Expect(crt.Subject.CommonName).To(Equal("ssoca-test")) }) Context("filesystem errors", func() { It("errors", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca0/certificate", PrivateKeyPath: "/path/ca0/private_key", }, &fs, logger, ) _, err := subject.GetCertificate() Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("reading certificate")) }) }) Context("bad certificate contents", func() { It("errors", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca2/certificate", PrivateKeyPath: "/path/ca2/private_key", }, &fs, logger, ) _, err := subject.GetCertificate() Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("failed decoding certificate PEM")) }) }) Context("invalid certificate format", func() { It("errors", func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/private_key", PrivateKeyPath: "/path/ca1/private_key", }, &fs, logger, ) _, err := subject.GetCertificate() Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("parsing certificate")) }) }) }) Describe("GetCertificatePEM", func() { var subject Provider BeforeEach(func() { subject = NewProvider( "name1", Config{ CertificatePath: "/path/ca1/certificate", PrivateKeyPath: "/path/ca1/private_key", }, &fs, logger, ) }) It("provides string", func() { crt, err := subject.GetCertificatePEM() Expect(err).ToNot(HaveOccurred()) Expect(crt).To(Equal(ca1crtStr)) }) Context("with filesystem error", func() { It("errors", func() { fs.RegisterReadFileError("/path/ca1/certificate", errors.New("fake-error1")) _, err := 
subject.GetCertificatePEM() Expect(err).To(HaveOccurred()) }) }) }) })
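// The suite above calls a pemToCertificate helper that is defined elsewhere in
// the test package. A minimal sketch of what such a helper could look like is
// shown below; this is an assumption for illustration, not the package's
// actual implementation (it additionally assumes encoding/pem is imported):
//
//	func pemToCertificate(data []byte) x509.Certificate {
//		block, _ := pem.Decode(data) // assumes a well-formed test fixture PEM
//		crt, err := x509.ParseCertificate(block.Bytes)
//		if err != nil {
//			panic(err) // fixtures are trusted in tests
//		}
//		return *crt
//	}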
{ Fail("parsing to public key") }
MessageSigner.go
// Package messaging for signing and encryption of messages
package messaging

import (
	"crypto/ecdsa"
	"crypto/rand"
	"crypto/sha256"
	"encoding/asn1"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"reflect"

	"github.com/iotdomain/iotdomain-go/types"
	"github.com/sirupsen/logrus"
	"gopkg.in/square/go-jose.v2"
)

// MessageSigner for signing and verifying of signed and encrypted messages
type MessageSigner struct {
	// GetPublicKey, when available, is used to verify message signatures
	GetPublicKey func(address string) *ecdsa.PublicKey // must be a variable
	messenger    IMessenger
	signMessages bool              // flag, sign outgoing messages. Default is true. Disable for testing
	privateKey   *ecdsa.PrivateKey // private key for signing and decryption
}

// DecodeMessage decrypts the message and verifies the sender signature.
// The sender and signer of the message is contained in the message's 'sender' field. If the
// sender field is missing then the 'address' field is used as sender.
// object must hold the expected message type to decode the JSON message containing the sender info.
func (signer *MessageSigner) DecodeMessage(rawMessage string, object interface{}) (isEncrypted bool, isSigned bool, err error) {
	dmessage, isEncrypted, err := DecryptMessage(rawMessage, signer.privateKey)
	if err != nil {
		// decryption of an encrypted message failed; don't attempt verification
		return isEncrypted, false, err
	}
	isSigned, err = VerifySenderJWSSignature(dmessage, object, signer.GetPublicKey)
	return isEncrypted, isSigned, err
}

// SignMessages returns whether messages MUST be signed on sending or receiving
func (signer *MessageSigner) SignMessages() bool {
	return signer.signMessages
}

// VerifySignedMessage parses and verifies the message signature.
// As per standard, the sender and signer of the message is in the message 'sender' field. If the
// sender field is missing then the 'address' field contains the publisher.
func (signer *MessageSigner) VerifySignedMessage(rawMessage string, object interface{}) (isSigned bool, err error) {
	isSigned, err = VerifySenderJWSSignature(rawMessage, object, signer.GetPublicKey)
	return isSigned, err
}

// PublishObject encapsulates the message object in a payload, signs the message, and sends it.
// If an encryption key is provided then the signed message will be encrypted.
// The object to publish is marshalled to JSON and signed by this publisher.
func (signer *MessageSigner) PublishObject(address string, retained bool, object interface{}, encryptionKey *ecdsa.PublicKey) error {
	if object == nil {
		return errors.New("Publisher.publishMessage: object to publish is nil")
	}
	payload, err := json.MarshalIndent(object, " ", " ")
	if err != nil {
		return fmt.Errorf("Publisher.publishMessage: Error marshalling message for address %s: %s", address, err)
	}
	if encryptionKey != nil {
		err = signer.PublishEncrypted(address, retained, string(payload), encryptionKey)
	} else {
		err = signer.PublishSigned(address, retained, string(payload))
	}
	return err
}
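// The sketch below illustrates the intended PublishObject flow; it is not part
// of the package API. The SensorReading type and the address are hypothetical
// stand-ins chosen for this example:
//
//	type SensorReading struct {
//		Sender string  `json:"sender"`
//		Value  float64 `json:"value"`
//	}
//
//	func publishReading(signer *MessageSigner, encryptionKey *ecdsa.PublicKey) error {
//		reading := &SensorReading{Sender: "domain/publisher1/$identity", Value: 21.5}
//		// with a nil encryption key the message is only signed, not encrypted
//		return signer.PublishObject("domain/publisher1/sensor1/$value", false, reading, encryptionKey)
//	}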
// SetSignMessages enables or disables message signing. Intended for testing.
func (signer *MessageSigner) SetSignMessages(sign bool) {
	signer.signMessages = sign
}

// Subscribe to messages on the given address
func (signer *MessageSigner) Subscribe(
	address string, handler func(address string, message string) error) {
	signer.messenger.Subscribe(address, handler)
}

// Unsubscribe from messages on the given address
func (signer *MessageSigner) Unsubscribe(
	address string, handler func(address string, message string) error) {
	signer.messenger.Unsubscribe(address, handler)
}

// PublishEncrypted signs and encrypts the payload and publishes the resulting message
// on the given address. Signing only happens if message signing is enabled.
func (signer *MessageSigner) PublishEncrypted(
	address string, retained bool, payload string, publicKey *ecdsa.PublicKey) error {
	message := payload
	// first sign, then encrypt as per RFC
	if signer.signMessages {
		var err error
		message, err = CreateJWSSignature(payload, signer.privateKey)
		if err != nil {
			return fmt.Errorf("PublishEncrypted: Error signing message for address %s: %s", address, err)
		}
	}
	emessage, err := EncryptMessage(message, publicKey)
	if err != nil {
		return fmt.Errorf("PublishEncrypted: Error encrypting message for address %s: %s", address, err)
	}
	return signer.messenger.Publish(address, retained, emessage)
}

// PublishSigned signs the payload and publishes the resulting message on the given address.
// Signing only happens if message signing is enabled.
func (signer *MessageSigner) PublishSigned(
	address string, retained bool, payload string) error {
	// default is unsigned
	message := payload
	if signer.signMessages {
		var err error
		message, err = CreateJWSSignature(payload, signer.privateKey)
		if err != nil {
			logrus.Errorf("PublishSigned: Error signing message for address %s: %s", address, err)
			return err
		}
	}
	return signer.messenger.Publish(address, retained, message)
}

// NewMessageSigner creates a new instance for signing and verifying published messages.
// If getPublicKey is not provided, verification of signatures is skipped.
func NewMessageSigner(messenger IMessenger, signingKey *ecdsa.PrivateKey,
	getPublicKey func(address string) *ecdsa.PublicKey,
) *MessageSigner {
	signer := &MessageSigner{
		GetPublicKey: getPublicKey,
		messenger:    messenger,
		signMessages: true,
		privateKey:   signingKey, // private key for signing
	}
	return signer
}

/*
 * Helper Functions for signing and verification
 */

// CreateEcdsaSignature creates an ECDSA256 signature of the payload using the provided private key.
// This returns a base64url encoded signature, or an empty string on failure.
func CreateEcdsaSignature(payload []byte, privateKey *ecdsa.PrivateKey) string {
	if privateKey == nil {
		return ""
	}
	hashed := sha256.Sum256(payload)
	r, s, err := ecdsa.Sign(rand.Reader, privateKey, hashed[:])
	if err != nil {
		return ""
	}
	sig, err := asn1.Marshal(ECDSASignature{r, s})
	if err != nil {
		return ""
	}
	return base64.URLEncoding.EncodeToString(sig)
}

// SignIdentity updates the base64url encoded ECDSA256 signature of the public identity
func SignIdentity(publicIdent *types.PublisherIdentityMessage, privKey *ecdsa.PrivateKey) {
	// the signature is calculated with the signature field left empty
	identCopy := *publicIdent
	identCopy.IdentitySignature = ""
	payload, _ := json.Marshal(identCopy)
	sigStr := CreateEcdsaSignature(payload, privKey)
	publicIdent.IdentitySignature = sigStr
}

// CreateJWSSignature signs the payload using JWS ES256 and returns the JWS compact serialized message
func CreateJWSSignature(payload string, privateKey *ecdsa.PrivateKey) (string, error) {
	joseSigner, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: privateKey}, nil)
	if err != nil {
		return "", err
	}
	signedObject, err := joseSigner.Sign([]byte(payload))
	if err != nil {
		return "", err
	}
	serialized, err := signedObject.CompactSerialize()
	return serialized, err
}
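// A minimal round-trip sketch of the JWS helpers (illustrative only, not part
// of the package API). It assumes the caller already holds an ECDSA P-256
// private key, e.g. from ecdsa.GenerateKey(elliptic.P256(), rand.Reader):
//
//	func jwsRoundTrip(privKey *ecdsa.PrivateKey) error {
//		serialized, err := CreateJWSSignature(`{"sender":"pub1"}`, privKey)
//		if err != nil {
//			return err
//		}
//		// VerifyJWSMessage (defined below) checks the signature with the public key
//		payload, err := VerifyJWSMessage(serialized, &privKey.PublicKey)
//		if err != nil {
//			return err
//		}
//		fmt.Println("verified payload:", payload)
//		return nil
//	}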
// DecryptMessage deserializes and decrypts the message using JWE.
// This returns the decrypted message, or the input message if the message was not encrypted.
func DecryptMessage(serialized string, privateKey *ecdsa.PrivateKey) (message string, isEncrypted bool, err error) {
	message = serialized
	decrypter, err := jose.ParseEncrypted(serialized)
	if err != nil {
		// the message is not JWE encrypted; return the input as-is
		return message, false, nil
	}
	dmessage, err := decrypter.Decrypt(privateKey)
	return string(dmessage), true, err
}

// EncryptMessage encrypts and serializes the message using JWE
func EncryptMessage(message string, publicKey *ecdsa.PublicKey) (serialized string, err error) {
	recpnt := jose.Recipient{Algorithm: jose.ECDH_ES, Key: publicKey}
	encrypter, err := jose.NewEncrypter(jose.A128CBC_HS256, recpnt, nil)
	if err != nil {
		return message, err
	}
	jwe, err := encrypter.Encrypt([]byte(message))
	if err != nil {
		return message, err
	}
	serialized, err = jwe.CompactSerialize()
	return serialized, err
}

// VerifyIdentitySignature verifies the base64url encoded ECDSA256 signature in the identity
// against the identity itself using the sender's public key.
func VerifyIdentitySignature(ident *types.PublisherIdentityMessage, pubKey *ecdsa.PublicKey) error {
	// the signing took place with the signature field empty
	identCopy := *ident
	identCopy.IdentitySignature = ""
	payload, _ := json.Marshal(identCopy)
	return VerifyEcdsaSignature(payload, ident.IdentitySignature, pubKey)
}

// VerifyEcdsaSignature verifies the payload against the base64url encoded signature using the public key.
// payload is any raw data.
// signatureB64urlEncoded is the base64url encoded ECDSA256 signature.
// Intended for verifying an object like the publisher identity. Use VerifyJWSMessage for
// verifying JWS signed messages.
func VerifyEcdsaSignature(payload []byte, signatureB64urlEncoded string, publicKey *ecdsa.PublicKey) error {
	var rs ECDSASignature
	if publicKey == nil {
		return errors.New("VerifyEcdsaSignature: publicKey is nil")
	}
	signature, err := base64.URLEncoding.DecodeString(signatureB64urlEncoded)
	if err != nil {
		return errors.New("VerifyEcdsaSignature: Invalid signature encoding")
	}
	if _, err = asn1.Unmarshal(signature, &rs); err != nil {
		return errors.New("VerifyEcdsaSignature: Signature is not valid ASN.1")
	}
	hashed := sha256.Sum256(payload)
	verified := ecdsa.Verify(publicKey, hashed[:], rs.R, rs.S)
	if !verified {
		return errors.New("VerifyEcdsaSignature: Signature does not match payload")
	}
	return nil
}

// VerifyJWSMessage verifies a signed message and returns its payload.
// The message is a JWS encoded string. The public key of the sender is
// needed to verify the message.
// Intended for testing, as the application uses VerifySenderJWSSignature instead.
func VerifyJWSMessage(message string, publicKey *ecdsa.PublicKey) (payload string, err error) {
	if publicKey == nil {
		err := errors.New("VerifyJWSMessage: public key is nil")
		return "", err
	}
	jwsSignature, err := jose.ParseSigned(message)
	if err != nil {
		return "", err
	}
	payloadB, err := jwsSignature.Verify(publicKey)
	return string(payloadB), err
}
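// Illustrative sketch of the identity sign/verify round trip using the helpers
// above (not part of the package API); it assumes a minimally populated
// identity message is sufficient for the example:
//
//	func identityRoundTrip(privKey *ecdsa.PrivateKey) error {
//		ident := &types.PublisherIdentityMessage{}
//		SignIdentity(ident, privKey)
//		// verification must use the public key matching the signing key
//		return VerifyIdentitySignature(ident, &privKey.PublicKey)
//	}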
// VerifySenderJWSSignature verifies whether a message is JWS signed. If signed, the signature is verified
// using the 'Sender' or 'Address' attributes to determine the public key to verify with.
// To verify correctly, the sender has to be a known publisher and verified with the DSS.
// object MUST be a pointer to the type, otherwise unmarshalling fails.
//
// getPublicKey is a lookup function providing the public key for the given sender address.
// It should only provide a public key if the publisher is known and verified by the DSS, or
// if this zone does not use a DSS (publishers are protected through message bus ACLs).
// If not provided then signature verification will always succeed.
//
// The rawMessage is JSON unmarshalled into the given object.
//
// This returns a flag whether the message was signed and, if so, an error if the verification failed.
func VerifySenderJWSSignature(rawMessage string, object interface{}, getPublicKey func(address string) *ecdsa.PublicKey) (isSigned bool, err error) {

	jwsSignature, err := jose.ParseSigned(rawMessage)
	if err != nil {
		// message is (probably) not signed, try to unmarshal it directly
		err = json.Unmarshal([]byte(rawMessage), object)
		return false, err
	}
	payload := jwsSignature.UnsafePayloadWithoutVerification()
	err = json.Unmarshal([]byte(payload), object)
	if err != nil {
		// message doesn't have a JSON payload
		errTxt := fmt.Sprintf("VerifySenderJWSSignature: Signature okay but message unmarshal failed: %s", err)
		return true, errors.New(errTxt)
	}
	// determine who the sender is: prefer the 'Sender' field, fall back to 'Address'
	reflObject := reflect.ValueOf(object).Elem()
	reflSender := reflObject.FieldByName("Sender")
	if !reflSender.IsValid() {
		reflSender = reflObject.FieldByName("Address")
		if !reflSender.IsValid() {
			err = errors.New("VerifySenderJWSSignature: object doesn't have a Sender or Address field")
			return true, err
		}
	}
	sender := reflSender.String()
	if sender == "" {
		err := errors.New("VerifySenderJWSSignature: Missing sender or address information in message")
		return true, err
	}
	// verify the message signature using the sender's public key
	if getPublicKey == nil {
		return true, nil
	}
	publicKey := getPublicKey(sender)
	if publicKey == nil {
		err := errors.New("VerifySenderJWSSignature: No public key available for sender " + sender)
		return true, err
	}
	_, err = jwsSignature.Verify(publicKey)
	if err != nil {
		msg := fmt.Sprintf("VerifySenderJWSSignature: message signature from %s fails to verify with its public key", sender)
		err := errors.New(msg)
		return true, err
	}
	return true, err
}
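// End-to-end usage sketch (illustrative only): wiring a MessageSigner to an
// existing IMessenger implementation and publishing a signed message. The
// messenger value, the address, and the key lookup are assumptions for this
// example, not part of this file:
//
//	func setupSigner(messenger IMessenger, privKey *ecdsa.PrivateKey) *MessageSigner {
//		// a real lookup would resolve the sender address via the DSS;
//		// returning nil here makes verification fail for unknown senders
//		getPublicKey := func(address string) *ecdsa.PublicKey { return nil }
//		signer := NewMessageSigner(messenger, privKey, getPublicKey)
//		// publish signed-only (nil encryption key)
//		_ = signer.PublishObject("domain/pub1/node1/$value", false, map[string]string{"sender": "pub1"}, nil)
//		return signer
//	}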