file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
payment.component.ts | import { Component, OnInit, Input, Output, ViewEncapsulation, EventEmitter, NgZone } from '@angular/core';
import { Http, Response, RequestOptions, Headers } from '@angular/http';
import { Router, ActivatedRoute } from '@angular/router';
import { AppService } from '../app-service.service';
import { AlertService } from '../alert.service';
import { LoaderService } from '../loader.service';
import { LoggerService } from '../logger.service';
import { environment } from '../../environments/environment';
import { NotificationsService } from 'angular2-notifications';
import { Title } from '@angular/platform-browser';
declare var Razorpay: any;
declare var $: any;
@Component({
selector: 'app-payment',
templateUrl: './payment.component.html',
styleUrls: ['./payment.component.scss'],
encapsulation: ViewEncapsulation.None
})
export class | implements OnInit {
// @Input() tab: string;
// @Input() user;
@Output() renewalDateUpdated: EventEmitter<any> = new EventEmitter();
@Output() planTypeUpdated: EventEmitter<any> = new EventEmitter();
httpOptions: RequestOptions;
session: any;
billingAmount: any;
invoices: any;
environment: any;
subscription: any;
isPlanActive: Boolean;
payments: any;
selectedDate: any;
selectedCurrency: string;
settingsTab: string;
paymentMethod: string;
step1: any;
activeAccordion: string;
durations:Array<any>;
planDuration:string;
currentInvoice = null;
constructor(private http: Http, private router: Router, private route: ActivatedRoute,
private _appService: AppService, private loadingService: LoaderService,
private alertService: AlertService, private notify: NotificationsService,
private loggerService: LoggerService, private ngZone: NgZone, private titleService: Title) { }
ngOnInit() {
this.titleService.setTitle('Payments and Subscription | OCLAVI');
this.loadingService.show('Getting your payment details...');
this.environment = environment;
this.httpOptions = new RequestOptions({ withCredentials: true });
this.session = JSON.parse(localStorage.getItem('user'));
this.selectedCurrency = 'USD';
this.paymentMethod = 'razorpay';
this.isPlanActive = true;
if (this.session.USER_TYPE == environment.USER_TYPE.TEAM.NAME) {
this.router.navigate(['profile']);
}
if (this.session.USER_TYPE == environment.USER_TYPE.ADMIN.NAME || this.session.USER_TYPE == environment.USER_TYPE.SELF.NAME) {
if (this.session.PLAN_END_DATE < (new Date().getTime())) {
this.alertService.show('error', 'Your subscription has expired. Please upgrade');
this.isPlanActive = false;
}
}
this.subscription = {
startDate: '',
nextDueDate: '',
subscriptionPlan: '',
lastPaymentStatus: 'Successfull'
}
this.durations = [{
durationLabel: '1 Month',
durationValue: 1,
}, {
durationLabel: '3 Months',
durationValue: 3,
}, {
durationLabel: '6 Months',
durationValue: 6,
}, {
durationLabel: '1 Year',
durationValue: 12,
}]
this.step1 = {
step: 1,
title: 'upgrage_plan_payment',
content: `Payments Section`,
class: '',
status: 'inactive',
methods: [{
name: 'razorpay',
description: 'For Indian Credit / Debit Cards'
}, {
name: 'paypal',
description: 'For international cards'
}],
location: '',
selectedMethod: 'razorpay'
};
this.http.get(environment.oclaviServer + 'subscriptionDetails', this.httpOptions).subscribe(res => {
this.payments = res.json();
this.loadingService.hide();
}, err => {
this.errorHandler(err, 'Error feteching subscription details.');
});
}
durationChanged(duration) {
this.planDuration = duration.durationValue;
}
cancelSubscription() {
this.alertService.show('warn', 'Your active subscription would be cancelled.<br /><br />Are you sure you want to cancel your subscription?');
this.alertService.positiveCallback = (() => {
this.alertService.hide();
this.loadingService.show('Cancelling your subscription details...');
this.session = JSON.parse(localStorage.getItem('user'));
this.http.post(environment.oclaviServer + 'cancelSubscription', { purpose: 'CANCEL_SUBSCRIPTION' }, this.httpOptions).subscribe(res => {
this.notify.success('Your subscription has been successfully cancelled.');
if (this.session.USER_TYPE == environment.USER_TYPE.ADMIN.NAME)
this.session.USER_TYPE = environment.USER_TYPE.STUDENT_ADMIN.NAME;
else if (this.session.USER_TYPE == environment.USER_TYPE.SELF.NAME)
this.session.USER_TYPE = environment.USER_TYPE.STUDENT_SELF.NAME;
this.session.STATUS = 'PENDING_FOR_CANCELLATION';
localStorage.setItem('user', JSON.stringify(this.session));
this.planTypeUpdated.emit(this.session.USER_TYPE);
this.loadingService.hide();
}, err => {
this.errorHandler(err, 'Error cancelling your subscription.');
});
});
}
openModal(id) {
for (let i = 1; i <= 1; i++) {
if (i === 1) {
this['step' + i].status = 'active';
this['step' + i].class = 'show';
this.activeAccordion = this['step' + i].title.replace(/_/g, ' ');
} else {
this['step' + i].status = 'inactive';
this['step' + i].class = '';
}
}
$('#' + id).modal(open);
}
payNow(vm, paymentMethod) {
$('#upgradePaymentModal').modal('hide');
if (paymentMethod == 'paypal')
vm.paypalCheckout(vm);
else if (paymentMethod == 'razorpay')
vm.razorpayCheckout(vm);
}
changeSettingsTab(tab) {
this.settingsTab = tab;
}
showPaymentModal() {
if(!this.planDuration || this.planDuration == '') {
this.notify.error('Please select plan duration.');
return;
}
this.loadingService.show('Loading payment information. Please wait...');
this.http.post(environment.oclaviServer + 'getBillingAmount', { type: 'upgrade', planDuration: this.planDuration }, this.httpOptions).subscribe(res => {
this.loadingService.hide();
this.openModal('upgradePaymentModal');
this.billingAmount = res.json();
this.billingAmount.planEndDate += ((new Date()).getTimezoneOffset() * 60 * 1000);
}, err => {
this.errorHandler(err, 'Error upgrading your subscription.');
});
}
razorpayCheckout(vm) {
vm.http.post(environment.oclaviServer + 'razorpay/getModalData', { type: 'upgrade', planDuration: this.planDuration }, vm.httpOptions).subscribe(res => {
vm.loadingService.hide();
let data = res.json();
if(data.AMOUNT < 100)
data.AMOUNT = 100;
var options = {
key: data.KEY,
name: data.MERCHANT_NAME,
amount: data.AMOUNT,
description: data.DESCRIPTION,
image: '../assets/images/Oclavi_Logo@2x.png',
prefill: {
name: data.EMAIL_ID,
email: data.EMAIL_ID
},
theme: {
color: '#3D78E0'
},
handler: (response) => {
vm.ngZone.run(() => {
data.PAYMENT_ID = response.razorpay_payment_id;
data.PAYMENT_SOURCE = 'RAZOR_PAY';
data.PLAN_START_DATE = vm.billingAmount.planStartDate;
data.PLAN_END_DATE = vm.billingAmount.planEndDate;
vm.http.post(environment.oclaviServer + 'upgradePlan', data, vm.httpOptions).subscribe(res => {
vm.router.navigate(['/payment-status/razorpay/success/upgrade'], {
queryParams: {
razorpay_payment_id: response.razorpay_payment_id
}
});
});
});
}
}
var razorpay = new Razorpay(options);
razorpay.open();
}, err => {
vm.errorHandler(err, 'Error upgrading your subscription.');
});
}
paymentMethodChanged($event) {
this.paymentMethod = $event.target.value;
if (this.paymentMethod == 'paypal')
this.selectedCurrency = 'USD';
else if (this.paymentMethod == 'razorpay')
this.selectedCurrency = 'INR';
}
paypalCheckout(vm) {
vm.loadingService.show('Creating your paypal transaction...');
vm.http.post(environment.oclaviServer + 'upgradePlan', { PAYMENT_SOURCE: 'PAYPAL', planDuration: this.planDuration }, vm.httpOptions).subscribe(res => {
vm.loadingService.show('Redirecting to payment page...');
var body = res.json();
window.location.href = body.approval_url;
}, err => {
vm.errorHandler(err, 'Error while buying more seats...');
});
}
selectInvoiceDate(newSelectedDate) {
this.selectedDate = newSelectedDate;
}
errorHandler(response, message) {
this.loadingService.hide();
if (response.status == 401) {
this.router.navigate(['login']);
localStorage.removeItem('user');
}
else {
var text = JSON.parse(response._body).message;
if (!text || text == '')
this.notify.error(null, message);
else
this.notify.error(null, text);
}
}
setCurrentInvoice (index) {
this.currentInvoice = this.payments[index];
}
}
| PaymentComponent | identifier_name |
payment.component.ts | import { Component, OnInit, Input, Output, ViewEncapsulation, EventEmitter, NgZone } from '@angular/core';
import { Http, Response, RequestOptions, Headers } from '@angular/http';
import { Router, ActivatedRoute } from '@angular/router';
import { AppService } from '../app-service.service';
import { AlertService } from '../alert.service';
import { LoaderService } from '../loader.service';
import { LoggerService } from '../logger.service';
import { environment } from '../../environments/environment';
import { NotificationsService } from 'angular2-notifications';
import { Title } from '@angular/platform-browser';
declare var Razorpay: any;
declare var $: any;
@Component({
selector: 'app-payment',
templateUrl: './payment.component.html',
styleUrls: ['./payment.component.scss'],
encapsulation: ViewEncapsulation.None
})
export class PaymentComponent implements OnInit {
// @Input() tab: string;
// @Input() user;
@Output() renewalDateUpdated: EventEmitter<any> = new EventEmitter();
@Output() planTypeUpdated: EventEmitter<any> = new EventEmitter();
httpOptions: RequestOptions;
session: any;
billingAmount: any;
invoices: any;
environment: any;
subscription: any;
isPlanActive: Boolean;
payments: any;
selectedDate: any;
selectedCurrency: string;
settingsTab: string;
paymentMethod: string;
step1: any;
activeAccordion: string;
durations:Array<any>;
planDuration:string;
currentInvoice = null;
constructor(private http: Http, private router: Router, private route: ActivatedRoute,
private _appService: AppService, private loadingService: LoaderService,
private alertService: AlertService, private notify: NotificationsService,
private loggerService: LoggerService, private ngZone: NgZone, private titleService: Title) { }
ngOnInit() {
this.titleService.setTitle('Payments and Subscription | OCLAVI');
this.loadingService.show('Getting your payment details...');
this.environment = environment;
this.httpOptions = new RequestOptions({ withCredentials: true });
this.session = JSON.parse(localStorage.getItem('user'));
this.selectedCurrency = 'USD';
this.paymentMethod = 'razorpay';
this.isPlanActive = true;
if (this.session.USER_TYPE == environment.USER_TYPE.TEAM.NAME) {
this.router.navigate(['profile']);
}
if (this.session.USER_TYPE == environment.USER_TYPE.ADMIN.NAME || this.session.USER_TYPE == environment.USER_TYPE.SELF.NAME) {
if (this.session.PLAN_END_DATE < (new Date().getTime())) {
this.alertService.show('error', 'Your subscription has expired. Please upgrade');
this.isPlanActive = false;
}
}
this.subscription = {
startDate: '',
nextDueDate: '',
subscriptionPlan: '',
lastPaymentStatus: 'Successfull'
}
this.durations = [{
durationLabel: '1 Month',
durationValue: 1,
}, {
durationLabel: '3 Months',
durationValue: 3,
}, {
durationLabel: '6 Months',
durationValue: 6,
}, {
durationLabel: '1 Year',
durationValue: 12,
}]
this.step1 = {
step: 1,
title: 'upgrage_plan_payment',
content: `Payments Section`,
class: '',
status: 'inactive',
methods: [{
name: 'razorpay',
description: 'For Indian Credit / Debit Cards'
}, {
name: 'paypal',
description: 'For international cards'
}],
location: '',
selectedMethod: 'razorpay'
};
this.http.get(environment.oclaviServer + 'subscriptionDetails', this.httpOptions).subscribe(res => {
this.payments = res.json();
this.loadingService.hide();
}, err => {
this.errorHandler(err, 'Error feteching subscription details.');
});
}
durationChanged(duration) |
cancelSubscription() {
this.alertService.show('warn', 'Your active subscription would be cancelled.<br /><br />Are you sure you want to cancel your subscription?');
this.alertService.positiveCallback = (() => {
this.alertService.hide();
this.loadingService.show('Cancelling your subscription details...');
this.session = JSON.parse(localStorage.getItem('user'));
this.http.post(environment.oclaviServer + 'cancelSubscription', { purpose: 'CANCEL_SUBSCRIPTION' }, this.httpOptions).subscribe(res => {
this.notify.success('Your subscription has been successfully cancelled.');
if (this.session.USER_TYPE == environment.USER_TYPE.ADMIN.NAME)
this.session.USER_TYPE = environment.USER_TYPE.STUDENT_ADMIN.NAME;
else if (this.session.USER_TYPE == environment.USER_TYPE.SELF.NAME)
this.session.USER_TYPE = environment.USER_TYPE.STUDENT_SELF.NAME;
this.session.STATUS = 'PENDING_FOR_CANCELLATION';
localStorage.setItem('user', JSON.stringify(this.session));
this.planTypeUpdated.emit(this.session.USER_TYPE);
this.loadingService.hide();
}, err => {
this.errorHandler(err, 'Error cancelling your subscription.');
});
});
}
openModal(id) {
for (let i = 1; i <= 1; i++) {
if (i === 1) {
this['step' + i].status = 'active';
this['step' + i].class = 'show';
this.activeAccordion = this['step' + i].title.replace(/_/g, ' ');
} else {
this['step' + i].status = 'inactive';
this['step' + i].class = '';
}
}
$('#' + id).modal(open);
}
payNow(vm, paymentMethod) {
$('#upgradePaymentModal').modal('hide');
if (paymentMethod == 'paypal')
vm.paypalCheckout(vm);
else if (paymentMethod == 'razorpay')
vm.razorpayCheckout(vm);
}
changeSettingsTab(tab) {
this.settingsTab = tab;
}
showPaymentModal() {
if(!this.planDuration || this.planDuration == '') {
this.notify.error('Please select plan duration.');
return;
}
this.loadingService.show('Loading payment information. Please wait...');
this.http.post(environment.oclaviServer + 'getBillingAmount', { type: 'upgrade', planDuration: this.planDuration }, this.httpOptions).subscribe(res => {
this.loadingService.hide();
this.openModal('upgradePaymentModal');
this.billingAmount = res.json();
this.billingAmount.planEndDate += ((new Date()).getTimezoneOffset() * 60 * 1000);
}, err => {
this.errorHandler(err, 'Error upgrading your subscription.');
});
}
razorpayCheckout(vm) {
vm.http.post(environment.oclaviServer + 'razorpay/getModalData', { type: 'upgrade', planDuration: this.planDuration }, vm.httpOptions).subscribe(res => {
vm.loadingService.hide();
let data = res.json();
if(data.AMOUNT < 100)
data.AMOUNT = 100;
var options = {
key: data.KEY,
name: data.MERCHANT_NAME,
amount: data.AMOUNT,
description: data.DESCRIPTION,
image: '../assets/images/Oclavi_Logo@2x.png',
prefill: {
name: data.EMAIL_ID,
email: data.EMAIL_ID
},
theme: {
color: '#3D78E0'
},
handler: (response) => {
vm.ngZone.run(() => {
data.PAYMENT_ID = response.razorpay_payment_id;
data.PAYMENT_SOURCE = 'RAZOR_PAY';
data.PLAN_START_DATE = vm.billingAmount.planStartDate;
data.PLAN_END_DATE = vm.billingAmount.planEndDate;
vm.http.post(environment.oclaviServer + 'upgradePlan', data, vm.httpOptions).subscribe(res => {
vm.router.navigate(['/payment-status/razorpay/success/upgrade'], {
queryParams: {
razorpay_payment_id: response.razorpay_payment_id
}
});
});
});
}
}
var razorpay = new Razorpay(options);
razorpay.open();
}, err => {
vm.errorHandler(err, 'Error upgrading your subscription.');
});
}
paymentMethodChanged($event) {
this.paymentMethod = $event.target.value;
if (this.paymentMethod == 'paypal')
this.selectedCurrency = 'USD';
else if (this.paymentMethod == 'razorpay')
this.selectedCurrency = 'INR';
}
paypalCheckout(vm) {
vm.loadingService.show('Creating your paypal transaction...');
vm.http.post(environment.oclaviServer + 'upgradePlan', { PAYMENT_SOURCE: 'PAYPAL', planDuration: this.planDuration }, vm.httpOptions).subscribe(res => {
vm.loadingService.show('Redirecting to payment page...');
var body = res.json();
window.location.href = body.approval_url;
}, err => {
vm.errorHandler(err, 'Error while buying more seats...');
});
}
selectInvoiceDate(newSelectedDate) {
this.selectedDate = newSelectedDate;
}
errorHandler(response, message) {
this.loadingService.hide();
if (response.status == 401) {
this.router.navigate(['login']);
localStorage.removeItem('user');
}
else {
var text = JSON.parse(response._body).message;
if (!text || text == '')
this.notify.error(null, message);
else
this.notify.error(null, text);
}
}
setCurrentInvoice (index) {
this.currentInvoice = this.payments[index];
}
}
| {
this.planDuration = duration.durationValue;
} | identifier_body |
mount_linux.go | // +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"bufio"
"fmt"
"hash/fnv"
"io"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"path/filepath"
"go.uber.org/zap"
utilexec "k8s.io/utils/exec"
)
const (
// How many times to retry for a consistent read of /proc/mounts.
maxListTries = 3
// Number of fields per line in /proc/mounts as per the fstab man page.
expectedNumFieldsPerLine = 6
// Location of the mount file to use
procMountsPath = "/proc/mounts"
FIPS_ENABLED_FILE_PATH = "/host/proc/sys/crypto/fips_enabled"
ENCRYPTED_UMOUNT_COMMAND = "umount.oci-fss"
UMOUNT_COMMAND = "umount"
FINDMNT_COMMAND = "findmnt"
CAT_COMMAND = "cat"
RPM_COMMAND = "rpm"
// 'fsck' found errors and corrected them
fsckErrorsCorrected = 1
// 'fsck' found errors but exited without correcting them
fsckErrorsUncorrected = 4
)
// Mounter provides the default implementation of mount.Interface
// for the linux platform. This implementation assumes that the
// kubelet is running in the host's root mount namespace.
type Mounter struct {
mounterPath string
logger *zap.SugaredLogger
}
// Mount mounts source to target as fstype with given options. 'source' and 'fstype' must
// be an emtpy string in case it's not required, e.g. for remount, or for auto filesystem
// type, where kernel handles fs type for you. The mount 'options' is a list of options,
// currently come from mount(8), e.g. "ro", "remount", "bind", etc. If no more option is
// required, call Mount with an empty string list or nil.
func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {
// Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty.
// All Linux distros are expected to be shipped with a mount utility that an support bind mounts.
mounterPath := ""
bind, bindRemountOpts := isBind(options)
if bind {
err := doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, []string{"bind"})
if err != nil {
return err
}
return doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts)
}
// The list of filesystems that require containerized mounter on GCI image cluster
fsTypesNeedMounter := []string{"nfs", "glusterfs", "ceph", "cifs"}
for _, fst := range fsTypesNeedMounter {
if fst == fstype {
mounterPath = mounter.mounterPath
}
}
return doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, options)
}
// isBind detects whether a bind mount is being requested and makes the remount options to
// use in case of bind mount, due to the fact that bind mount doesn't respect mount options.
// The list equals:
// options - 'bind' + 'remount' (no duplicate)
func isBind(options []string) (bool, []string) {
bindRemountOpts := []string{"remount"}
bind := false
if len(options) != 0 {
for _, option := range options {
switch option {
case "bind":
bind = true
break
case "remount":
break
default:
bindRemountOpts = append(bindRemountOpts, option)
}
}
}
return bind, bindRemountOpts
}
// doMount runs the mount command. mounterPath is the path to mounter binary if containerized mounter is used.
func doMount(logger *zap.SugaredLogger, mounterPath string, mountCmd string, source string, target string, fstype string, options []string) error {
mountArgs := makeMountArgs(source, target, fstype, options)
if len(mounterPath) > 0 {
mountArgs = append([]string{mountCmd}, mountArgs...)
mountCmd = mounterPath
}
logger.With("command", mountCmd, "args", mountArgs).Info("Mounting")
command := exec.Command(mountCmd, mountArgs...)
output, err := command.CombinedOutput()
if err != nil {
logger.With(
zap.Error(err),
"command", mountCmd,
"source", source,
"target", target,
"fsType", fstype,
"options", options,
"output", string(output),
).Error("Mount failed.")
return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %v\n",
err, mountCmd, source, target, fstype, options, string(output))
}
logger.Debugf("Mount output: %v", string(output))
return err
}
// makeMountArgs makes the arguments to the mount(8) command.
func makeMountArgs(source, target, fstype string, options []string) []string {
// Build mount command as follows:
// mount [-t $fstype] [-o $options] [$source] $target
mountArgs := []string{}
if len(fstype) > 0 {
mountArgs = append(mountArgs, "-t", fstype)
}
if len(options) > 0 {
mountArgs = append(mountArgs, "-o", strings.Join(options, ","))
}
if len(source) > 0 {
mountArgs = append(mountArgs, source)
}
mountArgs = append(mountArgs, target)
return mountArgs
}
// Unmount unmounts the target.
func (mounter *Mounter) Unmount(target string) error {
return mounter.unmount(target, UMOUNT_COMMAND)
}
func (mounter *Mounter) unmount(target string, unmountCommand string) error {
mounter.logger.With("target", target).Info("Unmounting.")
command := exec.Command(unmountCommand, target) | if err != nil {
mounter.logger.With(
zap.Error(err),
"command", unmountCommand,
"target", target,
"output", string(output),
).Error("Unmount failed.")
return fmt.Errorf("Unmount failed: %v\nUnmounting command: %s\nUnmounting arguments: %s\nOutput: %v\n", err, unmountCommand, target, string(output))
}
mounter.logger.Debugf("unmount output: %v", string(output))
return nil
}
// Unmount unmounts the target.
func (mounter *Mounter) UnmountWithEncrypt(target string) error {
return mounter.unmount(target, ENCRYPTED_UMOUNT_COMMAND)
}
func FindMount(mounter Interface, target string) ([]string, error) {
mountArgs := []string{"-n", "-o", "SOURCE", "-T", target}
command := exec.Command(FINDMNT_COMMAND, mountArgs...)
output, err := command.CombinedOutput()
if err != nil {
return nil, fmt.Errorf("findmnt failed: %v\narguments: %s\nOutput: %v\n", err, mountArgs, string(output))
}
sources := strings.Fields(string(output))
return sources, nil
}
func IsFipsEnabled(mounter Interface) (string, error) {
command := exec.Command(CAT_COMMAND, FIPS_ENABLED_FILE_PATH)
output, err := command.CombinedOutput()
if err != nil {
return "", fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, CAT_COMMAND, string(output))
}
return string(output), nil
}
func IsInTransitEncryptionPackageInstalled(mounter Interface) (bool, error) {
args := []string{"-q", "-a", "--root=/host"}
command := exec.Command(RPM_COMMAND, args...)
output, err := command.CombinedOutput()
if err != nil {
return false, fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, RPM_COMMAND, string(output))
}
if len(output) > 0 {
list := string(output)
if strings.Contains(list, InTransitEncryptionPackageName) {
return true, nil
}
return false, nil
}
return false, nil
}
// List returns a list of all mounted filesystems.
func (*Mounter) List() ([]MountPoint, error) {
return listProcMounts(procMountsPath)
}
// IsLikelyNotMountPoint determines if a directory is not a mountpoint.
// It is fast but not necessarily ALWAYS correct. If the path is in fact
// a bind mount from one part of a mount to another it will not be detected.
// mkdir /tmp/a /tmp/b; mount --bin /tmp/a /tmp/b; IsLikelyNotMountPoint("/tmp/b")
// will return true. When in fact /tmp/b is a mount point. If this situation
// if of interest to you, don't use this function...
func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
stat, err := os.Stat(file)
if err != nil {
return true, err
}
rootStat, err := os.Lstat(file + "/..")
if err != nil {
return true, err
}
// If the directory has a different device as parent, then it is a mountpoint.
if stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev {
return false, nil
}
return true, nil
}
// IsNotMountPoint determines if a directory is a mountpoint.
// It should return ErrNotExist when the directory does not exist.
// IsNotMountPoint is more expensive than IsLikelyNotMountPoint.
// IsNotMountPoint detects bind mounts in linux.
// IsNotMountPoint enumerates all the mountpoints using List() and
// the list of mountpoints may be large, then it uses
// isMountPointMatch to evaluate whether the directory is a mountpoint.
func IsNotMountPoint(mounter Interface, file string) (bool, error) {
// IsLikelyNotMountPoint provides a quick check
// to determine whether file IS A mountpoint.
notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file)
if notMntErr != nil && os.IsPermission(notMntErr) {
// We were not allowed to do the simple stat() check, e.g. on NFS with
// root_squash. Fall back to /proc/mounts check below.
notMnt = true
notMntErr = nil
}
if notMntErr != nil {
return notMnt, notMntErr
}
// identified as mountpoint, so return this fact.
if notMnt == false {
return notMnt, nil
}
// Resolve any symlinks in file, kernel would do the same and use the resolved path in /proc/mounts.
resolvedFile, err := filepath.EvalSymlinks(file)
if err != nil {
return true, err
}
// check all mountpoints since IsLikelyNotMountPoint
// is not reliable for some mountpoint types.
mountPoints, mountPointsErr := mounter.List()
if mountPointsErr != nil {
return notMnt, mountPointsErr
}
for _, mp := range mountPoints {
if isMountPointMatch(mp, resolvedFile) {
notMnt = false
break
}
}
return notMnt, nil
}
// isMountPointMatch returns true if the path in mp is the same as dir.
// Handles case where mountpoint dir has been renamed due to stale NFS mount.
func isMountPointMatch(mp MountPoint, dir string) bool {
deletedDir := fmt.Sprintf("%s\\040(deleted)", dir)
return ((mp.Path == dir) || (mp.Path == deletedDir))
}
// DeviceOpened checks if block device in use by calling Open with O_EXCL flag.
// If pathname is not a device, log and return false with nil error.
// If open returns errno EBUSY, return true with nil error.
// If open returns nil, return false with nil error.
// Otherwise, return false with error
func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) {
return exclusiveOpenFailsOnDevice(mounter.logger, pathname)
}
// PathIsDevice uses FileInfo returned from os.Stat to check if path refers
// to a device.
func (mounter *Mounter) PathIsDevice(pathname string) (bool, error) {
return pathIsDevice(pathname)
}
func exclusiveOpenFailsOnDevice(logger *zap.SugaredLogger, pathname string) (bool, error) {
isDevice, err := pathIsDevice(pathname)
if err != nil {
return false, fmt.Errorf(
"PathIsDevice failed for path %q: %v",
pathname,
err)
}
if !isDevice {
logger.With("path", pathname).Warn("Path does not refer to a device.")
return false, nil
}
fd, errno := syscall.Open(pathname, syscall.O_RDONLY|syscall.O_EXCL, 0)
// If the device is in use, open will return an invalid fd.
// When this happens, it is expected that Close will fail and throw an error.
defer syscall.Close(fd)
if errno == nil {
// device not in use
return false, nil
} else if errno == syscall.EBUSY {
// device is in use
return true, nil
}
// error during call to Open
return false, errno
}
func pathIsDevice(pathname string) (bool, error) {
finfo, err := os.Stat(pathname)
if os.IsNotExist(err) {
return false, nil
}
// err in call to os.Stat
if err != nil {
return false, err
}
// path refers to a device
if finfo.Mode()&os.ModeDevice != 0 {
return true, nil
}
// path does not refer to device
return false, nil
}
//GetDeviceNameFromMount: given a mount point, find the device name from its global mount point
func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {
return getDeviceNameFromMount(mounter.logger, mounter, mountPath, pluginDir)
}
func listProcMounts(mountFilePath string) ([]MountPoint, error) {
hash1, err := readProcMounts(mountFilePath, nil)
if err != nil {
return nil, err
}
for i := 0; i < maxListTries; i++ {
mps := []MountPoint{}
hash2, err := readProcMounts(mountFilePath, &mps)
if err != nil {
return nil, err
}
if hash1 == hash2 {
// Success
return mps, nil
}
hash1 = hash2
}
return nil, fmt.Errorf("failed to get a consistent snapshot of %v after %d tries", mountFilePath, maxListTries)
}
// readProcMounts reads the given mountFilePath (normally /proc/mounts) and produces a hash
// of the contents. If the out argument is not nil, this fills it with MountPoint structs.
func readProcMounts(mountFilePath string, out *[]MountPoint) (uint32, error) {
file, err := os.Open(mountFilePath)
if err != nil {
return 0, err
}
defer file.Close()
return readProcMountsFrom(file, out)
}
func readProcMountsFrom(file io.Reader, out *[]MountPoint) (uint32, error) {
hash := fnv.New32a()
scanner := bufio.NewReader(file)
for {
line, err := scanner.ReadString('\n')
if err == io.EOF {
break
}
fields := strings.Fields(line)
if len(fields) != expectedNumFieldsPerLine {
return 0, fmt.Errorf("wrong number of fields (expected %d, got %d): %s", expectedNumFieldsPerLine, len(fields), line)
}
fmt.Fprintf(hash, "%s", line)
if out != nil {
mp := MountPoint{
Device: fields[0],
Path: fields[1],
Type: fields[2],
Opts: strings.Split(fields[3], ","),
}
freq, err := strconv.Atoi(fields[4])
if err != nil {
return 0, err
}
mp.Freq = freq
pass, err := strconv.Atoi(fields[5])
if err != nil {
return 0, err
}
mp.Pass = pass
*out = append(*out, mp)
}
}
return hash.Sum32(), nil
}
// formatAndMount uses unix utils to format and mount the given disk
func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
options = append(options, "defaults")
mounter.Logger = mounter.Logger.With(
"source", source,
"target", target,
"fstype", fstype,
"options", options,
)
// Run fsck on the disk to fix repairable issues
mounter.Logger.Info("Checking disk for issues using 'fsck'.")
args := []string{"-a", source}
cmd := mounter.Runner.Command("fsck", args...)
out, err := cmd.CombinedOutput()
mounter.Logger = mounter.Logger.With("output", out)
if err != nil {
ee, isExitError := err.(utilexec.ExitError)
switch {
case err == utilexec.ErrExecutableNotFound:
mounter.Logger.Info("'fsck' not found on system; continuing mount without running 'fsck'.")
case isExitError && ee.ExitStatus() == fsckErrorsCorrected:
mounter.Logger.Info("Device has errors that were corrected with 'fsck'.")
case isExitError && ee.ExitStatus() == fsckErrorsUncorrected:
mounter.Logger.Info("'fsck' found errors on device but was unable to correct them.")
return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s.", source, string(out))
case isExitError && ee.ExitStatus() > fsckErrorsUncorrected:
mounter.Logger.Error("'fsck' error.")
}
}
// Try to mount the disk
mounter.Logger.Info("Attempting to mount disk.")
mountErr := mounter.Interface.Mount(source, target, fstype, options)
if mountErr != nil {
// Mount failed. This indicates either that the disk is unformatted or
// it contains an unexpected filesystem.
existingFormat, err := mounter.getDiskFormat(source)
if err != nil {
return err
}
if existingFormat == "" {
// Disk is unformatted so format it.
args = []string{source}
// Use 'ext4' as the default
if len(fstype) == 0 {
fstype = "ext4"
}
if fstype == "ext4" || fstype == "ext3" {
args = []string{"-F", source}
}
mounter.Logger.With("argruments", args).Info("Disk appears to be unformatted, attempting to format.")
cmd := mounter.Runner.Command("mkfs."+fstype, args...)
_, err := cmd.CombinedOutput()
if err == nil {
// the disk has been formatted successfully try to mount it again.
mounter.Logger.Info("Disk successfully formatted.")
return mounter.Interface.Mount(source, target, fstype, options)
}
mounter.Logger.With(zap.Error(err)).Error("Format of disk failed.")
return err
} else {
// Disk is already formatted and failed to mount
if len(fstype) == 0 || fstype == existingFormat {
// This is mount error
return mountErr
} else {
// Block device is formatted with unexpected filesystem, let the user know
return fmt.Errorf("failed to mount the volume as %q, it already contains %s. Mount error: %v", fstype, existingFormat, mountErr)
}
}
}
return mountErr
}
// getDiskFormat uses 'blkid' to determine a given disk's format
func (mounter *SafeFormatAndMount) getDiskFormat(disk string) (string, error) {
args := []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", disk}
mounter.Logger.Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args)
cmd := mounter.Runner.Command("blkid", args...)
dataOut, err := cmd.CombinedOutput()
output := string(dataOut)
mounter.Logger.Infof("Output: %q, err: %v", output, err)
if err != nil {
if exit, ok := err.(utilexec.ExitError); ok {
if exit.ExitStatus() == 2 {
// Disk device is unformatted.
// For `blkid`, if the specified token (TYPE/PTTYPE, etc) was
// not found, or no (specified) devices could be identified, an
// exit code of 2 is returned.
return "", nil
}
}
mounter.Logger.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
return "", err
}
var fstype, pttype string
lines := strings.Split(output, "\n")
for _, l := range lines {
if len(l) <= 0 {
// Ignore empty line.
continue
}
cs := strings.Split(l, "=")
if len(cs) != 2 {
return "", fmt.Errorf("blkid returns invalid output: %s", output)
}
// TYPE is filesystem type, and PTTYPE is partition table type, according
// to https://www.kernel.org/pub/linux/utils/util-linux/v2.21/libblkid-docs/.
if cs[0] == "TYPE" {
fstype = cs[1]
} else if cs[0] == "PTTYPE" {
pttype = cs[1]
}
}
if len(pttype) > 0 {
mounter.Logger.Infof("Disk %s detected partition table type: %s", disk, pttype)
// Returns a special non-empty string as filesystem type, then kubelet
// will not format it.
return "unknown data, probably partitions", nil
}
return fstype, nil
}
func (mounter *SafeFormatAndMount) resize(devicePath string, volumePath string) (bool, error) {
format, err := mounter.getDiskFormat(devicePath)
if err != nil {
formatErr := fmt.Errorf("error checking format for device %s: %v", devicePath, err)
return false, formatErr
}
// If disk has no format, there is no need to resize the disk because mkfs.*
// by default will use whole disk anyways.
if format == "" {
return false, nil
}
mounter.Logger.With("devicePath", devicePath).Infof("Expanding mounted volume")
switch format {
case "ext3", "ext4":
return mounter.extResize(devicePath)
case "xfs":
return mounter.xfsResize(volumePath)
}
return false, fmt.Errorf("resize of format %s is not supported for device %s mounted at %s", format, devicePath, volumePath)
}
func (mounter *SafeFormatAndMount) extResize(devicePath string) (bool, error) {
cmd := mounter.Runner.Command("resize2fs", devicePath)
output, err := cmd.CombinedOutput()
if err == nil {
mounter.Logger.With("devicePath", devicePath).Infof("Device resized successfully")
return true, nil
}
resizeError := fmt.Errorf("resize of device %s failed: %v. resize2fs output: %s", devicePath, err, string(output))
return false, resizeError
}
func (mounter *SafeFormatAndMount) xfsResize(deviceMountPath string) (bool, error) {
args := []string{"-d", deviceMountPath}
cmd := mounter.Runner.Command("xfs_growfs", args...)
output, err := cmd.CombinedOutput()
if err == nil {
mounter.Logger.With("deviceMountPath", deviceMountPath).Infof("Device %s resized successfully")
return true, nil
}
resizeError := fmt.Errorf("resize of device %s failed: %v. xfs_growfs output: %s", deviceMountPath, err, string(output))
return false, resizeError
}
func (mounter *SafeFormatAndMount) rescan(devicePath string) error {
lsblkargs := []string{"-n", "-o", "NAME", devicePath}
lsblkcmd := mounter.Runner.Command("lsblk", lsblkargs...)
lsblkoutput, err := lsblkcmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Failed to find device name associated with devicePath %s", devicePath)
}
deviceName := strings.TrimSpace(string(lsblkoutput))
if strings.HasPrefix(deviceName, "/dev/") {
deviceName = strings.TrimPrefix(deviceName, "/dev/")
}
mounter.Logger.With("deviceName", deviceName).Info("Rescanning")
// run command dd iflag=direct if=/dev/<device_name> of=/dev/null count=1
// https://docs.oracle.com/en-us/iaas/Content/Block/Tasks/rescanningdisk.htm#Rescanni
devicePathFileArg := fmt.Sprintf("if=%s", devicePath)
args := []string{"iflag=direct", devicePathFileArg, "of=/dev/null", "count=1"}
cmd := mounter.Runner.Command("dd", args...)
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, "dd", string(output))
}
mounter.Logger.With("command", "dd", "output", string(output)).Debug("dd output")
// run command echo 1 | tee /sys/class/block/%s/device/rescan
// https://docs.oracle.com/en-us/iaas/Content/Block/Tasks/rescanningdisk.htm#Rescanni
cmdStr := fmt.Sprintf("echo 1 | tee /sys/class/block/%s/device/rescan", deviceName)
cmd = mounter.Runner.Command("bash", "-c", cmdStr)
output, err = cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, cmdStr, string(output))
}
mounter.Logger.With("command", cmdStr, "output", string(output)).Debug("rescan output")
return nil
}
func (mounter *SafeFormatAndMount) getBlockSizeBytes(devicePath string) (int64, error) {
args := []string{"--getsize64", devicePath}
cmd := mounter.Runner.Command("blockdev", args...)
output, err := cmd.CombinedOutput()
if err != nil {
return -1, fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, "blockdev", string(output))
}
strOut := strings.TrimSpace(string(output))
mounter.Logger.With("devicePath", devicePath, "command", "blockdev", "output", strOut).Debugf("Get block device size in bytes successful")
gotSizeBytes, err := strconv.ParseInt(strOut, 10, 64)
if err != nil {
return -1, fmt.Errorf("failed to parse size %s into an int64 size", strOut)
}
return gotSizeBytes, nil
} | output, err := command.CombinedOutput() | random_line_split |
mount_linux.go | // +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"bufio"
"fmt"
"hash/fnv"
"io"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"path/filepath"
"go.uber.org/zap"
utilexec "k8s.io/utils/exec"
)
const (
// How many times to retry for a consistent read of /proc/mounts.
maxListTries = 3
// Number of fields per line in /proc/mounts as per the fstab man page.
expectedNumFieldsPerLine = 6
// Location of the mount file to use
procMountsPath = "/proc/mounts"
FIPS_ENABLED_FILE_PATH = "/host/proc/sys/crypto/fips_enabled"
ENCRYPTED_UMOUNT_COMMAND = "umount.oci-fss"
UMOUNT_COMMAND = "umount"
FINDMNT_COMMAND = "findmnt"
CAT_COMMAND = "cat"
RPM_COMMAND = "rpm"
// 'fsck' found errors and corrected them
fsckErrorsCorrected = 1
// 'fsck' found errors but exited without correcting them
fsckErrorsUncorrected = 4
)
// Mounter provides the default implementation of mount.Interface
// for the linux platform. This implementation assumes that the
// kubelet is running in the host's root mount namespace.
type Mounter struct {
mounterPath string
logger *zap.SugaredLogger
}
// Mount mounts source to target as fstype with given options. 'source' and 'fstype' must
// be an emtpy string in case it's not required, e.g. for remount, or for auto filesystem
// type, where kernel handles fs type for you. The mount 'options' is a list of options,
// currently come from mount(8), e.g. "ro", "remount", "bind", etc. If no more option is
// required, call Mount with an empty string list or nil.
func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {
// Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty.
// All Linux distros are expected to be shipped with a mount utility that an support bind mounts.
mounterPath := ""
bind, bindRemountOpts := isBind(options)
if bind {
err := doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, []string{"bind"})
if err != nil {
return err
}
return doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts)
}
// The list of filesystems that require containerized mounter on GCI image cluster
fsTypesNeedMounter := []string{"nfs", "glusterfs", "ceph", "cifs"}
for _, fst := range fsTypesNeedMounter {
if fst == fstype {
mounterPath = mounter.mounterPath
}
}
return doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, options)
}
// isBind detects whether a bind mount is being requested and makes the remount options to
// use in case of bind mount, due to the fact that bind mount doesn't respect mount options.
// The list equals:
// options - 'bind' + 'remount' (no duplicate)
func isBind(options []string) (bool, []string) {
bindRemountOpts := []string{"remount"}
bind := false
if len(options) != 0 {
for _, option := range options {
switch option {
case "bind":
bind = true
break
case "remount":
break
default:
bindRemountOpts = append(bindRemountOpts, option)
}
}
}
return bind, bindRemountOpts
}
// doMount runs the mount command. mounterPath is the path to mounter binary if containerized mounter is used.
func doMount(logger *zap.SugaredLogger, mounterPath string, mountCmd string, source string, target string, fstype string, options []string) error {
mountArgs := makeMountArgs(source, target, fstype, options)
if len(mounterPath) > 0 {
mountArgs = append([]string{mountCmd}, mountArgs...)
mountCmd = mounterPath
}
logger.With("command", mountCmd, "args", mountArgs).Info("Mounting")
command := exec.Command(mountCmd, mountArgs...)
output, err := command.CombinedOutput()
if err != nil {
logger.With(
zap.Error(err),
"command", mountCmd,
"source", source,
"target", target,
"fsType", fstype,
"options", options,
"output", string(output),
).Error("Mount failed.")
return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %v\n",
err, mountCmd, source, target, fstype, options, string(output))
}
logger.Debugf("Mount output: %v", string(output))
return err
}
// makeMountArgs makes the arguments to the mount(8) command.
func makeMountArgs(source, target, fstype string, options []string) []string {
// Build mount command as follows:
// mount [-t $fstype] [-o $options] [$source] $target
mountArgs := []string{}
if len(fstype) > 0 {
mountArgs = append(mountArgs, "-t", fstype)
}
if len(options) > 0 {
mountArgs = append(mountArgs, "-o", strings.Join(options, ","))
}
if len(source) > 0 {
mountArgs = append(mountArgs, source)
}
mountArgs = append(mountArgs, target)
return mountArgs
}
// Unmount unmounts the target.
func (mounter *Mounter) | (target string) error {
return mounter.unmount(target, UMOUNT_COMMAND)
}
func (mounter *Mounter) unmount(target string, unmountCommand string) error {
mounter.logger.With("target", target).Info("Unmounting.")
command := exec.Command(unmountCommand, target)
output, err := command.CombinedOutput()
if err != nil {
mounter.logger.With(
zap.Error(err),
"command", unmountCommand,
"target", target,
"output", string(output),
).Error("Unmount failed.")
return fmt.Errorf("Unmount failed: %v\nUnmounting command: %s\nUnmounting arguments: %s\nOutput: %v\n", err, unmountCommand, target, string(output))
}
mounter.logger.Debugf("unmount output: %v", string(output))
return nil
}
// Unmount unmounts the target.
func (mounter *Mounter) UnmountWithEncrypt(target string) error {
return mounter.unmount(target, ENCRYPTED_UMOUNT_COMMAND)
}
func FindMount(mounter Interface, target string) ([]string, error) {
mountArgs := []string{"-n", "-o", "SOURCE", "-T", target}
command := exec.Command(FINDMNT_COMMAND, mountArgs...)
output, err := command.CombinedOutput()
if err != nil {
return nil, fmt.Errorf("findmnt failed: %v\narguments: %s\nOutput: %v\n", err, mountArgs, string(output))
}
sources := strings.Fields(string(output))
return sources, nil
}
func IsFipsEnabled(mounter Interface) (string, error) {
command := exec.Command(CAT_COMMAND, FIPS_ENABLED_FILE_PATH)
output, err := command.CombinedOutput()
if err != nil {
return "", fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, CAT_COMMAND, string(output))
}
return string(output), nil
}
func IsInTransitEncryptionPackageInstalled(mounter Interface) (bool, error) {
args := []string{"-q", "-a", "--root=/host"}
command := exec.Command(RPM_COMMAND, args...)
output, err := command.CombinedOutput()
if err != nil {
return false, fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, RPM_COMMAND, string(output))
}
if len(output) > 0 {
list := string(output)
if strings.Contains(list, InTransitEncryptionPackageName) {
return true, nil
}
return false, nil
}
return false, nil
}
// List returns a list of all mounted filesystems.
func (*Mounter) List() ([]MountPoint, error) {
return listProcMounts(procMountsPath)
}
// IsLikelyNotMountPoint determines if a directory is not a mountpoint.
// It is fast but not necessarily ALWAYS correct. If the path is in fact
// a bind mount from one part of a mount to another it will not be detected.
// mkdir /tmp/a /tmp/b; mount --bin /tmp/a /tmp/b; IsLikelyNotMountPoint("/tmp/b")
// will return true. When in fact /tmp/b is a mount point. If this situation
// if of interest to you, don't use this function...
func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
stat, err := os.Stat(file)
if err != nil {
return true, err
}
rootStat, err := os.Lstat(file + "/..")
if err != nil {
return true, err
}
// If the directory has a different device as parent, then it is a mountpoint.
if stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev {
return false, nil
}
return true, nil
}
// IsNotMountPoint determines if a directory is a mountpoint.
// It should return ErrNotExist when the directory does not exist.
// IsNotMountPoint is more expensive than IsLikelyNotMountPoint.
// IsNotMountPoint detects bind mounts in linux.
// IsNotMountPoint enumerates all the mountpoints using List() and
// the list of mountpoints may be large, then it uses
// isMountPointMatch to evaluate whether the directory is a mountpoint.
func IsNotMountPoint(mounter Interface, file string) (bool, error) {
// IsLikelyNotMountPoint provides a quick check
// to determine whether file IS A mountpoint.
notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file)
if notMntErr != nil && os.IsPermission(notMntErr) {
// We were not allowed to do the simple stat() check, e.g. on NFS with
// root_squash. Fall back to /proc/mounts check below.
notMnt = true
notMntErr = nil
}
if notMntErr != nil {
return notMnt, notMntErr
}
// identified as mountpoint, so return this fact.
if notMnt == false {
return notMnt, nil
}
// Resolve any symlinks in file, kernel would do the same and use the resolved path in /proc/mounts.
resolvedFile, err := filepath.EvalSymlinks(file)
if err != nil {
return true, err
}
// check all mountpoints since IsLikelyNotMountPoint
// is not reliable for some mountpoint types.
mountPoints, mountPointsErr := mounter.List()
if mountPointsErr != nil {
return notMnt, mountPointsErr
}
for _, mp := range mountPoints {
if isMountPointMatch(mp, resolvedFile) {
notMnt = false
break
}
}
return notMnt, nil
}
// isMountPointMatch returns true if the path in mp is the same as dir.
// Handles case where mountpoint dir has been renamed due to stale NFS mount.
func isMountPointMatch(mp MountPoint, dir string) bool {
deletedDir := fmt.Sprintf("%s\\040(deleted)", dir)
return ((mp.Path == dir) || (mp.Path == deletedDir))
}
// DeviceOpened checks if block device in use by calling Open with O_EXCL flag.
// If pathname is not a device, log and return false with nil error.
// If open returns errno EBUSY, return true with nil error.
// If open returns nil, return false with nil error.
// Otherwise, return false with error
func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) {
return exclusiveOpenFailsOnDevice(mounter.logger, pathname)
}
// PathIsDevice uses FileInfo returned from os.Stat to check if path refers
// to a device.
func (mounter *Mounter) PathIsDevice(pathname string) (bool, error) {
return pathIsDevice(pathname)
}
func exclusiveOpenFailsOnDevice(logger *zap.SugaredLogger, pathname string) (bool, error) {
isDevice, err := pathIsDevice(pathname)
if err != nil {
return false, fmt.Errorf(
"PathIsDevice failed for path %q: %v",
pathname,
err)
}
if !isDevice {
logger.With("path", pathname).Warn("Path does not refer to a device.")
return false, nil
}
fd, errno := syscall.Open(pathname, syscall.O_RDONLY|syscall.O_EXCL, 0)
// If the device is in use, open will return an invalid fd.
// When this happens, it is expected that Close will fail and throw an error.
defer syscall.Close(fd)
if errno == nil {
// device not in use
return false, nil
} else if errno == syscall.EBUSY {
// device is in use
return true, nil
}
// error during call to Open
return false, errno
}
func pathIsDevice(pathname string) (bool, error) {
finfo, err := os.Stat(pathname)
if os.IsNotExist(err) {
return false, nil
}
// err in call to os.Stat
if err != nil {
return false, err
}
// path refers to a device
if finfo.Mode()&os.ModeDevice != 0 {
return true, nil
}
// path does not refer to device
return false, nil
}
//GetDeviceNameFromMount: given a mount point, find the device name from its global mount point
func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {
return getDeviceNameFromMount(mounter.logger, mounter, mountPath, pluginDir)
}
func listProcMounts(mountFilePath string) ([]MountPoint, error) {
hash1, err := readProcMounts(mountFilePath, nil)
if err != nil {
return nil, err
}
for i := 0; i < maxListTries; i++ {
mps := []MountPoint{}
hash2, err := readProcMounts(mountFilePath, &mps)
if err != nil {
return nil, err
}
if hash1 == hash2 {
// Success
return mps, nil
}
hash1 = hash2
}
return nil, fmt.Errorf("failed to get a consistent snapshot of %v after %d tries", mountFilePath, maxListTries)
}
// readProcMounts reads the given mountFilePath (normally /proc/mounts) and produces a hash
// of the contents. If the out argument is not nil, this fills it with MountPoint structs.
func readProcMounts(mountFilePath string, out *[]MountPoint) (uint32, error) {
file, err := os.Open(mountFilePath)
if err != nil {
return 0, err
}
defer file.Close()
return readProcMountsFrom(file, out)
}
func readProcMountsFrom(file io.Reader, out *[]MountPoint) (uint32, error) {
hash := fnv.New32a()
scanner := bufio.NewReader(file)
for {
line, err := scanner.ReadString('\n')
if err == io.EOF {
break
}
fields := strings.Fields(line)
if len(fields) != expectedNumFieldsPerLine {
return 0, fmt.Errorf("wrong number of fields (expected %d, got %d): %s", expectedNumFieldsPerLine, len(fields), line)
}
fmt.Fprintf(hash, "%s", line)
if out != nil {
mp := MountPoint{
Device: fields[0],
Path: fields[1],
Type: fields[2],
Opts: strings.Split(fields[3], ","),
}
freq, err := strconv.Atoi(fields[4])
if err != nil {
return 0, err
}
mp.Freq = freq
pass, err := strconv.Atoi(fields[5])
if err != nil {
return 0, err
}
mp.Pass = pass
*out = append(*out, mp)
}
}
return hash.Sum32(), nil
}
// formatAndMount uses unix utils to format and mount the given disk
func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
options = append(options, "defaults")
mounter.Logger = mounter.Logger.With(
"source", source,
"target", target,
"fstype", fstype,
"options", options,
)
// Run fsck on the disk to fix repairable issues
mounter.Logger.Info("Checking disk for issues using 'fsck'.")
args := []string{"-a", source}
cmd := mounter.Runner.Command("fsck", args...)
out, err := cmd.CombinedOutput()
mounter.Logger = mounter.Logger.With("output", out)
if err != nil {
ee, isExitError := err.(utilexec.ExitError)
switch {
case err == utilexec.ErrExecutableNotFound:
mounter.Logger.Info("'fsck' not found on system; continuing mount without running 'fsck'.")
case isExitError && ee.ExitStatus() == fsckErrorsCorrected:
mounter.Logger.Info("Device has errors that were corrected with 'fsck'.")
case isExitError && ee.ExitStatus() == fsckErrorsUncorrected:
mounter.Logger.Info("'fsck' found errors on device but was unable to correct them.")
return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s.", source, string(out))
case isExitError && ee.ExitStatus() > fsckErrorsUncorrected:
mounter.Logger.Error("'fsck' error.")
}
}
// Try to mount the disk
mounter.Logger.Info("Attempting to mount disk.")
mountErr := mounter.Interface.Mount(source, target, fstype, options)
if mountErr != nil {
// Mount failed. This indicates either that the disk is unformatted or
// it contains an unexpected filesystem.
existingFormat, err := mounter.getDiskFormat(source)
if err != nil {
return err
}
if existingFormat == "" {
// Disk is unformatted so format it.
args = []string{source}
// Use 'ext4' as the default
if len(fstype) == 0 {
fstype = "ext4"
}
if fstype == "ext4" || fstype == "ext3" {
args = []string{"-F", source}
}
mounter.Logger.With("argruments", args).Info("Disk appears to be unformatted, attempting to format.")
cmd := mounter.Runner.Command("mkfs."+fstype, args...)
_, err := cmd.CombinedOutput()
if err == nil {
// the disk has been formatted successfully try to mount it again.
mounter.Logger.Info("Disk successfully formatted.")
return mounter.Interface.Mount(source, target, fstype, options)
}
mounter.Logger.With(zap.Error(err)).Error("Format of disk failed.")
return err
} else {
// Disk is already formatted and failed to mount
if len(fstype) == 0 || fstype == existingFormat {
// This is mount error
return mountErr
} else {
// Block device is formatted with unexpected filesystem, let the user know
return fmt.Errorf("failed to mount the volume as %q, it already contains %s. Mount error: %v", fstype, existingFormat, mountErr)
}
}
}
return mountErr
}
// getDiskFormat uses 'blkid' to determine a given disk's format
func (mounter *SafeFormatAndMount) getDiskFormat(disk string) (string, error) {
args := []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", disk}
mounter.Logger.Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args)
cmd := mounter.Runner.Command("blkid", args...)
dataOut, err := cmd.CombinedOutput()
output := string(dataOut)
mounter.Logger.Infof("Output: %q, err: %v", output, err)
if err != nil {
if exit, ok := err.(utilexec.ExitError); ok {
if exit.ExitStatus() == 2 {
// Disk device is unformatted.
// For `blkid`, if the specified token (TYPE/PTTYPE, etc) was
// not found, or no (specified) devices could be identified, an
// exit code of 2 is returned.
return "", nil
}
}
mounter.Logger.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
return "", err
}
var fstype, pttype string
lines := strings.Split(output, "\n")
for _, l := range lines {
if len(l) <= 0 {
// Ignore empty line.
continue
}
cs := strings.Split(l, "=")
if len(cs) != 2 {
return "", fmt.Errorf("blkid returns invalid output: %s", output)
}
// TYPE is filesystem type, and PTTYPE is partition table type, according
// to https://www.kernel.org/pub/linux/utils/util-linux/v2.21/libblkid-docs/.
if cs[0] == "TYPE" {
fstype = cs[1]
} else if cs[0] == "PTTYPE" {
pttype = cs[1]
}
}
if len(pttype) > 0 {
mounter.Logger.Infof("Disk %s detected partition table type: %s", disk, pttype)
// Returns a special non-empty string as filesystem type, then kubelet
// will not format it.
return "unknown data, probably partitions", nil
}
return fstype, nil
}
func (mounter *SafeFormatAndMount) resize(devicePath string, volumePath string) (bool, error) {
format, err := mounter.getDiskFormat(devicePath)
if err != nil {
formatErr := fmt.Errorf("error checking format for device %s: %v", devicePath, err)
return false, formatErr
}
// If disk has no format, there is no need to resize the disk because mkfs.*
// by default will use whole disk anyways.
if format == "" {
return false, nil
}
mounter.Logger.With("devicePath", devicePath).Infof("Expanding mounted volume")
switch format {
case "ext3", "ext4":
return mounter.extResize(devicePath)
case "xfs":
return mounter.xfsResize(volumePath)
}
return false, fmt.Errorf("resize of format %s is not supported for device %s mounted at %s", format, devicePath, volumePath)
}
func (mounter *SafeFormatAndMount) extResize(devicePath string) (bool, error) {
cmd := mounter.Runner.Command("resize2fs", devicePath)
output, err := cmd.CombinedOutput()
if err == nil {
mounter.Logger.With("devicePath", devicePath).Infof("Device resized successfully")
return true, nil
}
resizeError := fmt.Errorf("resize of device %s failed: %v. resize2fs output: %s", devicePath, err, string(output))
return false, resizeError
}
func (mounter *SafeFormatAndMount) xfsResize(deviceMountPath string) (bool, error) {
args := []string{"-d", deviceMountPath}
cmd := mounter.Runner.Command("xfs_growfs", args...)
output, err := cmd.CombinedOutput()
if err == nil {
mounter.Logger.With("deviceMountPath", deviceMountPath).Infof("Device %s resized successfully")
return true, nil
}
resizeError := fmt.Errorf("resize of device %s failed: %v. xfs_growfs output: %s", deviceMountPath, err, string(output))
return false, resizeError
}
func (mounter *SafeFormatAndMount) rescan(devicePath string) error {
lsblkargs := []string{"-n", "-o", "NAME", devicePath}
lsblkcmd := mounter.Runner.Command("lsblk", lsblkargs...)
lsblkoutput, err := lsblkcmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Failed to find device name associated with devicePath %s", devicePath)
}
deviceName := strings.TrimSpace(string(lsblkoutput))
if strings.HasPrefix(deviceName, "/dev/") {
deviceName = strings.TrimPrefix(deviceName, "/dev/")
}
mounter.Logger.With("deviceName", deviceName).Info("Rescanning")
// run command dd iflag=direct if=/dev/<device_name> of=/dev/null count=1
// https://docs.oracle.com/en-us/iaas/Content/Block/Tasks/rescanningdisk.htm#Rescanni
devicePathFileArg := fmt.Sprintf("if=%s", devicePath)
args := []string{"iflag=direct", devicePathFileArg, "of=/dev/null", "count=1"}
cmd := mounter.Runner.Command("dd", args...)
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, "dd", string(output))
}
mounter.Logger.With("command", "dd", "output", string(output)).Debug("dd output")
// run command echo 1 | tee /sys/class/block/%s/device/rescan
// https://docs.oracle.com/en-us/iaas/Content/Block/Tasks/rescanningdisk.htm#Rescanni
cmdStr := fmt.Sprintf("echo 1 | tee /sys/class/block/%s/device/rescan", deviceName)
cmd = mounter.Runner.Command("bash", "-c", cmdStr)
output, err = cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, cmdStr, string(output))
}
mounter.Logger.With("command", cmdStr, "output", string(output)).Debug("rescan output")
return nil
}
func (mounter *SafeFormatAndMount) getBlockSizeBytes(devicePath string) (int64, error) {
args := []string{"--getsize64", devicePath}
cmd := mounter.Runner.Command("blockdev", args...)
output, err := cmd.CombinedOutput()
if err != nil {
return -1, fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, "blockdev", string(output))
}
strOut := strings.TrimSpace(string(output))
mounter.Logger.With("devicePath", devicePath, "command", "blockdev", "output", strOut).Debugf("Get block device size in bytes successful")
gotSizeBytes, err := strconv.ParseInt(strOut, 10, 64)
if err != nil {
return -1, fmt.Errorf("failed to parse size %s into an int64 size", strOut)
}
return gotSizeBytes, nil
}
| Unmount | identifier_name |
mount_linux.go | // +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"bufio"
"fmt"
"hash/fnv"
"io"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"path/filepath"
"go.uber.org/zap"
utilexec "k8s.io/utils/exec"
)
const (
// How many times to retry for a consistent read of /proc/mounts.
maxListTries = 3
// Number of fields per line in /proc/mounts as per the fstab man page.
expectedNumFieldsPerLine = 6
// Location of the mount file to use
procMountsPath = "/proc/mounts"
FIPS_ENABLED_FILE_PATH = "/host/proc/sys/crypto/fips_enabled"
ENCRYPTED_UMOUNT_COMMAND = "umount.oci-fss"
UMOUNT_COMMAND = "umount"
FINDMNT_COMMAND = "findmnt"
CAT_COMMAND = "cat"
RPM_COMMAND = "rpm"
// 'fsck' found errors and corrected them
fsckErrorsCorrected = 1
// 'fsck' found errors but exited without correcting them
fsckErrorsUncorrected = 4
)
// Mounter provides the default implementation of mount.Interface
// for the linux platform. This implementation assumes that the
// kubelet is running in the host's root mount namespace.
type Mounter struct {
mounterPath string
logger *zap.SugaredLogger
}
// Mount mounts source to target as fstype with given options. 'source' and 'fstype' must
// be an emtpy string in case it's not required, e.g. for remount, or for auto filesystem
// type, where kernel handles fs type for you. The mount 'options' is a list of options,
// currently come from mount(8), e.g. "ro", "remount", "bind", etc. If no more option is
// required, call Mount with an empty string list or nil.
func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {
// Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty.
// All Linux distros are expected to be shipped with a mount utility that an support bind mounts.
mounterPath := ""
bind, bindRemountOpts := isBind(options)
if bind {
err := doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, []string{"bind"})
if err != nil {
return err
}
return doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts)
}
// The list of filesystems that require containerized mounter on GCI image cluster
fsTypesNeedMounter := []string{"nfs", "glusterfs", "ceph", "cifs"}
for _, fst := range fsTypesNeedMounter {
if fst == fstype {
mounterPath = mounter.mounterPath
}
}
return doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, options)
}
// isBind detects whether a bind mount is being requested and makes the remount options to
// use in case of bind mount, due to the fact that bind mount doesn't respect mount options.
// The list equals:
// options - 'bind' + 'remount' (no duplicate)
func isBind(options []string) (bool, []string) {
bindRemountOpts := []string{"remount"}
bind := false
if len(options) != 0 {
for _, option := range options {
switch option {
case "bind":
bind = true
break
case "remount":
break
default:
bindRemountOpts = append(bindRemountOpts, option)
}
}
}
return bind, bindRemountOpts
}
// doMount runs the mount command and returns a descriptive error containing
// the command's combined output on failure. mounterPath is the path to the
// mounter binary when a containerized mounter is used; when non-empty, the
// real mount command becomes the first argument of that binary.
func doMount(logger *zap.SugaredLogger, mounterPath string, mountCmd string, source string, target string, fstype string, options []string) error {
	mountArgs := makeMountArgs(source, target, fstype, options)
	if len(mounterPath) > 0 {
		// Invoke as: <mounterPath> <mountCmd> <args...>
		mountArgs = append([]string{mountCmd}, mountArgs...)
		mountCmd = mounterPath
	}
	logger.With("command", mountCmd, "args", mountArgs).Info("Mounting")
	command := exec.Command(mountCmd, mountArgs...)
	output, err := command.CombinedOutput()
	if err != nil {
		logger.With(
			zap.Error(err),
			"command", mountCmd,
			"source", source,
			"target", target,
			"fsType", fstype,
			"options", options,
			"output", string(output),
		).Error("Mount failed.")
		return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %v\n",
			err, mountCmd, source, target, fstype, options, string(output))
	}
	logger.Debugf("Mount output: %v", string(output))
	// err is provably nil here; return nil explicitly (was `return err`).
	return nil
}
// makeMountArgs makes the arguments to the mount(8) command.
func makeMountArgs(source, target, fstype string, options []string) []string {
// Build mount command as follows:
// mount [-t $fstype] [-o $options] [$source] $target
mountArgs := []string{}
if len(fstype) > 0 {
mountArgs = append(mountArgs, "-t", fstype)
}
if len(options) > 0 |
if len(source) > 0 {
mountArgs = append(mountArgs, source)
}
mountArgs = append(mountArgs, target)
return mountArgs
}
// Unmount unmounts the target using the plain umount(8) binary.
func (mounter *Mounter) Unmount(target string) error {
	return mounter.unmount(target, UMOUNT_COMMAND)
}
// unmount runs the given unmount binary against target, logging and wrapping
// any failure together with the command's combined output.
func (mounter *Mounter) unmount(target string, unmountCommand string) error {
	mounter.logger.With("target", target).Info("Unmounting.")
	out, err := exec.Command(unmountCommand, target).CombinedOutput()
	if err == nil {
		mounter.logger.Debugf("unmount output: %v", string(out))
		return nil
	}
	mounter.logger.With(
		zap.Error(err),
		"command", unmountCommand,
		"target", target,
		"output", string(out),
	).Error("Unmount failed.")
	return fmt.Errorf("Unmount failed: %v\nUnmounting command: %s\nUnmounting arguments: %s\nOutput: %v\n", err, unmountCommand, target, string(out))
}
// UnmountWithEncrypt unmounts the target using the in-transit-encryption
// aware umount.oci-fss binary instead of plain umount(8).
func (mounter *Mounter) UnmountWithEncrypt(target string) error {
	return mounter.unmount(target, ENCRYPTED_UMOUNT_COMMAND)
}
// FindMount returns the SOURCE field(s) that findmnt reports for target,
// split on whitespace. The mounter parameter is not used by this
// implementation; it is kept for signature compatibility.
func FindMount(mounter Interface, target string) ([]string, error) {
	mountArgs := []string{"-n", "-o", "SOURCE", "-T", target}
	out, err := exec.Command(FINDMNT_COMMAND, mountArgs...).CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("findmnt failed: %v\narguments: %s\nOutput: %v\n", err, mountArgs, string(out))
	}
	return strings.Fields(string(out)), nil
}
// IsFipsEnabled returns the raw contents of the host's
// /proc/sys/crypto/fips_enabled file (read via CAT_COMMAND). Callers are
// expected to interpret the returned string; the mounter parameter is unused.
func IsFipsEnabled(mounter Interface) (string, error) {
	command := exec.Command(CAT_COMMAND, FIPS_ENABLED_FILE_PATH)
	output, err := command.CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, CAT_COMMAND, string(output))
	}
	return string(output), nil
}
// IsInTransitEncryptionPackageInstalled reports whether the in-transit
// encryption package appears in the host's rpm database (queried with
// --root=/host). The mounter parameter is unused.
func IsInTransitEncryptionPackageInstalled(mounter Interface) (bool, error) {
	args := []string{"-q", "-a", "--root=/host"}
	output, err := exec.Command(RPM_COMMAND, args...).CombinedOutput()
	if err != nil {
		return false, fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, RPM_COMMAND, string(output))
	}
	// strings.Contains on empty output is false, so the original
	// len(output) > 0 guard collapses into this single check.
	return strings.Contains(string(output), InTransitEncryptionPackageName), nil
}
// List returns a list of all mounted filesystems, parsed from /proc/mounts.
func (*Mounter) List() ([]MountPoint, error) {
	return listProcMounts(procMountsPath)
}
// IsLikelyNotMountPoint determines if a directory is not a mountpoint.
// It is fast but not necessarily ALWAYS correct. If the path is in fact
// a bind mount from one part of a mount to another it will not be detected.
// mkdir /tmp/a /tmp/b; mount --bind /tmp/a /tmp/b; IsLikelyNotMountPoint("/tmp/b")
// will return true. When in fact /tmp/b is a mount point. If this situation
// is of interest to you, don't use this function...
func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
	// Stat failures (e.g. ENOENT) report "not a mountpoint" with the error.
	stat, err := os.Stat(file)
	if err != nil {
		return true, err
	}
	// Lstat the parent directory to compare device IDs.
	rootStat, err := os.Lstat(file + "/..")
	if err != nil {
		return true, err
	}
	// If the directory has a different device as parent, then it is a mountpoint.
	if stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev {
		return false, nil
	}
	return true, nil
}
// IsNotMountPoint determines if a directory is a mountpoint.
// It should return ErrNotExist when the directory does not exist.
// IsNotMountPoint is more expensive than IsLikelyNotMountPoint because it
// falls back to enumerating every mountpoint via List() and comparing each
// against the (symlink-resolved) path with isMountPointMatch; this also
// lets it detect bind mounts on linux.
func IsNotMountPoint(mounter Interface, file string) (bool, error) {
	// Fast path: the cheap stat-based check.
	notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file)
	if notMntErr != nil {
		if !os.IsPermission(notMntErr) {
			return notMnt, notMntErr
		}
		// The simple stat() was denied (e.g. NFS with root_squash);
		// fall through to the /proc/mounts scan below.
		notMnt = true
	}
	if !notMnt {
		// Fast path already identified a mountpoint.
		return false, nil
	}
	// Resolve symlinks: the kernel records resolved paths in /proc/mounts.
	resolvedFile, err := filepath.EvalSymlinks(file)
	if err != nil {
		return true, err
	}
	// Slow path: compare against every known mountpoint, since the fast
	// check is unreliable for some mountpoint types.
	mountPoints, mountPointsErr := mounter.List()
	if mountPointsErr != nil {
		return true, mountPointsErr
	}
	for _, mp := range mountPoints {
		if isMountPointMatch(mp, resolvedFile) {
			return false, nil
		}
	}
	return true, nil
}
// isMountPointMatch reports whether mp refers to dir, including the
// "<dir>\040(deleted)" form left behind when a mountpoint directory was
// renamed under a stale NFS mount.
func isMountPointMatch(mp MountPoint, dir string) bool {
	if mp.Path == dir {
		return true
	}
	return mp.Path == dir+"\\040(deleted)"
}
// DeviceOpened checks if block device in use by calling Open with O_EXCL flag.
// If pathname is not a device, log and return false with nil error.
// If open returns errno EBUSY, return true with nil error.
// If open returns nil, return false with nil error.
// Otherwise, return false with error
func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) {
	return exclusiveOpenFailsOnDevice(mounter.logger, pathname)
}
// PathIsDevice uses FileInfo returned from os.Stat to check if path refers
// to a device (os.ModeDevice covers both block and character devices).
func (mounter *Mounter) PathIsDevice(pathname string) (bool, error) {
	return pathIsDevice(pathname)
}
// exclusiveOpenFailsOnDevice reports whether pathname is a device currently
// in use, by attempting an O_EXCL open: EBUSY means in use. Non-device
// paths return (false, nil) after a warning; other open errors are returned.
func exclusiveOpenFailsOnDevice(logger *zap.SugaredLogger, pathname string) (bool, error) {
	isDevice, err := pathIsDevice(pathname)
	if err != nil {
		return false, fmt.Errorf(
			"PathIsDevice failed for path %q: %v",
			pathname,
			err)
	}
	if !isDevice {
		logger.With("path", pathname).Warn("Path does not refer to a device.")
		return false, nil
	}
	fd, errno := syscall.Open(pathname, syscall.O_RDONLY|syscall.O_EXCL, 0)
	if errno == nil {
		// Open succeeded, so the device is not in use; release our handle.
		// BUG FIX: fd is only valid on success — previously Close ran via
		// defer even when Open failed, closing an invalid descriptor.
		syscall.Close(fd)
		return false, nil
	}
	if errno == syscall.EBUSY {
		// Exclusive open of an in-use device fails with EBUSY.
		return true, nil
	}
	// Unexpected error from the open itself.
	return false, errno
}
func pathIsDevice(pathname string) (bool, error) {
finfo, err := os.Stat(pathname)
if os.IsNotExist(err) {
return false, nil
}
// err in call to os.Stat
if err != nil {
return false, err
}
// path refers to a device
if finfo.Mode()&os.ModeDevice != 0 {
return true, nil
}
// path does not refer to device
return false, nil
}
// GetDeviceNameFromMount: given a mount point, find the device name from its
// global mount point under pluginDir (delegates to getDeviceNameFromMount).
func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {
	return getDeviceNameFromMount(mounter.logger, mounter, mountPath, pluginDir)
}
// listProcMounts parses mountFilePath into MountPoints, retrying up to
// maxListTries times until two consecutive reads hash identically, which
// guarantees a consistent snapshot of a file that may change mid-read.
func listProcMounts(mountFilePath string) ([]MountPoint, error) {
	prevHash, err := readProcMounts(mountFilePath, nil)
	if err != nil {
		return nil, err
	}
	for attempt := 0; attempt < maxListTries; attempt++ {
		mps := []MountPoint{}
		curHash, err := readProcMounts(mountFilePath, &mps)
		if err != nil {
			return nil, err
		}
		if curHash == prevHash {
			// Two identical consecutive reads: snapshot is consistent.
			return mps, nil
		}
		prevHash = curHash
	}
	return nil, fmt.Errorf("failed to get a consistent snapshot of %v after %d tries", mountFilePath, maxListTries)
}
// readProcMounts hashes the contents of mountFilePath (normally /proc/mounts)
// and, when out is non-nil, also fills it with parsed MountPoint structs.
func readProcMounts(mountFilePath string, out *[]MountPoint) (uint32, error) {
	f, err := os.Open(mountFilePath)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	return readProcMountsFrom(f, out)
}
// readProcMountsFrom consumes file line by line and returns an FNV-1a hash
// of everything read. When out is non-nil each line is also parsed into a
// MountPoint and appended. Every line must contain exactly
// expectedNumFieldsPerLine whitespace-separated fields
// (device, path, type, opts, freq, pass), per the fstab man page.
func readProcMountsFrom(file io.Reader, out *[]MountPoint) (uint32, error) {
	hash := fnv.New32a()
	scanner := bufio.NewReader(file)
	for {
		line, err := scanner.ReadString('\n')
		if err == io.EOF {
			// A final line without a trailing newline is discarded here, as
			// before; /proc/mounts always ends with '\n' so nothing is lost.
			break
		}
		if err != nil {
			// BUG FIX: non-EOF read errors previously fell through and were
			// misreported as a field-count error on a truncated line.
			return 0, err
		}
		fields := strings.Fields(line)
		if len(fields) != expectedNumFieldsPerLine {
			return 0, fmt.Errorf("wrong number of fields (expected %d, got %d): %s", expectedNumFieldsPerLine, len(fields), line)
		}
		fmt.Fprintf(hash, "%s", line)
		if out != nil {
			mp := MountPoint{
				Device: fields[0],
				Path:   fields[1],
				Type:   fields[2],
				Opts:   strings.Split(fields[3], ","),
			}
			freq, err := strconv.Atoi(fields[4])
			if err != nil {
				return 0, err
			}
			mp.Freq = freq
			pass, err := strconv.Atoi(fields[5])
			if err != nil {
				return 0, err
			}
			mp.Pass = pass
			*out = append(*out, mp)
		}
	}
	return hash.Sum32(), nil
}
// formatAndMount uses unix utils to format and mount the given disk.
// Flow: run `fsck -a` to repair fixable issues, attempt the mount, and if
// the mount fails on an unformatted device, format it (defaulting to ext4)
// and mount once more. A device already formatted with a filesystem other
// than the requested one is reported as an error, never reformatted.
func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
	options = append(options, "defaults")
	// NOTE(review): mounter.Logger is permanently rebound with per-call
	// fields here and below, so repeated calls accumulate fields on the
	// shared logger — confirm this mutation is intended.
	mounter.Logger = mounter.Logger.With(
		"source", source,
		"target", target,
		"fstype", fstype,
		"options", options,
	)
	// Run fsck on the disk to fix repairable issues
	mounter.Logger.Info("Checking disk for issues using 'fsck'.")
	args := []string{"-a", source}
	cmd := mounter.Runner.Command("fsck", args...)
	out, err := cmd.CombinedOutput()
	mounter.Logger = mounter.Logger.With("output", out)
	if err != nil {
		// fsck exit statuses: fsckErrorsCorrected (1) means errors were
		// fixed; fsckErrorsUncorrected (4) means errors remain (fatal here).
		ee, isExitError := err.(utilexec.ExitError)
		switch {
		case err == utilexec.ErrExecutableNotFound:
			mounter.Logger.Info("'fsck' not found on system; continuing mount without running 'fsck'.")
		case isExitError && ee.ExitStatus() == fsckErrorsCorrected:
			mounter.Logger.Info("Device has errors that were corrected with 'fsck'.")
		case isExitError && ee.ExitStatus() == fsckErrorsUncorrected:
			mounter.Logger.Info("'fsck' found errors on device but was unable to correct them.")
			return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s.", source, string(out))
		case isExitError && ee.ExitStatus() > fsckErrorsUncorrected:
			mounter.Logger.Error("'fsck' error.")
		}
	}
	// Try to mount the disk
	mounter.Logger.Info("Attempting to mount disk.")
	mountErr := mounter.Interface.Mount(source, target, fstype, options)
	if mountErr != nil {
		// Mount failed. This indicates either that the disk is unformatted or
		// it contains an unexpected filesystem.
		existingFormat, err := mounter.getDiskFormat(source)
		if err != nil {
			return err
		}
		if existingFormat == "" {
			// Disk is unformatted so format it.
			args = []string{source}
			// Use 'ext4' as the default
			if len(fstype) == 0 {
				fstype = "ext4"
			}
			if fstype == "ext4" || fstype == "ext3" {
				// -F makes mkfs.ext* proceed without prompting.
				args = []string{"-F", source}
			}
			mounter.Logger.With("argruments", args).Info("Disk appears to be unformatted, attempting to format.")
			cmd := mounter.Runner.Command("mkfs."+fstype, args...)
			_, err := cmd.CombinedOutput()
			if err == nil {
				// the disk has been formatted successfully try to mount it again.
				mounter.Logger.Info("Disk successfully formatted.")
				return mounter.Interface.Mount(source, target, fstype, options)
			}
			mounter.Logger.With(zap.Error(err)).Error("Format of disk failed.")
			return err
		} else {
			// Disk is already formatted and failed to mount
			if len(fstype) == 0 || fstype == existingFormat {
				// This is mount error
				return mountErr
			} else {
				// Block device is formatted with unexpected filesystem, let the user know
				return fmt.Errorf("failed to mount the volume as %q, it already contains %s. Mount error: %v", fstype, existingFormat, mountErr)
			}
		}
	}
	return mountErr
}
// getDiskFormat uses 'blkid' to determine a given disk's format.
// Returns "" with nil error for an unformatted device, the filesystem type
// (e.g. "ext4") when one is present, or a sentinel non-empty string when a
// partition table is detected so callers will not format the disk.
func (mounter *SafeFormatAndMount) getDiskFormat(disk string) (string, error) {
	args := []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", disk}
	mounter.Logger.Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args)
	cmd := mounter.Runner.Command("blkid", args...)
	dataOut, err := cmd.CombinedOutput()
	output := string(dataOut)
	mounter.Logger.Infof("Output: %q, err: %v", output, err)
	if err != nil {
		if exit, ok := err.(utilexec.ExitError); ok {
			if exit.ExitStatus() == 2 {
				// Disk device is unformatted.
				// For `blkid`, if the specified token (TYPE/PTTYPE, etc) was
				// not found, or no (specified) devices could be identified, an
				// exit code of 2 is returned.
				return "", nil
			}
		}
		mounter.Logger.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
		return "", err
	}
	var fstype, pttype string
	// `-o export` output is one KEY=VALUE pair per line.
	lines := strings.Split(output, "\n")
	for _, l := range lines {
		if len(l) <= 0 {
			// Ignore empty line.
			continue
		}
		cs := strings.Split(l, "=")
		if len(cs) != 2 {
			return "", fmt.Errorf("blkid returns invalid output: %s", output)
		}
		// TYPE is filesystem type, and PTTYPE is partition table type, according
		// to https://www.kernel.org/pub/linux/utils/util-linux/v2.21/libblkid-docs/.
		if cs[0] == "TYPE" {
			fstype = cs[1]
		} else if cs[0] == "PTTYPE" {
			pttype = cs[1]
		}
	}
	if len(pttype) > 0 {
		mounter.Logger.Infof("Disk %s detected partition table type: %s", disk, pttype)
		// Returns a special non-empty string as filesystem type, then kubelet
		// will not format it.
		return "unknown data, probably partitions", nil
	}
	return fstype, nil
}
// resize grows the filesystem on devicePath (mounted at volumePath) to fill
// the underlying device. Returns true when a resize was performed, and
// (false, nil) for unformatted devices where no resize is needed.
func (mounter *SafeFormatAndMount) resize(devicePath string, volumePath string) (bool, error) {
	format, err := mounter.getDiskFormat(devicePath)
	if err != nil {
		formatErr := fmt.Errorf("error checking format for device %s: %v", devicePath, err)
		return false, formatErr
	}
	// If disk has no format, there is no need to resize the disk because mkfs.*
	// by default will use whole disk anyways.
	if format == "" {
		return false, nil
	}
	mounter.Logger.With("devicePath", devicePath).Infof("Expanding mounted volume")
	// ext* is resized via the block device; xfs via its mount point.
	switch format {
	case "ext3", "ext4":
		return mounter.extResize(devicePath)
	case "xfs":
		return mounter.xfsResize(volumePath)
	}
	return false, fmt.Errorf("resize of format %s is not supported for device %s mounted at %s", format, devicePath, volumePath)
}
// extResize grows an ext3/ext4 filesystem on devicePath to fill the device
// using resize2fs. Returns true on success.
func (mounter *SafeFormatAndMount) extResize(devicePath string) (bool, error) {
	output, err := mounter.Runner.Command("resize2fs", devicePath).CombinedOutput()
	if err != nil {
		return false, fmt.Errorf("resize of device %s failed: %v. resize2fs output: %s", devicePath, err, string(output))
	}
	mounter.Logger.With("devicePath", devicePath).Infof("Device resized successfully")
	return true, nil
}
// xfsResize grows an XFS filesystem to fill its device using xfs_growfs
// ("-d" grows the data section to the maximum size). It takes the mount
// path because xfs_growfs operates on mounted filesystems.
func (mounter *SafeFormatAndMount) xfsResize(deviceMountPath string) (bool, error) {
	args := []string{"-d", deviceMountPath}
	cmd := mounter.Runner.Command("xfs_growfs", args...)
	output, err := cmd.CombinedOutput()
	if err == nil {
		// BUG FIX: the "%s" verb previously had no argument and logged
		// "%!s(MISSING)"; pass deviceMountPath explicitly.
		mounter.Logger.With("deviceMountPath", deviceMountPath).Infof("Device %s resized successfully", deviceMountPath)
		return true, nil
	}
	resizeError := fmt.Errorf("resize of device %s failed: %v. xfs_growfs output: %s", deviceMountPath, err, string(output))
	return false, resizeError
}
// rescan forces the kernel to re-read devicePath's size/geometry, following
// the documented OCI block-volume rescan procedure: a single direct-I/O dd
// read of the device, then writing 1 to /sys/class/block/<name>/device/rescan.
func (mounter *SafeFormatAndMount) rescan(devicePath string) error {
	// Resolve the kernel device name (e.g. "sdb") from the device path.
	lsblkargs := []string{"-n", "-o", "NAME", devicePath}
	lsblkcmd := mounter.Runner.Command("lsblk", lsblkargs...)
	lsblkoutput, err := lsblkcmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("Failed to find device name associated with devicePath %s", devicePath)
	}
	deviceName := strings.TrimSpace(string(lsblkoutput))
	if strings.HasPrefix(deviceName, "/dev/") {
		deviceName = strings.TrimPrefix(deviceName, "/dev/")
	}
	mounter.Logger.With("deviceName", deviceName).Info("Rescanning")
	// run command dd iflag=direct if=/dev/<device_name> of=/dev/null count=1
	// https://docs.oracle.com/en-us/iaas/Content/Block/Tasks/rescanningdisk.htm#Rescanni
	devicePathFileArg := fmt.Sprintf("if=%s", devicePath)
	args := []string{"iflag=direct", devicePathFileArg, "of=/dev/null", "count=1"}
	cmd := mounter.Runner.Command("dd", args...)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, "dd", string(output))
	}
	mounter.Logger.With("command", "dd", "output", string(output)).Debug("dd output")
	// run command echo 1 | tee /sys/class/block/%s/device/rescan
	// https://docs.oracle.com/en-us/iaas/Content/Block/Tasks/rescanningdisk.htm#Rescanni
	cmdStr := fmt.Sprintf("echo 1 | tee /sys/class/block/%s/device/rescan", deviceName)
	cmd = mounter.Runner.Command("bash", "-c", cmdStr)
	output, err = cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, cmdStr, string(output))
	}
	mounter.Logger.With("command", cmdStr, "output", string(output)).Debug("rescan output")
	return nil
}
// getBlockSizeBytes returns the size of devicePath in bytes as reported by
// `blockdev --getsize64`, or -1 with an error on failure.
func (mounter *SafeFormatAndMount) getBlockSizeBytes(devicePath string) (int64, error) {
	args := []string{"--getsize64", devicePath}
	cmd := mounter.Runner.Command("blockdev", args...)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return -1, fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, "blockdev", string(output))
	}
	strOut := strings.TrimSpace(string(output))
	mounter.Logger.With("devicePath", devicePath, "command", "blockdev", "output", strOut).Debugf("Get block device size in bytes successful")
	gotSizeBytes, err := strconv.ParseInt(strOut, 10, 64)
	if err != nil {
		return -1, fmt.Errorf("failed to parse size %s into an int64 size", strOut)
	}
	return gotSizeBytes, nil
}
| {
mountArgs = append(mountArgs, "-o", strings.Join(options, ","))
} | conditional_block |
mount_linux.go | // +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"bufio"
"fmt"
"hash/fnv"
"io"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"path/filepath"
"go.uber.org/zap"
utilexec "k8s.io/utils/exec"
)
const (
// How many times to retry for a consistent read of /proc/mounts.
maxListTries = 3
// Number of fields per line in /proc/mounts as per the fstab man page.
expectedNumFieldsPerLine = 6
// Location of the mount file to use
procMountsPath = "/proc/mounts"
FIPS_ENABLED_FILE_PATH = "/host/proc/sys/crypto/fips_enabled"
ENCRYPTED_UMOUNT_COMMAND = "umount.oci-fss"
UMOUNT_COMMAND = "umount"
FINDMNT_COMMAND = "findmnt"
CAT_COMMAND = "cat"
RPM_COMMAND = "rpm"
// 'fsck' found errors and corrected them
fsckErrorsCorrected = 1
// 'fsck' found errors but exited without correcting them
fsckErrorsUncorrected = 4
)
// Mounter provides the default implementation of mount.Interface
// for the linux platform. This implementation assumes that the
// kubelet is running in the host's root mount namespace.
type Mounter struct {
mounterPath string
logger *zap.SugaredLogger
}
// Mount mounts source to target as fstype with given options. 'source' and 'fstype' must
// be an emtpy string in case it's not required, e.g. for remount, or for auto filesystem
// type, where kernel handles fs type for you. The mount 'options' is a list of options,
// currently come from mount(8), e.g. "ro", "remount", "bind", etc. If no more option is
// required, call Mount with an empty string list or nil.
func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {
// Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty.
// All Linux distros are expected to be shipped with a mount utility that an support bind mounts.
mounterPath := ""
bind, bindRemountOpts := isBind(options)
if bind {
err := doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, []string{"bind"})
if err != nil {
return err
}
return doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts)
}
// The list of filesystems that require containerized mounter on GCI image cluster
fsTypesNeedMounter := []string{"nfs", "glusterfs", "ceph", "cifs"}
for _, fst := range fsTypesNeedMounter {
if fst == fstype {
mounterPath = mounter.mounterPath
}
}
return doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, options)
}
// isBind reports whether options request a bind mount and builds the option
// list for the follow-up remount. Because a bind mount does not respect
// mount options, the caller binds first and then remounts with:
//   options - 'bind' + 'remount' (no duplicate)
func isBind(options []string) (bool, []string) {
	bindRemountOpts := []string{"remount"}
	bind := false
	// Ranging over a nil/empty slice is a no-op, so no length guard is needed.
	for _, option := range options {
		switch option {
		case "bind":
			// Note: Go switch cases do not fall through; the original
			// `break` statements here were no-ops and have been removed.
			bind = true
		case "remount":
			// Already first in bindRemountOpts; skip to avoid a duplicate.
		default:
			bindRemountOpts = append(bindRemountOpts, option)
		}
	}
	return bind, bindRemountOpts
}
// doMount runs the mount command and returns a descriptive error containing
// the command's combined output on failure. mounterPath is the path to the
// mounter binary when a containerized mounter is used; when non-empty, the
// real mount command becomes the first argument of that binary.
func doMount(logger *zap.SugaredLogger, mounterPath string, mountCmd string, source string, target string, fstype string, options []string) error {
	mountArgs := makeMountArgs(source, target, fstype, options)
	if len(mounterPath) > 0 {
		// Invoke as: <mounterPath> <mountCmd> <args...>
		mountArgs = append([]string{mountCmd}, mountArgs...)
		mountCmd = mounterPath
	}
	logger.With("command", mountCmd, "args", mountArgs).Info("Mounting")
	command := exec.Command(mountCmd, mountArgs...)
	output, err := command.CombinedOutput()
	if err != nil {
		logger.With(
			zap.Error(err),
			"command", mountCmd,
			"source", source,
			"target", target,
			"fsType", fstype,
			"options", options,
			"output", string(output),
		).Error("Mount failed.")
		return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %v\n",
			err, mountCmd, source, target, fstype, options, string(output))
	}
	logger.Debugf("Mount output: %v", string(output))
	// err is provably nil here; return nil explicitly (was `return err`).
	return nil
}
// makeMountArgs makes the arguments to the mount(8) command.
func makeMountArgs(source, target, fstype string, options []string) []string {
// Build mount command as follows:
// mount [-t $fstype] [-o $options] [$source] $target
mountArgs := []string{}
if len(fstype) > 0 {
mountArgs = append(mountArgs, "-t", fstype)
}
if len(options) > 0 {
mountArgs = append(mountArgs, "-o", strings.Join(options, ","))
}
if len(source) > 0 {
mountArgs = append(mountArgs, source)
}
mountArgs = append(mountArgs, target)
return mountArgs
}
// Unmount unmounts the target.
func (mounter *Mounter) Unmount(target string) error {
return mounter.unmount(target, UMOUNT_COMMAND)
}
func (mounter *Mounter) unmount(target string, unmountCommand string) error {
mounter.logger.With("target", target).Info("Unmounting.")
command := exec.Command(unmountCommand, target)
output, err := command.CombinedOutput()
if err != nil {
mounter.logger.With(
zap.Error(err),
"command", unmountCommand,
"target", target,
"output", string(output),
).Error("Unmount failed.")
return fmt.Errorf("Unmount failed: %v\nUnmounting command: %s\nUnmounting arguments: %s\nOutput: %v\n", err, unmountCommand, target, string(output))
}
mounter.logger.Debugf("unmount output: %v", string(output))
return nil
}
// Unmount unmounts the target.
func (mounter *Mounter) UnmountWithEncrypt(target string) error {
return mounter.unmount(target, ENCRYPTED_UMOUNT_COMMAND)
}
func FindMount(mounter Interface, target string) ([]string, error) {
mountArgs := []string{"-n", "-o", "SOURCE", "-T", target}
command := exec.Command(FINDMNT_COMMAND, mountArgs...)
output, err := command.CombinedOutput()
if err != nil {
return nil, fmt.Errorf("findmnt failed: %v\narguments: %s\nOutput: %v\n", err, mountArgs, string(output))
}
sources := strings.Fields(string(output))
return sources, nil
}
func IsFipsEnabled(mounter Interface) (string, error) {
command := exec.Command(CAT_COMMAND, FIPS_ENABLED_FILE_PATH)
output, err := command.CombinedOutput()
if err != nil {
return "", fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, CAT_COMMAND, string(output))
}
return string(output), nil
}
func IsInTransitEncryptionPackageInstalled(mounter Interface) (bool, error) {
args := []string{"-q", "-a", "--root=/host"}
command := exec.Command(RPM_COMMAND, args...)
output, err := command.CombinedOutput()
if err != nil {
return false, fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, RPM_COMMAND, string(output))
}
if len(output) > 0 {
list := string(output)
if strings.Contains(list, InTransitEncryptionPackageName) {
return true, nil
}
return false, nil
}
return false, nil
}
// List returns a list of all mounted filesystems.
func (*Mounter) List() ([]MountPoint, error) {
return listProcMounts(procMountsPath)
}
// IsLikelyNotMountPoint determines if a directory is not a mountpoint.
// It is fast but not necessarily ALWAYS correct. If the path is in fact
// a bind mount from one part of a mount to another it will not be detected.
// mkdir /tmp/a /tmp/b; mount --bin /tmp/a /tmp/b; IsLikelyNotMountPoint("/tmp/b")
// will return true. When in fact /tmp/b is a mount point. If this situation
// if of interest to you, don't use this function...
func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
stat, err := os.Stat(file)
if err != nil {
return true, err
}
rootStat, err := os.Lstat(file + "/..")
if err != nil {
return true, err
}
// If the directory has a different device as parent, then it is a mountpoint.
if stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev {
return false, nil
}
return true, nil
}
// IsNotMountPoint determines if a directory is a mountpoint.
// It should return ErrNotExist when the directory does not exist.
// IsNotMountPoint is more expensive than IsLikelyNotMountPoint.
// IsNotMountPoint detects bind mounts in linux.
// IsNotMountPoint enumerates all the mountpoints using List() and
// the list of mountpoints may be large, then it uses
// isMountPointMatch to evaluate whether the directory is a mountpoint.
func IsNotMountPoint(mounter Interface, file string) (bool, error) {
// IsLikelyNotMountPoint provides a quick check
// to determine whether file IS A mountpoint.
notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file)
if notMntErr != nil && os.IsPermission(notMntErr) {
// We were not allowed to do the simple stat() check, e.g. on NFS with
// root_squash. Fall back to /proc/mounts check below.
notMnt = true
notMntErr = nil
}
if notMntErr != nil {
return notMnt, notMntErr
}
// identified as mountpoint, so return this fact.
if notMnt == false {
return notMnt, nil
}
// Resolve any symlinks in file, kernel would do the same and use the resolved path in /proc/mounts.
resolvedFile, err := filepath.EvalSymlinks(file)
if err != nil {
return true, err
}
// check all mountpoints since IsLikelyNotMountPoint
// is not reliable for some mountpoint types.
mountPoints, mountPointsErr := mounter.List()
if mountPointsErr != nil {
return notMnt, mountPointsErr
}
for _, mp := range mountPoints {
if isMountPointMatch(mp, resolvedFile) {
notMnt = false
break
}
}
return notMnt, nil
}
// isMountPointMatch returns true if the path in mp is the same as dir.
// Handles case where mountpoint dir has been renamed due to stale NFS mount.
func isMountPointMatch(mp MountPoint, dir string) bool {
deletedDir := fmt.Sprintf("%s\\040(deleted)", dir)
return ((mp.Path == dir) || (mp.Path == deletedDir))
}
// DeviceOpened checks if block device in use by calling Open with O_EXCL flag.
// If pathname is not a device, log and return false with nil error.
// If open returns errno EBUSY, return true with nil error.
// If open returns nil, return false with nil error.
// Otherwise, return false with error
func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) {
return exclusiveOpenFailsOnDevice(mounter.logger, pathname)
}
// PathIsDevice uses FileInfo returned from os.Stat to check if path refers
// to a device.
func (mounter *Mounter) PathIsDevice(pathname string) (bool, error) {
return pathIsDevice(pathname)
}
// exclusiveOpenFailsOnDevice reports whether pathname is a device currently
// in use, by attempting an O_EXCL open: EBUSY means in use. Non-device
// paths return (false, nil) after a warning; other open errors are returned.
func exclusiveOpenFailsOnDevice(logger *zap.SugaredLogger, pathname string) (bool, error) {
	isDevice, err := pathIsDevice(pathname)
	if err != nil {
		return false, fmt.Errorf(
			"PathIsDevice failed for path %q: %v",
			pathname,
			err)
	}
	if !isDevice {
		logger.With("path", pathname).Warn("Path does not refer to a device.")
		return false, nil
	}
	fd, errno := syscall.Open(pathname, syscall.O_RDONLY|syscall.O_EXCL, 0)
	if errno == nil {
		// Open succeeded, so the device is not in use; release our handle.
		// BUG FIX: fd is only valid on success — previously Close ran via
		// defer even when Open failed, closing an invalid descriptor.
		syscall.Close(fd)
		return false, nil
	}
	if errno == syscall.EBUSY {
		// Exclusive open of an in-use device fails with EBUSY.
		return true, nil
	}
	// Unexpected error from the open itself.
	return false, errno
}
func pathIsDevice(pathname string) (bool, error) {
finfo, err := os.Stat(pathname)
if os.IsNotExist(err) {
return false, nil
}
// err in call to os.Stat
if err != nil {
return false, err
}
// path refers to a device
if finfo.Mode()&os.ModeDevice != 0 {
return true, nil
}
// path does not refer to device
return false, nil
}
//GetDeviceNameFromMount: given a mount point, find the device name from its global mount point
func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {
return getDeviceNameFromMount(mounter.logger, mounter, mountPath, pluginDir)
}
func listProcMounts(mountFilePath string) ([]MountPoint, error) {
hash1, err := readProcMounts(mountFilePath, nil)
if err != nil {
return nil, err
}
for i := 0; i < maxListTries; i++ {
mps := []MountPoint{}
hash2, err := readProcMounts(mountFilePath, &mps)
if err != nil {
return nil, err
}
if hash1 == hash2 {
// Success
return mps, nil
}
hash1 = hash2
}
return nil, fmt.Errorf("failed to get a consistent snapshot of %v after %d tries", mountFilePath, maxListTries)
}
// readProcMounts reads the given mountFilePath (normally /proc/mounts) and produces a hash
// of the contents. If the out argument is not nil, this fills it with MountPoint structs.
func readProcMounts(mountFilePath string, out *[]MountPoint) (uint32, error) {
file, err := os.Open(mountFilePath)
if err != nil {
return 0, err
}
defer file.Close()
return readProcMountsFrom(file, out)
}
func readProcMountsFrom(file io.Reader, out *[]MountPoint) (uint32, error) |
// formatAndMount uses unix utils to format and mount the given disk
func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
options = append(options, "defaults")
mounter.Logger = mounter.Logger.With(
"source", source,
"target", target,
"fstype", fstype,
"options", options,
)
// Run fsck on the disk to fix repairable issues
mounter.Logger.Info("Checking disk for issues using 'fsck'.")
args := []string{"-a", source}
cmd := mounter.Runner.Command("fsck", args...)
out, err := cmd.CombinedOutput()
mounter.Logger = mounter.Logger.With("output", out)
if err != nil {
ee, isExitError := err.(utilexec.ExitError)
switch {
case err == utilexec.ErrExecutableNotFound:
mounter.Logger.Info("'fsck' not found on system; continuing mount without running 'fsck'.")
case isExitError && ee.ExitStatus() == fsckErrorsCorrected:
mounter.Logger.Info("Device has errors that were corrected with 'fsck'.")
case isExitError && ee.ExitStatus() == fsckErrorsUncorrected:
mounter.Logger.Info("'fsck' found errors on device but was unable to correct them.")
return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s.", source, string(out))
case isExitError && ee.ExitStatus() > fsckErrorsUncorrected:
mounter.Logger.Error("'fsck' error.")
}
}
// Try to mount the disk
mounter.Logger.Info("Attempting to mount disk.")
mountErr := mounter.Interface.Mount(source, target, fstype, options)
if mountErr != nil {
// Mount failed. This indicates either that the disk is unformatted or
// it contains an unexpected filesystem.
existingFormat, err := mounter.getDiskFormat(source)
if err != nil {
return err
}
if existingFormat == "" {
// Disk is unformatted so format it.
args = []string{source}
// Use 'ext4' as the default
if len(fstype) == 0 {
fstype = "ext4"
}
if fstype == "ext4" || fstype == "ext3" {
args = []string{"-F", source}
}
mounter.Logger.With("argruments", args).Info("Disk appears to be unformatted, attempting to format.")
cmd := mounter.Runner.Command("mkfs."+fstype, args...)
_, err := cmd.CombinedOutput()
if err == nil {
// the disk has been formatted successfully try to mount it again.
mounter.Logger.Info("Disk successfully formatted.")
return mounter.Interface.Mount(source, target, fstype, options)
}
mounter.Logger.With(zap.Error(err)).Error("Format of disk failed.")
return err
} else {
// Disk is already formatted and failed to mount
if len(fstype) == 0 || fstype == existingFormat {
// This is mount error
return mountErr
} else {
// Block device is formatted with unexpected filesystem, let the user know
return fmt.Errorf("failed to mount the volume as %q, it already contains %s. Mount error: %v", fstype, existingFormat, mountErr)
}
}
}
return mountErr
}
// getDiskFormat uses 'blkid' to determine a given disk's format
func (mounter *SafeFormatAndMount) getDiskFormat(disk string) (string, error) {
args := []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", disk}
mounter.Logger.Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args)
cmd := mounter.Runner.Command("blkid", args...)
dataOut, err := cmd.CombinedOutput()
output := string(dataOut)
mounter.Logger.Infof("Output: %q, err: %v", output, err)
if err != nil {
if exit, ok := err.(utilexec.ExitError); ok {
if exit.ExitStatus() == 2 {
// Disk device is unformatted.
// For `blkid`, if the specified token (TYPE/PTTYPE, etc) was
// not found, or no (specified) devices could be identified, an
// exit code of 2 is returned.
return "", nil
}
}
mounter.Logger.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
return "", err
}
var fstype, pttype string
lines := strings.Split(output, "\n")
for _, l := range lines {
if len(l) <= 0 {
// Ignore empty line.
continue
}
cs := strings.Split(l, "=")
if len(cs) != 2 {
return "", fmt.Errorf("blkid returns invalid output: %s", output)
}
// TYPE is filesystem type, and PTTYPE is partition table type, according
// to https://www.kernel.org/pub/linux/utils/util-linux/v2.21/libblkid-docs/.
if cs[0] == "TYPE" {
fstype = cs[1]
} else if cs[0] == "PTTYPE" {
pttype = cs[1]
}
}
if len(pttype) > 0 {
mounter.Logger.Infof("Disk %s detected partition table type: %s", disk, pttype)
// Returns a special non-empty string as filesystem type, then kubelet
// will not format it.
return "unknown data, probably partitions", nil
}
return fstype, nil
}
func (mounter *SafeFormatAndMount) resize(devicePath string, volumePath string) (bool, error) {
format, err := mounter.getDiskFormat(devicePath)
if err != nil {
formatErr := fmt.Errorf("error checking format for device %s: %v", devicePath, err)
return false, formatErr
}
// If disk has no format, there is no need to resize the disk because mkfs.*
// by default will use whole disk anyways.
if format == "" {
return false, nil
}
mounter.Logger.With("devicePath", devicePath).Infof("Expanding mounted volume")
switch format {
case "ext3", "ext4":
return mounter.extResize(devicePath)
case "xfs":
return mounter.xfsResize(volumePath)
}
return false, fmt.Errorf("resize of format %s is not supported for device %s mounted at %s", format, devicePath, volumePath)
}
func (mounter *SafeFormatAndMount) extResize(devicePath string) (bool, error) {
cmd := mounter.Runner.Command("resize2fs", devicePath)
output, err := cmd.CombinedOutput()
if err == nil {
mounter.Logger.With("devicePath", devicePath).Infof("Device resized successfully")
return true, nil
}
resizeError := fmt.Errorf("resize of device %s failed: %v. resize2fs output: %s", devicePath, err, string(output))
return false, resizeError
}
func (mounter *SafeFormatAndMount) xfsResize(deviceMountPath string) (bool, error) {
args := []string{"-d", deviceMountPath}
cmd := mounter.Runner.Command("xfs_growfs", args...)
output, err := cmd.CombinedOutput()
if err == nil {
mounter.Logger.With("deviceMountPath", deviceMountPath).Infof("Device %s resized successfully")
return true, nil
}
resizeError := fmt.Errorf("resize of device %s failed: %v. xfs_growfs output: %s", deviceMountPath, err, string(output))
return false, resizeError
}
func (mounter *SafeFormatAndMount) rescan(devicePath string) error {
lsblkargs := []string{"-n", "-o", "NAME", devicePath}
lsblkcmd := mounter.Runner.Command("lsblk", lsblkargs...)
lsblkoutput, err := lsblkcmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Failed to find device name associated with devicePath %s", devicePath)
}
deviceName := strings.TrimSpace(string(lsblkoutput))
if strings.HasPrefix(deviceName, "/dev/") {
deviceName = strings.TrimPrefix(deviceName, "/dev/")
}
mounter.Logger.With("deviceName", deviceName).Info("Rescanning")
// run command dd iflag=direct if=/dev/<device_name> of=/dev/null count=1
// https://docs.oracle.com/en-us/iaas/Content/Block/Tasks/rescanningdisk.htm#Rescanni
devicePathFileArg := fmt.Sprintf("if=%s", devicePath)
args := []string{"iflag=direct", devicePathFileArg, "of=/dev/null", "count=1"}
cmd := mounter.Runner.Command("dd", args...)
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, "dd", string(output))
}
mounter.Logger.With("command", "dd", "output", string(output)).Debug("dd output")
// run command echo 1 | tee /sys/class/block/%s/device/rescan
// https://docs.oracle.com/en-us/iaas/Content/Block/Tasks/rescanningdisk.htm#Rescanni
cmdStr := fmt.Sprintf("echo 1 | tee /sys/class/block/%s/device/rescan", deviceName)
cmd = mounter.Runner.Command("bash", "-c", cmdStr)
output, err = cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, cmdStr, string(output))
}
mounter.Logger.With("command", cmdStr, "output", string(output)).Debug("rescan output")
return nil
}
func (mounter *SafeFormatAndMount) getBlockSizeBytes(devicePath string) (int64, error) {
args := []string{"--getsize64", devicePath}
cmd := mounter.Runner.Command("blockdev", args...)
output, err := cmd.CombinedOutput()
if err != nil {
return -1, fmt.Errorf("command failed: %v\narguments: %s\nOutput: %v\n", err, "blockdev", string(output))
}
strOut := strings.TrimSpace(string(output))
mounter.Logger.With("devicePath", devicePath, "command", "blockdev", "output", strOut).Debugf("Get block device size in bytes successful")
gotSizeBytes, err := strconv.ParseInt(strOut, 10, 64)
if err != nil {
return -1, fmt.Errorf("failed to parse size %s into an int64 size", strOut)
}
return gotSizeBytes, nil
}
| {
hash := fnv.New32a()
scanner := bufio.NewReader(file)
for {
line, err := scanner.ReadString('\n')
if err == io.EOF {
break
}
fields := strings.Fields(line)
if len(fields) != expectedNumFieldsPerLine {
return 0, fmt.Errorf("wrong number of fields (expected %d, got %d): %s", expectedNumFieldsPerLine, len(fields), line)
}
fmt.Fprintf(hash, "%s", line)
if out != nil {
mp := MountPoint{
Device: fields[0],
Path: fields[1],
Type: fields[2],
Opts: strings.Split(fields[3], ","),
}
freq, err := strconv.Atoi(fields[4])
if err != nil {
return 0, err
}
mp.Freq = freq
pass, err := strconv.Atoi(fields[5])
if err != nil {
return 0, err
}
mp.Pass = pass
*out = append(*out, mp)
}
}
return hash.Sum32(), nil
} | identifier_body |
siamese_output_result.py | """train a specific model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import dataset
import math
import numpy as np
import cv2
import os
import feature_extractor
import time
from data_transformation import resize_to_range, corp_image
from build_distance import similarity_prob_for_one_query
slim = tf.contrib.slim
##### train configs #####
tf.app.flags.DEFINE_string('gpu', '0', 'CUDA_VISIBLE_DEVICES')
tf.app.flags.DEFINE_string(
'cfg_file', None,
'cfg file path, cfg file contains paremeters for training')
tf.app.flags.DEFINE_string(
'output_dir', None,
'output dir to save ckpts and summaries.')
tf.app.flags.DEFINE_multi_float(
'new_whale_prob', [0.5, 0.4, 0.3, 0.2, 0.1], 'prob of new_whale')
tf.app.flags.DEFINE_string(
'ref_images_set', None,
'reference set')
tf.app.flags.DEFINE_string(
'dut_images_set', None,
'images set for test')
tf.app.flags.DEFINE_bool(
'save_features', False,
'whether save features before compare, mainly for debugging')
FLAGS = tf.app.flags.FLAGS
#########################
#########################
def _var_to_restore(exclude_scopes):
if exclude_scopes is None:
return slim.get_model_variables()
model_variables = slim.get_model_variables()
vars_to_restore = []
ec_scopes = [s.strip() for s in exclude_scopes.split(',')]
for mv in model_variables:
flag = True
for es in ec_scopes:
if mv.op.name.startswith(es):
flag = False
break
if flag:
vars_to_restore.append(mv)
return vars_to_restore
def _cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
cfg = yaml.load(f)
return cfg
def _parser_humpback_whale(record, phase='train'):
with tf.name_scope('parser_humpback_whale'):
features = tf.parse_single_example(
serialized=record,
features={
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
'image_name': tf.FixedLenFeature([], tf.string),
'class_name': tf.FixedLenFeature([], tf.string),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
}
)
image = tf.image.decode_jpeg(features['image'], channels=3)
label = features['label']
image_name = features['image_name']
class_name = features['class_name']
height = features['height']
width = features['width']
return image, label, image_name, class_name, height, width
def _get_tfrecord_names(folder, split):
tfrecord_files = []
files_list = os.listdir(folder)
for f in files_list:
if (split in f) and ('.tfrecord' in f):
tfrecord_files.append(os.path.join(folder, f))
return tfrecord_files
def main(_):
tf_record_base = '/home/westwell/Desktop/dolores_storage/humpback_whale_identification/' \
'data/all/tfrecord_single_image/'
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
if FLAGS.cfg_file is None:
raise ValueError('You must supply the cfg file !')
cfg = _cfg_from_file(FLAGS.cfg_file)
train_cfg = cfg['train']
# print all configs
print('############################ cfg ############################')
for k in cfg:
print('%s: %s'%(k, cfg[k]))
tf.logging.set_verbosity(tf.logging.INFO)
#######################################################################
############## sigle GPU version ##############
#######################################################################
#### get features ####
input_image = tf.placeholder(tf.uint8, shape=[None, None, 3], name='input_image')
image = resize_to_range(input_image, cfg['min_resize_value'], cfg['max_resize_value'])
image = corp_image(image, cfg['corp_size'], random_crop=False)
image = tf.expand_dims(image, axis=0)
feature_for_dst, _ = feature_extractor.extract_features(
images=image,
num_classes=None,
output_stride=cfg['output_stride'],
global_pool=True,
model_variant=cfg['model_variant'],
weight_decay=0.0,
dropout_keep_prob=1.0,
regularize_depthwise=False,
reuse=tf.AUTO_REUSE,
is_training=False,
fine_tune_batch_norm=False,
cfg=cfg)
if len(feature_for_dst.shape) == 4:
feature_for_dst = tf.squeeze(
feature_for_dst, axis=[1,2], name='features_for_dst')
elif len(feature_for_dst.shape) == 2:
feature_for_dst = tf.identity(feature_for_dst, name='features_for_dst')
else:
raise Exception('feature_for_dst shape not right, got %s'%(feature_for_dst.shape))
#### get similarity probs of two features ####
ref_features = tf.placeholder(
tf.float32, shape=[None, feature_for_dst.shape[-1]], name='ref_features')
dut_feature = tf.placeholder(
tf.float32, shape=[1, feature_for_dst.shape[-1]], name='dut_features')
prob_same_ids = similarity_prob_for_one_query(
ref_features=ref_features,
dut_feature=dut_feature,
d_cfg=cfg['distance_config'],
scope='similarity_prob_for_one_query')
#### set up session config ####
# session config:
sess_cfg = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
sess_cfg.gpu_options.allow_growth = True
#### do test the model ####
with tf.Session(config=sess_cfg) as sess:
# init
#sess.run(tf.global_variables_initializer())
#sess.run(tf.local_variables_initializer())
# restore vars from pretrained ckpt:
vars_to_restore = _var_to_restore(None)
for v in vars_to_restore:
print(v.op.name)
restor_saver = tf.train.Saver(var_list=vars_to_restore)
restor_saver.restore(sess, tf.train.latest_checkpoint(FLAGS.output_dir))
# forward all ref images
filenames = _get_tfrecord_names(tf_record_base, FLAGS.ref_images_set)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(lambda record: _parser_humpback_whale(record, 'eval'))
dataset.batch(batch_size=1)
iterator = dataset.make_one_shot_iterator()
ref_image, _, ref_image_name, ref_class_name, _, _ = iterator.get_next()
all_ref_features = None
all_ref_cls_name = []
all_ref_images_name = []
i = 0
while True:
try:
one_ref_image, one_ref_image_name, one_ref_class_name = sess.run(
[ref_image, ref_image_name, ref_class_name])
if i % 100 == 0:
print(i, one_ref_class_name)
all_ref_cls_name.append(one_ref_class_name)
all_ref_images_name.append(one_ref_image_name)
one_ref_feature = sess.run(
tf.get_default_graph().get_tensor_by_name('features_for_dst:0'),
feed_dict={'input_image:0': one_ref_image})
if all_ref_features is None:
|
else:
all_ref_features = np.concatenate(
(all_ref_features, one_ref_feature), axis=0)
i += 1
except tf.errors.OutOfRangeError:
tf.logging.info('End of forward ref images')
break
if FLAGS.save_features:
ref_concated = np.concatenate(
(all_ref_features,
np.array(all_ref_images_name).reshape((all_ref_features.shape[0],1)),
np.array(all_ref_cls_name).reshape((all_ref_features.shape[0], 1))),
axis=1)
np.save(
os.path.join(
FLAGS.output_dir, '..', 'ref_concated_%s.npy'%(FLAGS.ref_images_set)),
ref_concated)
all_ref_cls_name.append('new_whale'.encode(encoding='utf-8'))
# forward all test images
filenames = _get_tfrecord_names(tf_record_base, FLAGS.dut_images_set)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(lambda record: _parser_humpback_whale(record, 'eval'))
dataset.batch(batch_size=1)
iterator = dataset.make_one_shot_iterator()
dut_image, _, dut_image_name, dut_class_name, _, _ = iterator.get_next()
all_dut_featurs = None
all_dut_cls_name = []
all_dut_image_names = []
i = 0
while True:
try:
one_dut_image, one_dut_image_name, one_dut_class_name = sess.run(
[dut_image, dut_image_name, dut_class_name])
if i % 100 == 0:
print(i, one_dut_image_name)
all_dut_cls_name.append(one_dut_class_name)
all_dut_image_names.append(one_dut_image_name)
one_dut_feature = sess.run(
tf.get_default_graph().get_tensor_by_name('features_for_dst:0'),
feed_dict={'input_image:0': one_dut_image})
if all_dut_featurs is None:
all_dut_featurs = one_dut_feature
else:
all_dut_featurs = np.concatenate(
(all_dut_featurs, one_dut_feature), axis=0)
i += 1
except tf.errors.OutOfRangeError:
tf.logging.info('End of forward dut images')
break
if FLAGS.save_features:
dut_concated = np.concatenate(
(all_dut_featurs,
np.array(all_dut_image_names).reshape((all_dut_featurs.shape[0],1)),
np.array(all_dut_cls_name).reshape((all_dut_featurs.shape[0], 1))),
axis=1)
np.save(
os.path.join(
FLAGS.output_dir, '..', 'dut_concated_%s.npy' % (FLAGS.dut_images_set)),
dut_concated)
# got prob_same_id for every test image and write result
# submission file
for nw_prob in FLAGS.new_whale_prob:
output_file_path = os.path.join(
FLAGS.output_dir, '..',
'submission_%s_%s.csv'%(nw_prob, time.time()))
if os.path.isfile(output_file_path):
raise Exception("submission file exists!! : %s" % (output_file_path))
with open(output_file_path, 'w') as f:
f.write('Image,Id\n')
for i in range(len(all_dut_image_names)):
one_prob_same_ids = sess.run(
tf.get_default_graph().get_tensor_by_name(
'similarity_prob_for_one_query/prob_same_ids:0'),
feed_dict={'ref_features:0': all_ref_features,
'dut_features:0': np.expand_dims(all_dut_featurs[i],axis=0)})
one_prob_same_ids = np.concatenate(
(np.squeeze(one_prob_same_ids), [nw_prob]), axis=0)
if i %100 == 0:
print('compare with: %f'%(nw_prob), i, all_dut_image_names[i],
one_prob_same_ids.min(), one_prob_same_ids.max())
one_order = np.argsort(one_prob_same_ids)[::-1] # prob index
one_order = one_order.tolist()
one_predictions = []
for idx in one_order:
tmp_prediction = all_ref_cls_name[idx]
if tmp_prediction not in one_predictions:
one_predictions.append(tmp_prediction)
if len(one_predictions) == 5: # write one result
with open(output_file_path, 'a') as f:
content = os.path.basename(all_dut_image_names[i].decode()) + ','
for j in range(len(one_predictions)):
if j == 0:
content = content + one_predictions[j].decode()
else:
content = content + ' ' + one_predictions[j].decode()
content = content + '\n'
f.write(content)
break # finish on dut image
i += 1
if __name__ == '__main__':
tf.app.run()
| all_ref_features = one_ref_feature | conditional_block |
siamese_output_result.py | """train a specific model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import dataset
import math
import numpy as np
import cv2
import os
import feature_extractor
import time
from data_transformation import resize_to_range, corp_image
from build_distance import similarity_prob_for_one_query
slim = tf.contrib.slim
##### train configs #####
tf.app.flags.DEFINE_string('gpu', '0', 'CUDA_VISIBLE_DEVICES')
tf.app.flags.DEFINE_string(
'cfg_file', None,
'cfg file path, cfg file contains paremeters for training')
tf.app.flags.DEFINE_string(
'output_dir', None,
'output dir to save ckpts and summaries.')
tf.app.flags.DEFINE_multi_float(
'new_whale_prob', [0.5, 0.4, 0.3, 0.2, 0.1], 'prob of new_whale')
tf.app.flags.DEFINE_string(
'ref_images_set', None,
'reference set')
tf.app.flags.DEFINE_string(
'dut_images_set', None,
'images set for test')
tf.app.flags.DEFINE_bool(
'save_features', False,
'whether save features before compare, mainly for debugging')
FLAGS = tf.app.flags.FLAGS
#########################
#########################
def _var_to_restore(exclude_scopes):
if exclude_scopes is None:
return slim.get_model_variables()
model_variables = slim.get_model_variables()
vars_to_restore = []
ec_scopes = [s.strip() for s in exclude_scopes.split(',')]
for mv in model_variables:
flag = True
for es in ec_scopes:
if mv.op.name.startswith(es):
flag = False
break
if flag:
vars_to_restore.append(mv)
return vars_to_restore
def | (filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
cfg = yaml.load(f)
return cfg
def _parser_humpback_whale(record, phase='train'):
with tf.name_scope('parser_humpback_whale'):
features = tf.parse_single_example(
serialized=record,
features={
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
'image_name': tf.FixedLenFeature([], tf.string),
'class_name': tf.FixedLenFeature([], tf.string),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
}
)
image = tf.image.decode_jpeg(features['image'], channels=3)
label = features['label']
image_name = features['image_name']
class_name = features['class_name']
height = features['height']
width = features['width']
return image, label, image_name, class_name, height, width
def _get_tfrecord_names(folder, split):
tfrecord_files = []
files_list = os.listdir(folder)
for f in files_list:
if (split in f) and ('.tfrecord' in f):
tfrecord_files.append(os.path.join(folder, f))
return tfrecord_files
def main(_):
tf_record_base = '/home/westwell/Desktop/dolores_storage/humpback_whale_identification/' \
'data/all/tfrecord_single_image/'
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
if FLAGS.cfg_file is None:
raise ValueError('You must supply the cfg file !')
cfg = _cfg_from_file(FLAGS.cfg_file)
train_cfg = cfg['train']
# print all configs
print('############################ cfg ############################')
for k in cfg:
print('%s: %s'%(k, cfg[k]))
tf.logging.set_verbosity(tf.logging.INFO)
#######################################################################
############## sigle GPU version ##############
#######################################################################
#### get features ####
input_image = tf.placeholder(tf.uint8, shape=[None, None, 3], name='input_image')
image = resize_to_range(input_image, cfg['min_resize_value'], cfg['max_resize_value'])
image = corp_image(image, cfg['corp_size'], random_crop=False)
image = tf.expand_dims(image, axis=0)
feature_for_dst, _ = feature_extractor.extract_features(
images=image,
num_classes=None,
output_stride=cfg['output_stride'],
global_pool=True,
model_variant=cfg['model_variant'],
weight_decay=0.0,
dropout_keep_prob=1.0,
regularize_depthwise=False,
reuse=tf.AUTO_REUSE,
is_training=False,
fine_tune_batch_norm=False,
cfg=cfg)
if len(feature_for_dst.shape) == 4:
feature_for_dst = tf.squeeze(
feature_for_dst, axis=[1,2], name='features_for_dst')
elif len(feature_for_dst.shape) == 2:
feature_for_dst = tf.identity(feature_for_dst, name='features_for_dst')
else:
raise Exception('feature_for_dst shape not right, got %s'%(feature_for_dst.shape))
#### get similarity probs of two features ####
ref_features = tf.placeholder(
tf.float32, shape=[None, feature_for_dst.shape[-1]], name='ref_features')
dut_feature = tf.placeholder(
tf.float32, shape=[1, feature_for_dst.shape[-1]], name='dut_features')
prob_same_ids = similarity_prob_for_one_query(
ref_features=ref_features,
dut_feature=dut_feature,
d_cfg=cfg['distance_config'],
scope='similarity_prob_for_one_query')
#### set up session config ####
# session config:
sess_cfg = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
sess_cfg.gpu_options.allow_growth = True
#### do test the model ####
with tf.Session(config=sess_cfg) as sess:
# init
#sess.run(tf.global_variables_initializer())
#sess.run(tf.local_variables_initializer())
# restore vars from pretrained ckpt:
vars_to_restore = _var_to_restore(None)
for v in vars_to_restore:
print(v.op.name)
restor_saver = tf.train.Saver(var_list=vars_to_restore)
restor_saver.restore(sess, tf.train.latest_checkpoint(FLAGS.output_dir))
# forward all ref images
filenames = _get_tfrecord_names(tf_record_base, FLAGS.ref_images_set)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(lambda record: _parser_humpback_whale(record, 'eval'))
dataset.batch(batch_size=1)
iterator = dataset.make_one_shot_iterator()
ref_image, _, ref_image_name, ref_class_name, _, _ = iterator.get_next()
all_ref_features = None
all_ref_cls_name = []
all_ref_images_name = []
i = 0
while True:
try:
one_ref_image, one_ref_image_name, one_ref_class_name = sess.run(
[ref_image, ref_image_name, ref_class_name])
if i % 100 == 0:
print(i, one_ref_class_name)
all_ref_cls_name.append(one_ref_class_name)
all_ref_images_name.append(one_ref_image_name)
one_ref_feature = sess.run(
tf.get_default_graph().get_tensor_by_name('features_for_dst:0'),
feed_dict={'input_image:0': one_ref_image})
if all_ref_features is None:
all_ref_features = one_ref_feature
else:
all_ref_features = np.concatenate(
(all_ref_features, one_ref_feature), axis=0)
i += 1
except tf.errors.OutOfRangeError:
tf.logging.info('End of forward ref images')
break
if FLAGS.save_features:
ref_concated = np.concatenate(
(all_ref_features,
np.array(all_ref_images_name).reshape((all_ref_features.shape[0],1)),
np.array(all_ref_cls_name).reshape((all_ref_features.shape[0], 1))),
axis=1)
np.save(
os.path.join(
FLAGS.output_dir, '..', 'ref_concated_%s.npy'%(FLAGS.ref_images_set)),
ref_concated)
all_ref_cls_name.append('new_whale'.encode(encoding='utf-8'))
# forward all test images
filenames = _get_tfrecord_names(tf_record_base, FLAGS.dut_images_set)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(lambda record: _parser_humpback_whale(record, 'eval'))
dataset.batch(batch_size=1)
iterator = dataset.make_one_shot_iterator()
dut_image, _, dut_image_name, dut_class_name, _, _ = iterator.get_next()
all_dut_featurs = None
all_dut_cls_name = []
all_dut_image_names = []
i = 0
while True:
try:
one_dut_image, one_dut_image_name, one_dut_class_name = sess.run(
[dut_image, dut_image_name, dut_class_name])
if i % 100 == 0:
print(i, one_dut_image_name)
all_dut_cls_name.append(one_dut_class_name)
all_dut_image_names.append(one_dut_image_name)
one_dut_feature = sess.run(
tf.get_default_graph().get_tensor_by_name('features_for_dst:0'),
feed_dict={'input_image:0': one_dut_image})
if all_dut_featurs is None:
all_dut_featurs = one_dut_feature
else:
all_dut_featurs = np.concatenate(
(all_dut_featurs, one_dut_feature), axis=0)
i += 1
except tf.errors.OutOfRangeError:
tf.logging.info('End of forward dut images')
break
if FLAGS.save_features:
dut_concated = np.concatenate(
(all_dut_featurs,
np.array(all_dut_image_names).reshape((all_dut_featurs.shape[0],1)),
np.array(all_dut_cls_name).reshape((all_dut_featurs.shape[0], 1))),
axis=1)
np.save(
os.path.join(
FLAGS.output_dir, '..', 'dut_concated_%s.npy' % (FLAGS.dut_images_set)),
dut_concated)
# got prob_same_id for every test image and write result
# submission file
for nw_prob in FLAGS.new_whale_prob:
output_file_path = os.path.join(
FLAGS.output_dir, '..',
'submission_%s_%s.csv'%(nw_prob, time.time()))
if os.path.isfile(output_file_path):
raise Exception("submission file exists!! : %s" % (output_file_path))
with open(output_file_path, 'w') as f:
f.write('Image,Id\n')
for i in range(len(all_dut_image_names)):
one_prob_same_ids = sess.run(
tf.get_default_graph().get_tensor_by_name(
'similarity_prob_for_one_query/prob_same_ids:0'),
feed_dict={'ref_features:0': all_ref_features,
'dut_features:0': np.expand_dims(all_dut_featurs[i],axis=0)})
one_prob_same_ids = np.concatenate(
(np.squeeze(one_prob_same_ids), [nw_prob]), axis=0)
if i %100 == 0:
print('compare with: %f'%(nw_prob), i, all_dut_image_names[i],
one_prob_same_ids.min(), one_prob_same_ids.max())
one_order = np.argsort(one_prob_same_ids)[::-1] # prob index
one_order = one_order.tolist()
one_predictions = []
for idx in one_order:
tmp_prediction = all_ref_cls_name[idx]
if tmp_prediction not in one_predictions:
one_predictions.append(tmp_prediction)
if len(one_predictions) == 5: # write one result
with open(output_file_path, 'a') as f:
content = os.path.basename(all_dut_image_names[i].decode()) + ','
for j in range(len(one_predictions)):
if j == 0:
content = content + one_predictions[j].decode()
else:
content = content + ' ' + one_predictions[j].decode()
content = content + '\n'
f.write(content)
break # finish on dut image
i += 1
if __name__ == '__main__':
tf.app.run()
| _cfg_from_file | identifier_name |
siamese_output_result.py | """train a specific model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import dataset
import math
import numpy as np
import cv2
import os
import feature_extractor
import time
from data_transformation import resize_to_range, corp_image
from build_distance import similarity_prob_for_one_query
slim = tf.contrib.slim
##### train configs #####
tf.app.flags.DEFINE_string('gpu', '0', 'CUDA_VISIBLE_DEVICES')
tf.app.flags.DEFINE_string(
'cfg_file', None,
'cfg file path, cfg file contains paremeters for training')
tf.app.flags.DEFINE_string(
'output_dir', None,
'output dir to save ckpts and summaries.')
tf.app.flags.DEFINE_multi_float(
'new_whale_prob', [0.5, 0.4, 0.3, 0.2, 0.1], 'prob of new_whale')
tf.app.flags.DEFINE_string(
'ref_images_set', None,
'reference set')
tf.app.flags.DEFINE_string(
'dut_images_set', None,
'images set for test')
tf.app.flags.DEFINE_bool(
'save_features', False,
'whether save features before compare, mainly for debugging')
FLAGS = tf.app.flags.FLAGS
#########################
#########################
def _var_to_restore(exclude_scopes):
if exclude_scopes is None:
return slim.get_model_variables()
model_variables = slim.get_model_variables()
vars_to_restore = []
ec_scopes = [s.strip() for s in exclude_scopes.split(',')]
for mv in model_variables:
flag = True
for es in ec_scopes:
if mv.op.name.startswith(es):
flag = False
break
if flag:
vars_to_restore.append(mv)
return vars_to_restore
def _cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
cfg = yaml.load(f)
return cfg
def _parser_humpback_whale(record, phase='train'):
with tf.name_scope('parser_humpback_whale'):
features = tf.parse_single_example(
serialized=record,
features={
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
'image_name': tf.FixedLenFeature([], tf.string),
'class_name': tf.FixedLenFeature([], tf.string),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
}
)
image = tf.image.decode_jpeg(features['image'], channels=3)
label = features['label']
image_name = features['image_name']
class_name = features['class_name']
height = features['height']
width = features['width']
return image, label, image_name, class_name, height, width
def _get_tfrecord_names(folder, split):
tfrecord_files = []
files_list = os.listdir(folder)
for f in files_list:
if (split in f) and ('.tfrecord' in f):
tfrecord_files.append(os.path.join(folder, f))
return tfrecord_files
def main(_):
tf_record_base = '/home/westwell/Desktop/dolores_storage/humpback_whale_identification/' \
'data/all/tfrecord_single_image/'
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
if FLAGS.cfg_file is None:
raise ValueError('You must supply the cfg file !')
cfg = _cfg_from_file(FLAGS.cfg_file)
train_cfg = cfg['train']
# print all configs
print('############################ cfg ############################')
for k in cfg:
print('%s: %s'%(k, cfg[k]))
tf.logging.set_verbosity(tf.logging.INFO)
#######################################################################
############## sigle GPU version ##############
#######################################################################
#### get features ####
input_image = tf.placeholder(tf.uint8, shape=[None, None, 3], name='input_image')
image = resize_to_range(input_image, cfg['min_resize_value'], cfg['max_resize_value'])
image = corp_image(image, cfg['corp_size'], random_crop=False)
image = tf.expand_dims(image, axis=0)
feature_for_dst, _ = feature_extractor.extract_features(
images=image,
num_classes=None,
output_stride=cfg['output_stride'],
global_pool=True,
model_variant=cfg['model_variant'],
weight_decay=0.0,
dropout_keep_prob=1.0,
regularize_depthwise=False,
reuse=tf.AUTO_REUSE,
is_training=False,
fine_tune_batch_norm=False,
cfg=cfg)
if len(feature_for_dst.shape) == 4:
feature_for_dst = tf.squeeze(
feature_for_dst, axis=[1,2], name='features_for_dst')
elif len(feature_for_dst.shape) == 2:
feature_for_dst = tf.identity(feature_for_dst, name='features_for_dst')
else:
raise Exception('feature_for_dst shape not right, got %s'%(feature_for_dst.shape))
#### get similarity probs of two features ####
ref_features = tf.placeholder(
tf.float32, shape=[None, feature_for_dst.shape[-1]], name='ref_features')
dut_feature = tf.placeholder(
tf.float32, shape=[1, feature_for_dst.shape[-1]], name='dut_features')
prob_same_ids = similarity_prob_for_one_query(
ref_features=ref_features,
dut_feature=dut_feature,
d_cfg=cfg['distance_config'],
scope='similarity_prob_for_one_query')
#### set up session config ####
# session config:
sess_cfg = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
sess_cfg.gpu_options.allow_growth = True
#### do test the model ####
with tf.Session(config=sess_cfg) as sess:
# init
#sess.run(tf.global_variables_initializer())
#sess.run(tf.local_variables_initializer())
# restore vars from pretrained ckpt:
vars_to_restore = _var_to_restore(None)
for v in vars_to_restore:
print(v.op.name)
restor_saver = tf.train.Saver(var_list=vars_to_restore)
restor_saver.restore(sess, tf.train.latest_checkpoint(FLAGS.output_dir))
# forward all ref images
filenames = _get_tfrecord_names(tf_record_base, FLAGS.ref_images_set)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(lambda record: _parser_humpback_whale(record, 'eval'))
dataset.batch(batch_size=1)
iterator = dataset.make_one_shot_iterator()
ref_image, _, ref_image_name, ref_class_name, _, _ = iterator.get_next()
all_ref_features = None
all_ref_cls_name = []
all_ref_images_name = []
i = 0
while True:
try:
one_ref_image, one_ref_image_name, one_ref_class_name = sess.run(
[ref_image, ref_image_name, ref_class_name])
if i % 100 == 0:
print(i, one_ref_class_name)
all_ref_cls_name.append(one_ref_class_name)
all_ref_images_name.append(one_ref_image_name)
one_ref_feature = sess.run(
tf.get_default_graph().get_tensor_by_name('features_for_dst:0'),
feed_dict={'input_image:0': one_ref_image})
if all_ref_features is None:
all_ref_features = one_ref_feature
else:
all_ref_features = np.concatenate(
(all_ref_features, one_ref_feature), axis=0)
i += 1
except tf.errors.OutOfRangeError:
tf.logging.info('End of forward ref images')
break
if FLAGS.save_features:
ref_concated = np.concatenate(
(all_ref_features,
np.array(all_ref_images_name).reshape((all_ref_features.shape[0],1)),
np.array(all_ref_cls_name).reshape((all_ref_features.shape[0], 1))),
axis=1)
np.save(
os.path.join(
FLAGS.output_dir, '..', 'ref_concated_%s.npy'%(FLAGS.ref_images_set)),
ref_concated)
all_ref_cls_name.append('new_whale'.encode(encoding='utf-8'))
# forward all test images
filenames = _get_tfrecord_names(tf_record_base, FLAGS.dut_images_set)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(lambda record: _parser_humpback_whale(record, 'eval'))
dataset.batch(batch_size=1)
iterator = dataset.make_one_shot_iterator()
dut_image, _, dut_image_name, dut_class_name, _, _ = iterator.get_next()
all_dut_featurs = None
all_dut_cls_name = []
all_dut_image_names = []
i = 0
while True:
try:
one_dut_image, one_dut_image_name, one_dut_class_name = sess.run(
[dut_image, dut_image_name, dut_class_name])
if i % 100 == 0:
print(i, one_dut_image_name)
all_dut_cls_name.append(one_dut_class_name)
all_dut_image_names.append(one_dut_image_name)
one_dut_feature = sess.run(
tf.get_default_graph().get_tensor_by_name('features_for_dst:0'),
feed_dict={'input_image:0': one_dut_image})
if all_dut_featurs is None:
all_dut_featurs = one_dut_feature
else:
all_dut_featurs = np.concatenate(
(all_dut_featurs, one_dut_feature), axis=0)
i += 1
except tf.errors.OutOfRangeError:
tf.logging.info('End of forward dut images')
break
if FLAGS.save_features:
dut_concated = np.concatenate(
(all_dut_featurs,
np.array(all_dut_image_names).reshape((all_dut_featurs.shape[0],1)),
np.array(all_dut_cls_name).reshape((all_dut_featurs.shape[0], 1))),
axis=1)
np.save(
os.path.join(
FLAGS.output_dir, '..', 'dut_concated_%s.npy' % (FLAGS.dut_images_set)),
dut_concated)
# got prob_same_id for every test image and write result
# submission file
for nw_prob in FLAGS.new_whale_prob:
output_file_path = os.path.join(
FLAGS.output_dir, '..',
'submission_%s_%s.csv'%(nw_prob, time.time()))
if os.path.isfile(output_file_path):
raise Exception("submission file exists!! : %s" % (output_file_path))
with open(output_file_path, 'w') as f: | 'similarity_prob_for_one_query/prob_same_ids:0'),
feed_dict={'ref_features:0': all_ref_features,
'dut_features:0': np.expand_dims(all_dut_featurs[i],axis=0)})
one_prob_same_ids = np.concatenate(
(np.squeeze(one_prob_same_ids), [nw_prob]), axis=0)
if i %100 == 0:
print('compare with: %f'%(nw_prob), i, all_dut_image_names[i],
one_prob_same_ids.min(), one_prob_same_ids.max())
one_order = np.argsort(one_prob_same_ids)[::-1] # prob index
one_order = one_order.tolist()
one_predictions = []
for idx in one_order:
tmp_prediction = all_ref_cls_name[idx]
if tmp_prediction not in one_predictions:
one_predictions.append(tmp_prediction)
if len(one_predictions) == 5: # write one result
with open(output_file_path, 'a') as f:
content = os.path.basename(all_dut_image_names[i].decode()) + ','
for j in range(len(one_predictions)):
if j == 0:
content = content + one_predictions[j].decode()
else:
content = content + ' ' + one_predictions[j].decode()
content = content + '\n'
f.write(content)
break # finish on dut image
i += 1
if __name__ == '__main__':
tf.app.run() | f.write('Image,Id\n')
for i in range(len(all_dut_image_names)):
one_prob_same_ids = sess.run(
tf.get_default_graph().get_tensor_by_name( | random_line_split |
siamese_output_result.py | """train a specific model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import dataset
import math
import numpy as np
import cv2
import os
import feature_extractor
import time
from data_transformation import resize_to_range, corp_image
from build_distance import similarity_prob_for_one_query
slim = tf.contrib.slim
##### train configs #####
tf.app.flags.DEFINE_string('gpu', '0', 'CUDA_VISIBLE_DEVICES')
tf.app.flags.DEFINE_string(
'cfg_file', None,
'cfg file path, cfg file contains paremeters for training')
tf.app.flags.DEFINE_string(
'output_dir', None,
'output dir to save ckpts and summaries.')
tf.app.flags.DEFINE_multi_float(
'new_whale_prob', [0.5, 0.4, 0.3, 0.2, 0.1], 'prob of new_whale')
tf.app.flags.DEFINE_string(
'ref_images_set', None,
'reference set')
tf.app.flags.DEFINE_string(
'dut_images_set', None,
'images set for test')
tf.app.flags.DEFINE_bool(
'save_features', False,
'whether save features before compare, mainly for debugging')
FLAGS = tf.app.flags.FLAGS
#########################
#########################
def _var_to_restore(exclude_scopes):
if exclude_scopes is None:
return slim.get_model_variables()
model_variables = slim.get_model_variables()
vars_to_restore = []
ec_scopes = [s.strip() for s in exclude_scopes.split(',')]
for mv in model_variables:
flag = True
for es in ec_scopes:
if mv.op.name.startswith(es):
flag = False
break
if flag:
vars_to_restore.append(mv)
return vars_to_restore
def _cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
cfg = yaml.load(f)
return cfg
def _parser_humpback_whale(record, phase='train'):
|
def _get_tfrecord_names(folder, split):
tfrecord_files = []
files_list = os.listdir(folder)
for f in files_list:
if (split in f) and ('.tfrecord' in f):
tfrecord_files.append(os.path.join(folder, f))
return tfrecord_files
def main(_):
tf_record_base = '/home/westwell/Desktop/dolores_storage/humpback_whale_identification/' \
'data/all/tfrecord_single_image/'
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
if FLAGS.cfg_file is None:
raise ValueError('You must supply the cfg file !')
cfg = _cfg_from_file(FLAGS.cfg_file)
train_cfg = cfg['train']
# print all configs
print('############################ cfg ############################')
for k in cfg:
print('%s: %s'%(k, cfg[k]))
tf.logging.set_verbosity(tf.logging.INFO)
#######################################################################
############## sigle GPU version ##############
#######################################################################
#### get features ####
input_image = tf.placeholder(tf.uint8, shape=[None, None, 3], name='input_image')
image = resize_to_range(input_image, cfg['min_resize_value'], cfg['max_resize_value'])
image = corp_image(image, cfg['corp_size'], random_crop=False)
image = tf.expand_dims(image, axis=0)
feature_for_dst, _ = feature_extractor.extract_features(
images=image,
num_classes=None,
output_stride=cfg['output_stride'],
global_pool=True,
model_variant=cfg['model_variant'],
weight_decay=0.0,
dropout_keep_prob=1.0,
regularize_depthwise=False,
reuse=tf.AUTO_REUSE,
is_training=False,
fine_tune_batch_norm=False,
cfg=cfg)
if len(feature_for_dst.shape) == 4:
feature_for_dst = tf.squeeze(
feature_for_dst, axis=[1,2], name='features_for_dst')
elif len(feature_for_dst.shape) == 2:
feature_for_dst = tf.identity(feature_for_dst, name='features_for_dst')
else:
raise Exception('feature_for_dst shape not right, got %s'%(feature_for_dst.shape))
#### get similarity probs of two features ####
ref_features = tf.placeholder(
tf.float32, shape=[None, feature_for_dst.shape[-1]], name='ref_features')
dut_feature = tf.placeholder(
tf.float32, shape=[1, feature_for_dst.shape[-1]], name='dut_features')
prob_same_ids = similarity_prob_for_one_query(
ref_features=ref_features,
dut_feature=dut_feature,
d_cfg=cfg['distance_config'],
scope='similarity_prob_for_one_query')
#### set up session config ####
# session config:
sess_cfg = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
sess_cfg.gpu_options.allow_growth = True
#### do test the model ####
with tf.Session(config=sess_cfg) as sess:
# init
#sess.run(tf.global_variables_initializer())
#sess.run(tf.local_variables_initializer())
# restore vars from pretrained ckpt:
vars_to_restore = _var_to_restore(None)
for v in vars_to_restore:
print(v.op.name)
restor_saver = tf.train.Saver(var_list=vars_to_restore)
restor_saver.restore(sess, tf.train.latest_checkpoint(FLAGS.output_dir))
# forward all ref images
filenames = _get_tfrecord_names(tf_record_base, FLAGS.ref_images_set)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(lambda record: _parser_humpback_whale(record, 'eval'))
dataset.batch(batch_size=1)
iterator = dataset.make_one_shot_iterator()
ref_image, _, ref_image_name, ref_class_name, _, _ = iterator.get_next()
all_ref_features = None
all_ref_cls_name = []
all_ref_images_name = []
i = 0
while True:
try:
one_ref_image, one_ref_image_name, one_ref_class_name = sess.run(
[ref_image, ref_image_name, ref_class_name])
if i % 100 == 0:
print(i, one_ref_class_name)
all_ref_cls_name.append(one_ref_class_name)
all_ref_images_name.append(one_ref_image_name)
one_ref_feature = sess.run(
tf.get_default_graph().get_tensor_by_name('features_for_dst:0'),
feed_dict={'input_image:0': one_ref_image})
if all_ref_features is None:
all_ref_features = one_ref_feature
else:
all_ref_features = np.concatenate(
(all_ref_features, one_ref_feature), axis=0)
i += 1
except tf.errors.OutOfRangeError:
tf.logging.info('End of forward ref images')
break
if FLAGS.save_features:
ref_concated = np.concatenate(
(all_ref_features,
np.array(all_ref_images_name).reshape((all_ref_features.shape[0],1)),
np.array(all_ref_cls_name).reshape((all_ref_features.shape[0], 1))),
axis=1)
np.save(
os.path.join(
FLAGS.output_dir, '..', 'ref_concated_%s.npy'%(FLAGS.ref_images_set)),
ref_concated)
all_ref_cls_name.append('new_whale'.encode(encoding='utf-8'))
# forward all test images
filenames = _get_tfrecord_names(tf_record_base, FLAGS.dut_images_set)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(lambda record: _parser_humpback_whale(record, 'eval'))
dataset.batch(batch_size=1)
iterator = dataset.make_one_shot_iterator()
dut_image, _, dut_image_name, dut_class_name, _, _ = iterator.get_next()
all_dut_featurs = None
all_dut_cls_name = []
all_dut_image_names = []
i = 0
while True:
try:
one_dut_image, one_dut_image_name, one_dut_class_name = sess.run(
[dut_image, dut_image_name, dut_class_name])
if i % 100 == 0:
print(i, one_dut_image_name)
all_dut_cls_name.append(one_dut_class_name)
all_dut_image_names.append(one_dut_image_name)
one_dut_feature = sess.run(
tf.get_default_graph().get_tensor_by_name('features_for_dst:0'),
feed_dict={'input_image:0': one_dut_image})
if all_dut_featurs is None:
all_dut_featurs = one_dut_feature
else:
all_dut_featurs = np.concatenate(
(all_dut_featurs, one_dut_feature), axis=0)
i += 1
except tf.errors.OutOfRangeError:
tf.logging.info('End of forward dut images')
break
if FLAGS.save_features:
dut_concated = np.concatenate(
(all_dut_featurs,
np.array(all_dut_image_names).reshape((all_dut_featurs.shape[0],1)),
np.array(all_dut_cls_name).reshape((all_dut_featurs.shape[0], 1))),
axis=1)
np.save(
os.path.join(
FLAGS.output_dir, '..', 'dut_concated_%s.npy' % (FLAGS.dut_images_set)),
dut_concated)
# got prob_same_id for every test image and write result
# submission file
for nw_prob in FLAGS.new_whale_prob:
output_file_path = os.path.join(
FLAGS.output_dir, '..',
'submission_%s_%s.csv'%(nw_prob, time.time()))
if os.path.isfile(output_file_path):
raise Exception("submission file exists!! : %s" % (output_file_path))
with open(output_file_path, 'w') as f:
f.write('Image,Id\n')
for i in range(len(all_dut_image_names)):
one_prob_same_ids = sess.run(
tf.get_default_graph().get_tensor_by_name(
'similarity_prob_for_one_query/prob_same_ids:0'),
feed_dict={'ref_features:0': all_ref_features,
'dut_features:0': np.expand_dims(all_dut_featurs[i],axis=0)})
one_prob_same_ids = np.concatenate(
(np.squeeze(one_prob_same_ids), [nw_prob]), axis=0)
if i %100 == 0:
print('compare with: %f'%(nw_prob), i, all_dut_image_names[i],
one_prob_same_ids.min(), one_prob_same_ids.max())
one_order = np.argsort(one_prob_same_ids)[::-1] # prob index
one_order = one_order.tolist()
one_predictions = []
for idx in one_order:
tmp_prediction = all_ref_cls_name[idx]
if tmp_prediction not in one_predictions:
one_predictions.append(tmp_prediction)
if len(one_predictions) == 5: # write one result
with open(output_file_path, 'a') as f:
content = os.path.basename(all_dut_image_names[i].decode()) + ','
for j in range(len(one_predictions)):
if j == 0:
content = content + one_predictions[j].decode()
else:
content = content + ' ' + one_predictions[j].decode()
content = content + '\n'
f.write(content)
break # finish on dut image
i += 1
if __name__ == '__main__':
tf.app.run()
| with tf.name_scope('parser_humpback_whale'):
features = tf.parse_single_example(
serialized=record,
features={
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
'image_name': tf.FixedLenFeature([], tf.string),
'class_name': tf.FixedLenFeature([], tf.string),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
}
)
image = tf.image.decode_jpeg(features['image'], channels=3)
label = features['label']
image_name = features['image_name']
class_name = features['class_name']
height = features['height']
width = features['width']
return image, label, image_name, class_name, height, width | identifier_body |
font_atlas.rs | #[macro_use]
extern crate glium;
use euclid::Rect;
use font_kit::font::Font;
use glium::backend::Facade;
use glium::texture::Texture2d;
use glium::{glutin, Surface};
use lyon_path::math::{Angle, Point, Vector};
use lyon_path::Segment;
use msdfgen::{compute_msdf, recolor_contours, Contour, PathCollector};
use std::collections::HashMap;
const SDF_DIMENSION: u32 = 32;
fn get_font() -> Font {
use font_kit::family_name::FamilyName;
use font_kit::properties::{Properties, Style};
use font_kit::source::SystemSource;
let source = SystemSource::new();
source
.select_best_match(
&[FamilyName::Serif],
Properties::new().style(Style::Normal),
)
.expect("Failed to select a good font")
.load()
.unwrap()
}
/// Get a glyph ID for a character, its contours, and the typographic bounds for that glyph
/// TODO: this should also return font.origin() so we can offset the EM-space
/// computations by it. However, on freetype that always returns 0 so for the
/// moment we'll get away without it
fn get_glyph(font: &Font, chr: char) -> (u32, Vec<Contour>, Rect<f32>) {
use font_kit::hinting::HintingOptions;
use lyon_path::builder::FlatPathBuilder;
let glyph_id = font.glyph_for_char(chr).unwrap();
let mut builder = PathCollector::new();
font.outline(glyph_id, HintingOptions::None, &mut builder)
.unwrap();
(
glyph_id,
builder.build(),
font.typographic_bounds(glyph_id).unwrap(),
)
}
/// Rescale contours so they fit in the provided rectangle.
/// Returns the scaled contours along with the transformation used to rescale the contours
fn rescale_contours(
mut contours: Vec<Contour>,
initial_bounds: Rect<f32>,
bounds: lyon_path::math::Rect,
) -> (Vec<Contour>, euclid::Transform2D<f32>) {
let initial_scale = initial_bounds.size.width.max(initial_bounds.size.height);
let bounds_scale = bounds.size.width.max(bounds.size.height);
let transformation =
euclid::Transform2D::create_translation(-initial_bounds.origin.x, -initial_bounds.origin.y)
.post_scale(bounds_scale / initial_scale, bounds_scale / initial_scale)
.post_translate(bounds.origin.to_vector());
for contour in &mut contours {
for mut elem in &mut contour.elements {
elem.segment = match elem.segment {
Segment::Line(s) => Segment::Line(s.transform(&transformation)),
Segment::Quadratic(s) => Segment::Quadratic(s.transform(&transformation)),
Segment::Cubic(s) => Segment::Cubic(s.transform(&transformation)),
Segment::Arc(s) => Segment::Arc(lyon_geom::Arc {
center: transformation.transform_point(&s.center),
..s
}),
}
}
}
(contours, transformation)
}
#[derive(Copy, Clone)]
struct Vertex2D {
position: [f32; 2],
uv: [f32; 2],
color: [f32; 3],
}
glium::implement_vertex!(Vertex2D, position, uv, color);
/// All the information required to render a character from a string
#[derive(Clone, Copy, Debug)]
struct RenderChar {
/// The position of the vertices
verts: Rect<f32>,
/// The UV coordinates of the vertices
uv: Rect<f32>,
}
impl RenderChar {
fn verts(&self) -> [Vertex2D; 4] {
macro_rules! vertex {
($p: expr, $t: expr) => {{
let color = [rand::random(), rand::random(), rand::random()];
let p = $p;
let t = $t;
Vertex2D {
position: [p.x, p.y],
uv: [t.x, t.y],
color: color.clone(),
}
}};
}
[
vertex!(self.verts.bottom_left(), self.uv.bottom_left()),
vertex!(self.verts.origin, self.uv.origin),
vertex!(self.verts.bottom_right(), self.uv.bottom_right()),
vertex!(self.verts.top_right(), self.uv.top_right()),
]
}
}
/// The information about a glyph that gets cached in the font atlas.
/// Since every letter has a different scaling factor to make maximum use of the MSDF pixels,
/// we need to keep track of the offset and scale from font unit space. This
/// information is required when positioning characters to get the right scale
/// and positioning for the geometry.
#[derive(Clone, Copy, Debug)]
struct GlyphInformation {
id: u32,
/// Where it actually is in the atlas texture
uv: Rect<f32>,
/// The font-space rectangle covered by the uv rectangle
font_units: Rect<f32>,
}
struct FontAtlas<'font, 'facade, T: Facade> {
/// Used when a string requires new glyphs
font: &'font Font,
/// Reference to the facade that is when we need to grow the atlas texture
facade: &'facade T,
/// The scale of each character
char_dim: u32,
/// The current dimensions of the texture
alloced_size: u32,
/// The x coordinate at which to place the next character,
next_x: u32,
/// The y coordinate at which to place the next character,
next_y: u32,
/// The actual backing texture that includes all of the distances.
/// All the distance values should be roughly in [-1, 1]
tex: Texture2d,
/// Texture coordinates of every character we know about
/// Technically, this should probably use glyph ids as keys
locations: HashMap<char, GlyphInformation>,
}
impl<'font, 'facade, T: Facade> FontAtlas<'font, 'facade, T> {
/// Create a new atlas.
fn build(
char_dim: u32,
font: &'font Font,
facade: &'facade T,
) -> Result<Self, glium::texture::TextureCreationError> {
use glium::texture::{MipmapsOption, UncompressedFloatFormat};
let alloced_size = char_dim * 16;
let tex = Texture2d::empty_with_format(
facade,
UncompressedFloatFormat::F16F16F16,
MipmapsOption::NoMipmap,
alloced_size,
alloced_size,
)?;
println!("Allocated {0:?}x{0:?} texture", alloced_size);
Ok(Self {
locations: Default::default(),
next_x: 0,
next_y: 0,
font,
facade,
char_dim,
tex,
alloced_size,
})
}
/// Get the glyph information for a character, either pulling them from the cache
/// or generating the MSDF
fn character_information(&mut self, c: char) -> GlyphInformation {
if !self.locations.contains_key(&c) {
const INIT_UV_BORDER: f32 = 0.2;
const UV_BORDER: f32 = 0.1;
let (glyph_id, contours, font_unit_rect) = get_glyph(self.font, c);
let uv_rect = Rect::new(
Point::new(INIT_UV_BORDER, INIT_UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * INIT_UV_BORDER, 1.0 - 2.0 * INIT_UV_BORDER),
);
let (contours, transform) = rescale_contours(contours, font_unit_rect, uv_rect);
// Build the contours and upload thfont_unit to the texture
let contours = recolor_contours(contours, Angle::degrees(3.0), 1);
let msdf = compute_msdf(&contours, self.char_dim as usize);
self.tex.write(
glium::Rect {
left: self.next_x,
bottom: self.next_y,
width: self.char_dim,
height: self.char_dim,
},
msdf,
);
// Compute the final positions of the font_unit and uv rectangles
// transform should just be a scale and transform, easily invertable
let inv_transform = transform.inverse().unwrap();
let uv_rect = Rect::new(
Point::new(UV_BORDER, UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * UV_BORDER, 1.0 - 2.0 * UV_BORDER),
);
let font_unit_rect = inv_transform.transform_rect(&uv_rect);
let alloc_scale = 1.0 / self.alloced_size as f32;
let uv_rect = uv_rect.scale(
self.char_dim as f32 * alloc_scale,
self.char_dim as f32 * alloc_scale,
);
let uv_rect = uv_rect
.translate(&(Vector::new(self.next_x as f32, self.next_y as f32) * alloc_scale));
// Make sure to advance to the next character slot
self.next_x += self.char_dim;
if self.next_x == self.alloced_size {
self.next_x = 0;
self.next_y += self.char_dim;
}
let tr = GlyphInformation {
id: glyph_id,
uv: uv_rect,
font_units: font_unit_rect,
};
self.locations.insert(c, tr);
}
self.locations[&c]
}
/// Layout a string.
/// TODO: hide things with interior mutability so that this doesn't take &mut
fn layout_string(&mut self, start: Point, size_in_points: f32, s: &str) -> Vec<RenderChar> {
let metrics = self.font.metrics();
eprintln!("{:?}", metrics);
let mut tr = Vec::new();
let scale = size_in_points / metrics.units_per_em as f32;
let mut transform = euclid::Transform2D::create_scale(scale, scale)
.post_translate(start.to_vector() + Vector::new(0.0, metrics.descent * -scale));
for c in s.chars() {
let information = self.character_information(c);
tr.push(RenderChar {
verts: transform.transform_rect(&information.font_units),
uv: information.uv,
});
transform = transform.post_translate(
self.font
.advance(information.id)
.unwrap_or(Vector::new(0.0, 0.0)) * scale,
);
}
tr
}
}
fn main() {
let mut events_loop = glutin::EventsLoop::new();
let mut window_size: glutin::dpi::LogicalSize = (512u32, 512).into();
let window = glutin::WindowBuilder::new().with_dimensions(window_size);
let context = glutin::ContextBuilder::new();
let context = context.with_gl_profile(glutin::GlProfile::Core);
let context = context.with_gl_debug_flag(true);
let display =
glium::Display::new(window, context, &events_loop).expect("Error creating GL display");
let hidpi_factor = display.gl_window().window().get_hidpi_factor() as f32;
println!("{:?}", hidpi_factor);
let font = get_font();
let bg_shader = program!(&display,
410 => {
vertex: r#"
#version 410
in vec2 position;
in vec2 uv;
in vec3 color;
out vec3 cross_color;
out vec2 cross_uv;
uniform mat4 transform;
void main() {
gl_Position = vec4(position, 0.0, 1.0) * transform;
cross_color = color;
cross_uv = uv;
}"#,
fragment: r#"
#version 410
uniform sampler2D tex;
in vec2 cross_uv;
in vec3 cross_color;
out vec4 color;
#define RADIUS 0.05
float band_around(float center, float r, float f) {
return smoothstep(center - r, center, f) -
smoothstep(center, center + r, f);
}
float remap(float f) {
return smoothstep(-RADIUS, RADIUS, f);
}
void main() {
vec3 x = texture(tex, cross_uv).rgb;
float v = max(min(x.r, x.g), min(max(x.r, x.g), x.b));
float c = remap(v);
color = vec4(cross_color.rgb, c);
}"#,
},
)
.unwrap();
let mut font_atlas =
FontAtlas::build(SDF_DIMENSION, &font, &display).expect("Failed to build font atlas");
let layout = font_atlas.layout_string(
Point::new(72.0, 72.0),
16.0,
// ":{<~The lazy cat jumps over the xenophobic dog, yodeling~>}",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()`~'\";/.,<>?",
);
let mut vertices = Vec::with_capacity(layout.len() * 4);
let mut indices = Vec::with_capacity(layout.len() * 5);
for c in &layout {
let base = vertices.len() as u16;
vertices.extend_from_slice(&c.verts());
indices.push(base);
indices.push(base + 1);
indices.push(base + 2);
indices.push(base + 3);
indices.push(std::u16::MAX);
}
let tex_vbo = glium::VertexBuffer::immutable(&display, &vertices).unwrap();
let index_buffer = glium::index::IndexBuffer::new(
&display,
glium::index::PrimitiveType::TriangleStrip,
&indices,
)
.unwrap();
let mut closed = false;
while !closed {
let params = glium::DrawParameters {
blend: glium::Blend::alpha_blending(),
primitive_restart_index: true,
..Default::default()
};
// This transform converts from point-space, with (0, 0) in the bottom left corner
// to NDC
// The final 96.0 / 72.0 scaling is because virtual DPI is based on 96
// DPI while points are 1/72 of an inch
let transform = euclid::Transform3D::create_translation(-1.0, -1.0, 0.0)
.pre_scale(2.0 / (window_size.width as f32), 2.0 / (window_size.height as f32), 1.0)
.pre_scale(96.0 / 72.0, 96.0 / 72.0, 1.0);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
let uniforms = uniform!(
tex: font_atlas.tex.sampled(),
transform: transform.to_column_arrays(),
);
target
.draw(&tex_vbo, &index_buffer, &bg_shader, &uniforms, ¶ms)
.unwrap();
target.finish().unwrap();
events_loop.poll_events(|ev| match ev {
glutin::Event::WindowEvent { event, .. } => match event {
glutin::WindowEvent::CloseRequested => closed = true,
glutin::WindowEvent::KeyboardInput { input, .. } => match input.virtual_keycode {
_ => {}
},
glutin::WindowEvent::Resized(new_size) => {
window_size = new_size;
}
_ => {}
},
_ => |
})
}
}
| {} | conditional_block |
font_atlas.rs | #[macro_use]
extern crate glium;
use euclid::Rect;
use font_kit::font::Font;
use glium::backend::Facade;
use glium::texture::Texture2d;
use glium::{glutin, Surface};
use lyon_path::math::{Angle, Point, Vector};
use lyon_path::Segment;
use msdfgen::{compute_msdf, recolor_contours, Contour, PathCollector};
use std::collections::HashMap;
const SDF_DIMENSION: u32 = 32;
fn get_font() -> Font {
use font_kit::family_name::FamilyName;
use font_kit::properties::{Properties, Style};
use font_kit::source::SystemSource;
let source = SystemSource::new();
source
.select_best_match(
&[FamilyName::Serif],
Properties::new().style(Style::Normal),
)
.expect("Failed to select a good font")
.load()
.unwrap()
}
/// Get a glyph ID for a character, its contours, and the typographic bounds for that glyph
/// TODO: this should also return font.origin() so we can offset the EM-space
/// computations by it. However, on freetype that always returns 0 so for the
/// moment we'll get away without it
fn get_glyph(font: &Font, chr: char) -> (u32, Vec<Contour>, Rect<f32>) {
use font_kit::hinting::HintingOptions;
use lyon_path::builder::FlatPathBuilder;
let glyph_id = font.glyph_for_char(chr).unwrap();
let mut builder = PathCollector::new();
font.outline(glyph_id, HintingOptions::None, &mut builder)
.unwrap();
(
glyph_id,
builder.build(),
font.typographic_bounds(glyph_id).unwrap(),
)
}
/// Rescale contours so they fit in the provided rectangle.
/// Returns the scaled contours along with the transformation used to rescale the contours
fn rescale_contours(
mut contours: Vec<Contour>,
initial_bounds: Rect<f32>,
bounds: lyon_path::math::Rect,
) -> (Vec<Contour>, euclid::Transform2D<f32>) {
let initial_scale = initial_bounds.size.width.max(initial_bounds.size.height);
let bounds_scale = bounds.size.width.max(bounds.size.height);
let transformation =
euclid::Transform2D::create_translation(-initial_bounds.origin.x, -initial_bounds.origin.y)
.post_scale(bounds_scale / initial_scale, bounds_scale / initial_scale)
.post_translate(bounds.origin.to_vector());
for contour in &mut contours {
for mut elem in &mut contour.elements {
elem.segment = match elem.segment {
Segment::Line(s) => Segment::Line(s.transform(&transformation)),
Segment::Quadratic(s) => Segment::Quadratic(s.transform(&transformation)),
Segment::Cubic(s) => Segment::Cubic(s.transform(&transformation)),
Segment::Arc(s) => Segment::Arc(lyon_geom::Arc {
center: transformation.transform_point(&s.center),
..s
}), | }
(contours, transformation)
}
#[derive(Copy, Clone)]
struct Vertex2D {
position: [f32; 2],
uv: [f32; 2],
color: [f32; 3],
}
glium::implement_vertex!(Vertex2D, position, uv, color);
/// All the information required to render a character from a string
#[derive(Clone, Copy, Debug)]
struct RenderChar {
/// The position of the vertices
verts: Rect<f32>,
/// The UV coordinates of the vertices
uv: Rect<f32>,
}
impl RenderChar {
fn verts(&self) -> [Vertex2D; 4] {
macro_rules! vertex {
($p: expr, $t: expr) => {{
let color = [rand::random(), rand::random(), rand::random()];
let p = $p;
let t = $t;
Vertex2D {
position: [p.x, p.y],
uv: [t.x, t.y],
color: color.clone(),
}
}};
}
[
vertex!(self.verts.bottom_left(), self.uv.bottom_left()),
vertex!(self.verts.origin, self.uv.origin),
vertex!(self.verts.bottom_right(), self.uv.bottom_right()),
vertex!(self.verts.top_right(), self.uv.top_right()),
]
}
}
/// The information about a glyph that gets cached in the font atlas.
/// Since every letter has a different scaling factor to make maximum use of the MSDF pixels,
/// we need to keep track of the offset and scale from font unit space. This
/// information is required when positioning characters to get the right scale
/// and positioning for the geometry.
#[derive(Clone, Copy, Debug)]
struct GlyphInformation {
/// Glyph id as reported by font_kit (used to look up advance widths)
id: u32,
/// Where it actually is in the atlas texture
uv: Rect<f32>,
/// The font-space rectangle covered by the uv rectangle
font_units: Rect<f32>,
}
/// A lazily-filled MSDF glyph atlas backed by a single glium texture.
struct FontAtlas<'font, 'facade, T: Facade> {
/// Used when a string requires new glyphs
font: &'font Font,
/// Reference to the facade; kept for when we need to grow the atlas texture
/// (NOTE(review): growth is not actually implemented in this chunk)
facade: &'facade T,
/// Edge length in pixels of each character's square MSDF cell
char_dim: u32,
/// The current dimensions of the texture
alloced_size: u32,
/// The x coordinate at which to place the next character,
next_x: u32,
/// The y coordinate at which to place the next character,
next_y: u32,
/// The actual backing texture that includes all of the distances.
/// All the distance values should be roughly in [-1, 1]
tex: Texture2d,
/// Texture coordinates of every character we know about
/// Technically, this should probably use glyph ids as keys
locations: HashMap<char, GlyphInformation>,
}
impl<'font, 'facade, T: Facade> FontAtlas<'font, 'facade, T> {
/// Create a new atlas.
fn build(
char_dim: u32,
font: &'font Font,
facade: &'facade T,
) -> Result<Self, glium::texture::TextureCreationError> {
use glium::texture::{MipmapsOption, UncompressedFloatFormat};
let alloced_size = char_dim * 16;
let tex = Texture2d::empty_with_format(
facade,
UncompressedFloatFormat::F16F16F16,
MipmapsOption::NoMipmap,
alloced_size,
alloced_size,
)?;
println!("Allocated {0:?}x{0:?} texture", alloced_size);
Ok(Self {
locations: Default::default(),
next_x: 0,
next_y: 0,
font,
facade,
char_dim,
tex,
alloced_size,
})
}
/// Get the glyph information for a character, either pulling them from the cache
/// or generating the MSDF
fn character_information(&mut self, c: char) -> GlyphInformation {
if !self.locations.contains_key(&c) {
const INIT_UV_BORDER: f32 = 0.2;
const UV_BORDER: f32 = 0.1;
let (glyph_id, contours, font_unit_rect) = get_glyph(self.font, c);
let uv_rect = Rect::new(
Point::new(INIT_UV_BORDER, INIT_UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * INIT_UV_BORDER, 1.0 - 2.0 * INIT_UV_BORDER),
);
let (contours, transform) = rescale_contours(contours, font_unit_rect, uv_rect);
// Build the contours and upload thfont_unit to the texture
let contours = recolor_contours(contours, Angle::degrees(3.0), 1);
let msdf = compute_msdf(&contours, self.char_dim as usize);
self.tex.write(
glium::Rect {
left: self.next_x,
bottom: self.next_y,
width: self.char_dim,
height: self.char_dim,
},
msdf,
);
// Compute the final positions of the font_unit and uv rectangles
// transform should just be a scale and transform, easily invertable
let inv_transform = transform.inverse().unwrap();
let uv_rect = Rect::new(
Point::new(UV_BORDER, UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * UV_BORDER, 1.0 - 2.0 * UV_BORDER),
);
let font_unit_rect = inv_transform.transform_rect(&uv_rect);
let alloc_scale = 1.0 / self.alloced_size as f32;
let uv_rect = uv_rect.scale(
self.char_dim as f32 * alloc_scale,
self.char_dim as f32 * alloc_scale,
);
let uv_rect = uv_rect
.translate(&(Vector::new(self.next_x as f32, self.next_y as f32) * alloc_scale));
// Make sure to advance to the next character slot
self.next_x += self.char_dim;
if self.next_x == self.alloced_size {
self.next_x = 0;
self.next_y += self.char_dim;
}
let tr = GlyphInformation {
id: glyph_id,
uv: uv_rect,
font_units: font_unit_rect,
};
self.locations.insert(c, tr);
}
self.locations[&c]
}
/// Layout a string.
/// TODO: hide things with interior mutability so that this doesn't take &mut
fn layout_string(&mut self, start: Point, size_in_points: f32, s: &str) -> Vec<RenderChar> {
let metrics = self.font.metrics();
eprintln!("{:?}", metrics);
let mut tr = Vec::new();
let scale = size_in_points / metrics.units_per_em as f32;
let mut transform = euclid::Transform2D::create_scale(scale, scale)
.post_translate(start.to_vector() + Vector::new(0.0, metrics.descent * -scale));
for c in s.chars() {
let information = self.character_information(c);
tr.push(RenderChar {
verts: transform.transform_rect(&information.font_units),
uv: information.uv,
});
transform = transform.post_translate(
self.font
.advance(information.id)
.unwrap_or(Vector::new(0.0, 0.0)) * scale,
);
}
tr
}
}
fn main() {
let mut events_loop = glutin::EventsLoop::new();
let mut window_size: glutin::dpi::LogicalSize = (512u32, 512).into();
let window = glutin::WindowBuilder::new().with_dimensions(window_size);
let context = glutin::ContextBuilder::new();
let context = context.with_gl_profile(glutin::GlProfile::Core);
let context = context.with_gl_debug_flag(true);
let display =
glium::Display::new(window, context, &events_loop).expect("Error creating GL display");
let hidpi_factor = display.gl_window().window().get_hidpi_factor() as f32;
println!("{:?}", hidpi_factor);
let font = get_font();
let bg_shader = program!(&display,
410 => {
vertex: r#"
#version 410
in vec2 position;
in vec2 uv;
in vec3 color;
out vec3 cross_color;
out vec2 cross_uv;
uniform mat4 transform;
void main() {
gl_Position = vec4(position, 0.0, 1.0) * transform;
cross_color = color;
cross_uv = uv;
}"#,
fragment: r#"
#version 410
uniform sampler2D tex;
in vec2 cross_uv;
in vec3 cross_color;
out vec4 color;
#define RADIUS 0.05
float band_around(float center, float r, float f) {
return smoothstep(center - r, center, f) -
smoothstep(center, center + r, f);
}
float remap(float f) {
return smoothstep(-RADIUS, RADIUS, f);
}
void main() {
vec3 x = texture(tex, cross_uv).rgb;
float v = max(min(x.r, x.g), min(max(x.r, x.g), x.b));
float c = remap(v);
color = vec4(cross_color.rgb, c);
}"#,
},
)
.unwrap();
let mut font_atlas =
FontAtlas::build(SDF_DIMENSION, &font, &display).expect("Failed to build font atlas");
let layout = font_atlas.layout_string(
Point::new(72.0, 72.0),
16.0,
// ":{<~The lazy cat jumps over the xenophobic dog, yodeling~>}",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()`~'\";/.,<>?",
);
let mut vertices = Vec::with_capacity(layout.len() * 4);
let mut indices = Vec::with_capacity(layout.len() * 5);
for c in &layout {
let base = vertices.len() as u16;
vertices.extend_from_slice(&c.verts());
indices.push(base);
indices.push(base + 1);
indices.push(base + 2);
indices.push(base + 3);
indices.push(std::u16::MAX);
}
let tex_vbo = glium::VertexBuffer::immutable(&display, &vertices).unwrap();
let index_buffer = glium::index::IndexBuffer::new(
&display,
glium::index::PrimitiveType::TriangleStrip,
&indices,
)
.unwrap();
let mut closed = false;
while !closed {
let params = glium::DrawParameters {
blend: glium::Blend::alpha_blending(),
primitive_restart_index: true,
..Default::default()
};
// This transform converts from point-space, with (0, 0) in the bottom left corner
// to NDC
// The final 96.0 / 72.0 scaling is because virtual DPI is based on 96
// DPI while points are 1/72 of an inch
let transform = euclid::Transform3D::create_translation(-1.0, -1.0, 0.0)
.pre_scale(2.0 / (window_size.width as f32), 2.0 / (window_size.height as f32), 1.0)
.pre_scale(96.0 / 72.0, 96.0 / 72.0, 1.0);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
let uniforms = uniform!(
tex: font_atlas.tex.sampled(),
transform: transform.to_column_arrays(),
);
target
.draw(&tex_vbo, &index_buffer, &bg_shader, &uniforms, ¶ms)
.unwrap();
target.finish().unwrap();
events_loop.poll_events(|ev| match ev {
glutin::Event::WindowEvent { event, .. } => match event {
glutin::WindowEvent::CloseRequested => closed = true,
glutin::WindowEvent::KeyboardInput { input, .. } => match input.virtual_keycode {
_ => {}
},
glutin::WindowEvent::Resized(new_size) => {
window_size = new_size;
}
_ => {}
},
_ => {}
})
}
} | }
} | random_line_split |
font_atlas.rs | #[macro_use]
extern crate glium;
use euclid::Rect;
use font_kit::font::Font;
use glium::backend::Facade;
use glium::texture::Texture2d;
use glium::{glutin, Surface};
use lyon_path::math::{Angle, Point, Vector};
use lyon_path::Segment;
use msdfgen::{compute_msdf, recolor_contours, Contour, PathCollector};
use std::collections::HashMap;
const SDF_DIMENSION: u32 = 32;
fn get_font() -> Font |
/// Get a glyph ID for a character, its contours, and the typographic bounds for that glyph
/// TODO: this should also return font.origin() so we can offset the EM-space
/// computations by it. However, on freetype that always returns 0 so for the
/// moment we'll get away without it
fn get_glyph(font: &Font, chr: char) -> (u32, Vec<Contour>, Rect<f32>) {
    use font_kit::hinting::HintingOptions;
    use lyon_path::builder::FlatPathBuilder;
    // Resolve the character to a glyph id, then trace its unhinted outline
    // into a set of contours via the lyon path collector.
    let glyph_id = font.glyph_for_char(chr).unwrap();
    let mut collector = PathCollector::new();
    font.outline(glyph_id, HintingOptions::None, &mut collector)
        .unwrap();
    let contours = collector.build();
    let bounds = font.typographic_bounds(glyph_id).unwrap();
    (glyph_id, contours, bounds)
}
/// Rescale contours so they fit in the provided rectangle.
/// Returns the scaled contours along with the transformation used to rescale the contours
fn rescale_contours(
    mut contours: Vec<Contour>,
    initial_bounds: Rect<f32>,
    bounds: lyon_path::math::Rect,
) -> (Vec<Contour>, euclid::Transform2D<f32>) {
    // Uniform scale factor: ratio of the largest extents of the two boxes.
    let src_extent = initial_bounds.size.width.max(initial_bounds.size.height);
    let dst_extent = bounds.size.width.max(bounds.size.height);
    let scale = dst_extent / src_extent;
    // Move the source origin to (0, 0), scale uniformly, then translate to
    // the target origin.
    let xform =
        euclid::Transform2D::create_translation(-initial_bounds.origin.x, -initial_bounds.origin.y)
            .post_scale(scale, scale)
            .post_translate(bounds.origin.to_vector());
    for contour in contours.iter_mut() {
        for elem in contour.elements.iter_mut() {
            elem.segment = match elem.segment {
                Segment::Line(s) => Segment::Line(s.transform(&xform)),
                Segment::Quadratic(s) => Segment::Quadratic(s.transform(&xform)),
                Segment::Cubic(s) => Segment::Cubic(s.transform(&xform)),
                // Arcs only move their center through the transform; the
                // remaining fields are carried over unchanged, as before.
                Segment::Arc(s) => Segment::Arc(lyon_geom::Arc {
                    center: xform.transform_point(&s.center),
                    ..s
                }),
            };
        }
    }
    (contours, xform)
}
/// A single 2D vertex as uploaded to the GPU: point-space position,
/// atlas UV coordinates, and an RGB debug color.
#[derive(Copy, Clone)]
struct Vertex2D {
/// Vertex position in point space (transformed to NDC in the vertex shader).
position: [f32; 2],
/// Texture coordinates into the MSDF atlas.
uv: [f32; 2],
/// Per-vertex RGB color (randomized in `RenderChar::verts`).
color: [f32; 3],
}
// Registers the struct's fields as glium vertex attributes.
glium::implement_vertex!(Vertex2D, position, uv, color);
/// All the information required to render a character from a string
#[derive(Clone, Copy, Debug)]
struct RenderChar {
/// The position of the vertices (point space, produced by `layout_string`)
verts: Rect<f32>,
/// The UV coordinates of the vertices within the atlas texture
uv: Rect<f32>,
}
impl RenderChar {
fn verts(&self) -> [Vertex2D; 4] {
macro_rules! vertex {
($p: expr, $t: expr) => {{
let color = [rand::random(), rand::random(), rand::random()];
let p = $p;
let t = $t;
Vertex2D {
position: [p.x, p.y],
uv: [t.x, t.y],
color: color.clone(),
}
}};
}
[
vertex!(self.verts.bottom_left(), self.uv.bottom_left()),
vertex!(self.verts.origin, self.uv.origin),
vertex!(self.verts.bottom_right(), self.uv.bottom_right()),
vertex!(self.verts.top_right(), self.uv.top_right()),
]
}
}
/// The information about a glyph that gets cached in the font atlas.
/// Since every letter has a different scaling factor to make maximum use of the MSDF pixels,
/// we need to keep track of the offset and scale from font unit space. This
/// information is required when positioning characters to get the right scale
/// and positioning for the geometry.
#[derive(Clone, Copy, Debug)]
struct GlyphInformation {
/// Glyph id as reported by font_kit (used to look up advance widths)
id: u32,
/// Where it actually is in the atlas texture
uv: Rect<f32>,
/// The font-space rectangle covered by the uv rectangle
font_units: Rect<f32>,
}
/// A lazily-filled MSDF glyph atlas backed by a single glium texture.
struct FontAtlas<'font, 'facade, T: Facade> {
/// Used when a string requires new glyphs
font: &'font Font,
/// Reference to the facade; kept for when we need to grow the atlas texture
/// (NOTE(review): growth is not actually implemented in this chunk)
facade: &'facade T,
/// Edge length in pixels of each character's square MSDF cell
char_dim: u32,
/// The current dimensions of the texture
alloced_size: u32,
/// The x coordinate at which to place the next character,
next_x: u32,
/// The y coordinate at which to place the next character,
next_y: u32,
/// The actual backing texture that includes all of the distances.
/// All the distance values should be roughly in [-1, 1]
tex: Texture2d,
/// Texture coordinates of every character we know about
/// Technically, this should probably use glyph ids as keys
locations: HashMap<char, GlyphInformation>,
}
impl<'font, 'facade, T: Facade> FontAtlas<'font, 'facade, T> {
/// Create a new atlas.
/// Allocates a 16x16 grid of `char_dim`-sized cells as one RGB float texture.
fn build(
char_dim: u32,
font: &'font Font,
facade: &'facade T,
) -> Result<Self, glium::texture::TextureCreationError> {
use glium::texture::{MipmapsOption, UncompressedFloatFormat};
// 16 character cells per row and per column.
let alloced_size = char_dim * 16;
let tex = Texture2d::empty_with_format(
facade,
UncompressedFloatFormat::F16F16F16,
MipmapsOption::NoMipmap,
alloced_size,
alloced_size,
)?;
println!("Allocated {0:?}x{0:?} texture", alloced_size);
Ok(Self {
locations: Default::default(),
next_x: 0,
next_y: 0,
font,
facade,
char_dim,
tex,
alloced_size,
})
}
/// Get the glyph information for a character, either pulling them from the cache
/// or generating the MSDF
fn character_information(&mut self, c: char) -> GlyphInformation {
if !self.locations.contains_key(&c) {
// Borders leave empty space around the glyph so the distance field can
// extend past the glyph's bounding box without being clipped.
const INIT_UV_BORDER: f32 = 0.2;
const UV_BORDER: f32 = 0.1;
let (glyph_id, contours, font_unit_rect) = get_glyph(self.font, c);
let uv_rect = Rect::new(
Point::new(INIT_UV_BORDER, INIT_UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * INIT_UV_BORDER, 1.0 - 2.0 * INIT_UV_BORDER),
);
let (contours, transform) = rescale_contours(contours, font_unit_rect, uv_rect);
// Color the contour edges and upload the computed MSDF to the texture
let contours = recolor_contours(contours, Angle::degrees(3.0), 1);
let msdf = compute_msdf(&contours, self.char_dim as usize);
self.tex.write(
glium::Rect {
left: self.next_x,
bottom: self.next_y,
width: self.char_dim,
height: self.char_dim,
},
msdf,
);
// Compute the final positions of the font_unit and uv rectangles
// transform should just be a scale and translation, so it's easily invertible
let inv_transform = transform.inverse().unwrap();
let uv_rect = Rect::new(
Point::new(UV_BORDER, UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * UV_BORDER, 1.0 - 2.0 * UV_BORDER),
);
let font_unit_rect = inv_transform.transform_rect(&uv_rect);
// Rescale the cell-local UV rect into this cell's slot in the big texture.
let alloc_scale = 1.0 / self.alloced_size as f32;
let uv_rect = uv_rect.scale(
self.char_dim as f32 * alloc_scale,
self.char_dim as f32 * alloc_scale,
);
let uv_rect = uv_rect
.translate(&(Vector::new(self.next_x as f32, self.next_y as f32) * alloc_scale));
// Make sure to advance to the next character slot
// NOTE(review): once next_y walks past alloced_size the texture write
// will fail; there is no overflow/growth handling here yet.
self.next_x += self.char_dim;
if self.next_x == self.alloced_size {
self.next_x = 0;
self.next_y += self.char_dim;
}
let tr = GlyphInformation {
id: glyph_id,
uv: uv_rect,
font_units: font_unit_rect,
};
self.locations.insert(c, tr);
}
self.locations[&c]
}
/// Layout a string.
/// Starts at `start` (point space) and advances one glyph at a time; the
/// baseline is offset by `descent * -scale` (descent is presumably negative,
/// shifting the baseline up -- TODO confirm against font_kit's metrics).
/// TODO: hide things with interior mutability so that this doesn't take &mut
fn layout_string(&mut self, start: Point, size_in_points: f32, s: &str) -> Vec<RenderChar> {
let metrics = self.font.metrics();
eprintln!("{:?}", metrics);
let mut tr = Vec::new();
// Conversion factor from font units to points.
let scale = size_in_points / metrics.units_per_em as f32;
let mut transform = euclid::Transform2D::create_scale(scale, scale)
.post_translate(start.to_vector() + Vector::new(0.0, metrics.descent * -scale));
for c in s.chars() {
let information = self.character_information(c);
tr.push(RenderChar {
verts: transform.transform_rect(&information.font_units),
uv: information.uv,
});
// Advance the pen by the glyph's advance width (glyphs without an
// advance move the pen by zero).
transform = transform.post_translate(
self.font
.advance(information.id)
.unwrap_or(Vector::new(0.0, 0.0)) * scale,
);
}
tr
}
}
/// Demo entry point: builds an MSDF atlas for the system serif font, lays out
/// a sample string, uploads one triangle-strip quad per glyph, and renders
/// until the window is closed.
fn main() {
    let mut events_loop = glutin::EventsLoop::new();
    let mut window_size: glutin::dpi::LogicalSize = (512u32, 512).into();
    let window = glutin::WindowBuilder::new().with_dimensions(window_size);
    let context = glutin::ContextBuilder::new();
    let context = context.with_gl_profile(glutin::GlProfile::Core);
    let context = context.with_gl_debug_flag(true);
    let display =
        glium::Display::new(window, context, &events_loop).expect("Error creating GL display");
    let hidpi_factor = display.gl_window().window().get_hidpi_factor() as f32;
    println!("{:?}", hidpi_factor);
    let font = get_font();
    // MSDF text shader: the vertex stage positions point-space quads; the
    // fragment stage takes the median of the three distance channels and
    // maps it to alpha coverage.
    let bg_shader = program!(&display,
        410 => {
            vertex: r#"
#version 410
in vec2 position;
in vec2 uv;
in vec3 color;
out vec3 cross_color;
out vec2 cross_uv;
uniform mat4 transform;
void main() {
gl_Position = vec4(position, 0.0, 1.0) * transform;
cross_color = color;
cross_uv = uv;
}"#,
            fragment: r#"
#version 410
uniform sampler2D tex;
in vec2 cross_uv;
in vec3 cross_color;
out vec4 color;
#define RADIUS 0.05
float band_around(float center, float r, float f) {
return smoothstep(center - r, center, f) -
smoothstep(center, center + r, f);
}
float remap(float f) {
return smoothstep(-RADIUS, RADIUS, f);
}
void main() {
vec3 x = texture(tex, cross_uv).rgb;
float v = max(min(x.r, x.g), min(max(x.r, x.g), x.b));
float c = remap(v);
color = vec4(cross_color.rgb, c);
}"#,
        },
    )
    .unwrap();
    let mut font_atlas =
        FontAtlas::build(SDF_DIMENSION, &font, &display).expect("Failed to build font atlas");
    let layout = font_atlas.layout_string(
        Point::new(72.0, 72.0),
        16.0,
        // ":{<~The lazy cat jumps over the xenophobic dog, yodeling~>}",
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()`~'\";/.,<>?",
    );
    // One quad (4 vertices, 4 indices) per glyph, joined into a single
    // triangle strip with a primitive-restart index (u16::MAX) between quads.
    let mut vertices = Vec::with_capacity(layout.len() * 4);
    let mut indices = Vec::with_capacity(layout.len() * 5);
    for c in &layout {
        let base = vertices.len() as u16;
        vertices.extend_from_slice(&c.verts());
        indices.push(base);
        indices.push(base + 1);
        indices.push(base + 2);
        indices.push(base + 3);
        indices.push(std::u16::MAX);
    }
    let tex_vbo = glium::VertexBuffer::immutable(&display, &vertices).unwrap();
    let index_buffer = glium::index::IndexBuffer::new(
        &display,
        glium::index::PrimitiveType::TriangleStrip,
        &indices,
    )
    .unwrap();
    let mut closed = false;
    while !closed {
        let params = glium::DrawParameters {
            blend: glium::Blend::alpha_blending(),
            primitive_restart_index: true,
            ..Default::default()
        };
        // This transform converts from point-space, with (0, 0) in the bottom
        // left corner, to NDC.
        // The final 96.0 / 72.0 scaling is because virtual DPI is based on 96
        // DPI while points are 1/72 of an inch
        let transform = euclid::Transform3D::create_translation(-1.0, -1.0, 0.0)
            .pre_scale(2.0 / (window_size.width as f32), 2.0 / (window_size.height as f32), 1.0)
            .pre_scale(96.0 / 72.0, 96.0 / 72.0, 1.0);
        let mut target = display.draw();
        target.clear_color(0.0, 0.0, 0.0, 1.0);
        let uniforms = uniform!(
            tex: font_atlas.tex.sampled(),
            transform: transform.to_column_arrays(),
        );
        // BUG FIX: the last draw argument was the mojibake `¶ms` (an HTML
        // entity `&para;` fused with "ms"); restored to `&params` so the
        // draw call compiles.
        target
            .draw(&tex_vbo, &index_buffer, &bg_shader, &uniforms, &params)
            .unwrap();
        target.finish().unwrap();
        events_loop.poll_events(|ev| match ev {
            glutin::Event::WindowEvent { event, .. } => match event {
                glutin::WindowEvent::CloseRequested => closed = true,
                glutin::WindowEvent::KeyboardInput { input, .. } => match input.virtual_keycode {
                    _ => {}
                },
                glutin::WindowEvent::Resized(new_size) => {
                    window_size = new_size;
                }
                _ => {}
            },
            _ => {}
        })
    }
}
| {
use font_kit::family_name::FamilyName;
use font_kit::properties::{Properties, Style};
use font_kit::source::SystemSource;
let source = SystemSource::new();
source
.select_best_match(
&[FamilyName::Serif],
Properties::new().style(Style::Normal),
)
.expect("Failed to select a good font")
.load()
.unwrap()
} | identifier_body |
font_atlas.rs | #[macro_use]
extern crate glium;
use euclid::Rect;
use font_kit::font::Font;
use glium::backend::Facade;
use glium::texture::Texture2d;
use glium::{glutin, Surface};
use lyon_path::math::{Angle, Point, Vector};
use lyon_path::Segment;
use msdfgen::{compute_msdf, recolor_contours, Contour, PathCollector};
use std::collections::HashMap;
const SDF_DIMENSION: u32 = 32;
/// Pick the best-matching upright serif font from the system's font sources.
fn get_font() -> Font {
    use font_kit::family_name::FamilyName;
    use font_kit::properties::{Properties, Style};
    use font_kit::source::SystemSource;
    // Query the system source directly and load the winning handle.
    SystemSource::new()
        .select_best_match(
            &[FamilyName::Serif],
            Properties::new().style(Style::Normal),
        )
        .expect("Failed to select a good font")
        .load()
        .unwrap()
}
/// Get a glyph ID for a character, its contours, and the typographic bounds for that glyph
/// TODO: this should also return font.origin() so we can offset the EM-space
/// computations by it. However, on freetype that always returns 0 so for the
/// moment we'll get away without it
fn get_glyph(font: &Font, chr: char) -> (u32, Vec<Contour>, Rect<f32>) {
    use font_kit::hinting::HintingOptions;
    use lyon_path::builder::FlatPathBuilder;
    // Resolve the character to a glyph id, then trace its unhinted outline
    // into a set of contours via the lyon path collector.
    let glyph_id = font.glyph_for_char(chr).unwrap();
    let mut collector = PathCollector::new();
    font.outline(glyph_id, HintingOptions::None, &mut collector)
        .unwrap();
    let contours = collector.build();
    let bounds = font.typographic_bounds(glyph_id).unwrap();
    (glyph_id, contours, bounds)
}
/// Rescale contours so they fit in the provided rectangle.
/// Returns the scaled contours along with the transformation used to rescale the contours
fn rescale_contours(
    mut contours: Vec<Contour>,
    initial_bounds: Rect<f32>,
    bounds: lyon_path::math::Rect,
) -> (Vec<Contour>, euclid::Transform2D<f32>) {
    // Uniform scale factor: ratio of the largest extents of the two boxes.
    let src_extent = initial_bounds.size.width.max(initial_bounds.size.height);
    let dst_extent = bounds.size.width.max(bounds.size.height);
    let scale = dst_extent / src_extent;
    // Move the source origin to (0, 0), scale uniformly, then translate to
    // the target origin.
    let xform =
        euclid::Transform2D::create_translation(-initial_bounds.origin.x, -initial_bounds.origin.y)
            .post_scale(scale, scale)
            .post_translate(bounds.origin.to_vector());
    for contour in contours.iter_mut() {
        for elem in contour.elements.iter_mut() {
            elem.segment = match elem.segment {
                Segment::Line(s) => Segment::Line(s.transform(&xform)),
                Segment::Quadratic(s) => Segment::Quadratic(s.transform(&xform)),
                Segment::Cubic(s) => Segment::Cubic(s.transform(&xform)),
                // Arcs only move their center through the transform; the
                // remaining fields are carried over unchanged, as before.
                Segment::Arc(s) => Segment::Arc(lyon_geom::Arc {
                    center: xform.transform_point(&s.center),
                    ..s
                }),
            };
        }
    }
    (contours, xform)
}
#[derive(Copy, Clone)]
struct | {
position: [f32; 2],
uv: [f32; 2],
color: [f32; 3],
}
glium::implement_vertex!(Vertex2D, position, uv, color);
/// All the information required to render a character from a string
#[derive(Clone, Copy, Debug)]
struct RenderChar {
/// The position of the vertices
verts: Rect<f32>,
/// The UV coordinates of the vertices
uv: Rect<f32>,
}
impl RenderChar {
fn verts(&self) -> [Vertex2D; 4] {
macro_rules! vertex {
($p: expr, $t: expr) => {{
let color = [rand::random(), rand::random(), rand::random()];
let p = $p;
let t = $t;
Vertex2D {
position: [p.x, p.y],
uv: [t.x, t.y],
color: color.clone(),
}
}};
}
[
vertex!(self.verts.bottom_left(), self.uv.bottom_left()),
vertex!(self.verts.origin, self.uv.origin),
vertex!(self.verts.bottom_right(), self.uv.bottom_right()),
vertex!(self.verts.top_right(), self.uv.top_right()),
]
}
}
/// The information about a glyph that gets cached in the font atlas.
/// Since every letter has a different scaling factor to make maximum use of the MSDF pixels,
/// we need to keep track of the offset and scale from font unit space. This
/// information is required when positioning characters to get the right scale
/// and positioning for the geometry.
#[derive(Clone, Copy, Debug)]
struct GlyphInformation {
id: u32,
/// Where it actually is in the atlas texture
uv: Rect<f32>,
/// The font-space rectangle covered by the uv rectangle
font_units: Rect<f32>,
}
struct FontAtlas<'font, 'facade, T: Facade> {
/// Used when a string requires new glyphs
font: &'font Font,
/// Reference to the facade that is when we need to grow the atlas texture
facade: &'facade T,
/// The scale of each character
char_dim: u32,
/// The current dimensions of the texture
alloced_size: u32,
/// The x coordinate at which to place the next character,
next_x: u32,
/// The y coordinate at which to place the next character,
next_y: u32,
/// The actual backing texture that includes all of the distances.
/// All the distance values should be roughly in [-1, 1]
tex: Texture2d,
/// Texture coordinates of every character we know about
/// Technically, this should probably use glyph ids as keys
locations: HashMap<char, GlyphInformation>,
}
impl<'font, 'facade, T: Facade> FontAtlas<'font, 'facade, T> {
/// Create a new atlas.
fn build(
char_dim: u32,
font: &'font Font,
facade: &'facade T,
) -> Result<Self, glium::texture::TextureCreationError> {
use glium::texture::{MipmapsOption, UncompressedFloatFormat};
let alloced_size = char_dim * 16;
let tex = Texture2d::empty_with_format(
facade,
UncompressedFloatFormat::F16F16F16,
MipmapsOption::NoMipmap,
alloced_size,
alloced_size,
)?;
println!("Allocated {0:?}x{0:?} texture", alloced_size);
Ok(Self {
locations: Default::default(),
next_x: 0,
next_y: 0,
font,
facade,
char_dim,
tex,
alloced_size,
})
}
/// Get the glyph information for a character, either pulling them from the cache
/// or generating the MSDF
fn character_information(&mut self, c: char) -> GlyphInformation {
if !self.locations.contains_key(&c) {
const INIT_UV_BORDER: f32 = 0.2;
const UV_BORDER: f32 = 0.1;
let (glyph_id, contours, font_unit_rect) = get_glyph(self.font, c);
let uv_rect = Rect::new(
Point::new(INIT_UV_BORDER, INIT_UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * INIT_UV_BORDER, 1.0 - 2.0 * INIT_UV_BORDER),
);
let (contours, transform) = rescale_contours(contours, font_unit_rect, uv_rect);
// Build the contours and upload thfont_unit to the texture
let contours = recolor_contours(contours, Angle::degrees(3.0), 1);
let msdf = compute_msdf(&contours, self.char_dim as usize);
self.tex.write(
glium::Rect {
left: self.next_x,
bottom: self.next_y,
width: self.char_dim,
height: self.char_dim,
},
msdf,
);
// Compute the final positions of the font_unit and uv rectangles
// transform should just be a scale and transform, easily invertable
let inv_transform = transform.inverse().unwrap();
let uv_rect = Rect::new(
Point::new(UV_BORDER, UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * UV_BORDER, 1.0 - 2.0 * UV_BORDER),
);
let font_unit_rect = inv_transform.transform_rect(&uv_rect);
let alloc_scale = 1.0 / self.alloced_size as f32;
let uv_rect = uv_rect.scale(
self.char_dim as f32 * alloc_scale,
self.char_dim as f32 * alloc_scale,
);
let uv_rect = uv_rect
.translate(&(Vector::new(self.next_x as f32, self.next_y as f32) * alloc_scale));
// Make sure to advance to the next character slot
self.next_x += self.char_dim;
if self.next_x == self.alloced_size {
self.next_x = 0;
self.next_y += self.char_dim;
}
let tr = GlyphInformation {
id: glyph_id,
uv: uv_rect,
font_units: font_unit_rect,
};
self.locations.insert(c, tr);
}
self.locations[&c]
}
/// Layout a string.
/// TODO: hide things with interior mutability so that this doesn't take &mut
fn layout_string(&mut self, start: Point, size_in_points: f32, s: &str) -> Vec<RenderChar> {
let metrics = self.font.metrics();
eprintln!("{:?}", metrics);
let mut tr = Vec::new();
let scale = size_in_points / metrics.units_per_em as f32;
let mut transform = euclid::Transform2D::create_scale(scale, scale)
.post_translate(start.to_vector() + Vector::new(0.0, metrics.descent * -scale));
for c in s.chars() {
let information = self.character_information(c);
tr.push(RenderChar {
verts: transform.transform_rect(&information.font_units),
uv: information.uv,
});
transform = transform.post_translate(
self.font
.advance(information.id)
.unwrap_or(Vector::new(0.0, 0.0)) * scale,
);
}
tr
}
}
fn main() {
let mut events_loop = glutin::EventsLoop::new();
let mut window_size: glutin::dpi::LogicalSize = (512u32, 512).into();
let window = glutin::WindowBuilder::new().with_dimensions(window_size);
let context = glutin::ContextBuilder::new();
let context = context.with_gl_profile(glutin::GlProfile::Core);
let context = context.with_gl_debug_flag(true);
let display =
glium::Display::new(window, context, &events_loop).expect("Error creating GL display");
let hidpi_factor = display.gl_window().window().get_hidpi_factor() as f32;
println!("{:?}", hidpi_factor);
let font = get_font();
let bg_shader = program!(&display,
410 => {
vertex: r#"
#version 410
in vec2 position;
in vec2 uv;
in vec3 color;
out vec3 cross_color;
out vec2 cross_uv;
uniform mat4 transform;
void main() {
gl_Position = vec4(position, 0.0, 1.0) * transform;
cross_color = color;
cross_uv = uv;
}"#,
fragment: r#"
#version 410
uniform sampler2D tex;
in vec2 cross_uv;
in vec3 cross_color;
out vec4 color;
#define RADIUS 0.05
float band_around(float center, float r, float f) {
return smoothstep(center - r, center, f) -
smoothstep(center, center + r, f);
}
float remap(float f) {
return smoothstep(-RADIUS, RADIUS, f);
}
void main() {
vec3 x = texture(tex, cross_uv).rgb;
float v = max(min(x.r, x.g), min(max(x.r, x.g), x.b));
float c = remap(v);
color = vec4(cross_color.rgb, c);
}"#,
},
)
.unwrap();
let mut font_atlas =
FontAtlas::build(SDF_DIMENSION, &font, &display).expect("Failed to build font atlas");
let layout = font_atlas.layout_string(
Point::new(72.0, 72.0),
16.0,
// ":{<~The lazy cat jumps over the xenophobic dog, yodeling~>}",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()`~'\";/.,<>?",
);
let mut vertices = Vec::with_capacity(layout.len() * 4);
let mut indices = Vec::with_capacity(layout.len() * 5);
for c in &layout {
let base = vertices.len() as u16;
vertices.extend_from_slice(&c.verts());
indices.push(base);
indices.push(base + 1);
indices.push(base + 2);
indices.push(base + 3);
indices.push(std::u16::MAX);
}
let tex_vbo = glium::VertexBuffer::immutable(&display, &vertices).unwrap();
let index_buffer = glium::index::IndexBuffer::new(
&display,
glium::index::PrimitiveType::TriangleStrip,
&indices,
)
.unwrap();
let mut closed = false;
while !closed {
let params = glium::DrawParameters {
blend: glium::Blend::alpha_blending(),
primitive_restart_index: true,
..Default::default()
};
// This transform converts from point-space, with (0, 0) in the bottom left corner
// to NDC
// The final 96.0 / 72.0 scaling is because virtual DPI is based on 96
// DPI while points are 1/72 of an inch
let transform = euclid::Transform3D::create_translation(-1.0, -1.0, 0.0)
.pre_scale(2.0 / (window_size.width as f32), 2.0 / (window_size.height as f32), 1.0)
.pre_scale(96.0 / 72.0, 96.0 / 72.0, 1.0);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
let uniforms = uniform!(
tex: font_atlas.tex.sampled(),
transform: transform.to_column_arrays(),
);
target
.draw(&tex_vbo, &index_buffer, &bg_shader, &uniforms, ¶ms)
.unwrap();
target.finish().unwrap();
events_loop.poll_events(|ev| match ev {
glutin::Event::WindowEvent { event, .. } => match event {
glutin::WindowEvent::CloseRequested => closed = true,
glutin::WindowEvent::KeyboardInput { input, .. } => match input.virtual_keycode {
_ => {}
},
glutin::WindowEvent::Resized(new_size) => {
window_size = new_size;
}
_ => {}
},
_ => {}
})
}
}
| Vertex2D | identifier_name |
conpty.rs | use failure::Error;
use std::io::{self, Error as IoError, Result as IoResult};
extern crate winapi;
use crate::pty::conpty::winapi::shared::minwindef::DWORD;
use crate::pty::conpty::winapi::shared::winerror::{HRESULT, S_OK};
use crate::pty::conpty::winapi::um::fileapi::{ReadFile, WriteFile};
use crate::pty::conpty::winapi::um::handleapi::*;
use crate::pty::conpty::winapi::um::minwinbase::STILL_ACTIVE;
use crate::pty::conpty::winapi::um::namedpipeapi::CreatePipe;
use crate::pty::conpty::winapi::um::processthreadsapi::*;
use crate::pty::conpty::winapi::um::winbase::EXTENDED_STARTUPINFO_PRESENT;
use crate::pty::conpty::winapi::um::winbase::STARTUPINFOEXW;
use crate::pty::conpty::winapi::um::wincon::COORD;
use std::env;
use std::ffi::{OsStr, OsString};
use std::mem;
use std::os::windows::ffi::OsStrExt;
use std::os::windows::ffi::OsStringExt;
use std::os::windows::raw::HANDLE;
use std::path::Path;
use std::ptr;
use std::sync::{Arc, Mutex};
const PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE: usize = 0x00020016;
/// Builder for spawning a child process attached to a Windows pseudo console.
#[derive(Debug)]
pub struct Command {
/// Argument vector; args[0] is the program being launched.
args: Vec<OsString>,
/// Handle for the child's input side (set via `set_pty`; which pipe end
/// this is cannot be confirmed from this chunk -- TODO verify).
input: Option<OwnedHandle>,
/// Handle for the child's output side (set via `set_pty`; see note above
/// on `input`).
output: Option<OwnedHandle>,
/// The pseudo console (ConPTY) handle, once attached.
hpc: Option<HPCON>,
}
impl Command {
pub fn new<S: AsRef<OsStr>>(program: S) -> Self {
Self {
args: vec![program.as_ref().to_owned()],
input: None,
output: None,
hpc: None,
}
}
/// Resolve `exe` against the PATH and PATHEXT environment variables, the way
/// a Windows shell would.
///
/// Returns the first candidate that exists on disk; when nothing matches
/// (or PATH is unset) the original string is returned unchanged.
fn search_path(exe: &OsStr) -> OsString {
    if let Some(path) = env::var_os("PATH") {
        let extensions = env::var_os("PATHEXT").unwrap_or(".EXE".into());
        for dir in env::split_paths(&path) {
            // Check for exactly the user's string in this path dir
            let candidate = dir.join(&exe);
            if candidate.exists() {
                return candidate.into_os_string();
            }
            // otherwise try tacking on some extensions.
            // Note that this really replaces the extension in the
            // user specified path, so this is potentially wrong.
            for ext in env::split_paths(&extensions) {
                let ext = ext.to_str().expect("PATHEXT entries must be utf8");
                // PATHEXT entries conventionally include the leading `.`,
                // but `with_extension` doesn't want it. The original
                // `&ext[1..]` panicked on an empty entry (e.g. a trailing
                // `;` in PATHEXT) and mangled dot-less entries
                // ("EXE" -> "XE"); strip the dot defensively instead.
                let ext = ext.trim_start_matches('.');
                if ext.is_empty() {
                    continue;
                }
                let path = dir.join(&exe).with_extension(ext);
                if path.exists() {
                    return path.into_os_string();
                }
            }
        }
    }
    exe.to_owned()
}
pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
// FIXME: quoting!
self.args.push(arg.as_ref().to_owned());
self
}
pub fn args<I, S>(&mut self, args: I) -> &mut Command
where
I: IntoIterator<Item = S>,
S: AsRef<OsStr>,
{
for arg in args {
self.arg(arg);
}
self
}
pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
where
K: AsRef<OsStr>,
V: AsRef<OsStr>,
{
eprintln!(
"ignoring env {:?}={:?} for child; FIXME: implement this!",
key.as_ref(),
val.as_ref()
);
self
}
fn set_pty(&mut self, input: OwnedHandle, output: OwnedHandle, con: HPCON) -> &mut Command |
fn cmdline(&self) -> Result<(Vec<u16>, Vec<u16>), Error> {
let mut cmdline = Vec::<u16>::new();
let exe = Self::search_path(&self.args[0]);
Self::append_quoted(&exe, &mut cmdline);
// Ensure that we nul terminate the module name, otherwise we'll
// ask CreateProcessW to start something random!
let mut exe: Vec<u16> = exe.encode_wide().collect();
exe.push(0);
for arg in self.args.iter().skip(1) {
cmdline.push(' ' as u16);
ensure!(
!arg.encode_wide().any(|c| c == 0),
"invalid encoding for command line argument {:?}",
arg
);
Self::append_quoted(arg, &mut cmdline);
}
// Ensure that the command line is nul terminated too!
cmdline.push(0);
Ok((exe, cmdline))
}
// Borrowed from https://github.com/hniksic/rust-subprocess/blob/873dfed165173e52907beb87118b2c0c05d8b8a1/src/popen.rs#L1117
// which in turn was translated from ArgvQuote at http://tinyurl.com/zmgtnls
fn append_quoted(arg: &OsStr, cmdline: &mut Vec<u16>) {
if !arg.is_empty()
&& !arg.encode_wide().any(|c| {
c == ' ' as u16
|| c == '\t' as u16
|| c == '\n' as u16
|| c == '\x0b' as u16
|| c == '\"' as u16
})
{
cmdline.extend(arg.encode_wide());
return;
}
cmdline.push('"' as u16);
let arg: Vec<_> = arg.encode_wide().collect();
let mut i = 0;
while i < arg.len() {
let mut num_backslashes = 0;
while i < arg.len() && arg[i] == '\\' as u16 {
i += 1;
num_backslashes += 1;
}
if i == arg.len() {
for _ in 0..num_backslashes * 2 {
cmdline.push('\\' as u16);
}
break;
} else if arg[i] == b'"' as u16 {
for _ in 0..num_backslashes * 2 + 1 {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
} else {
for _ in 0..num_backslashes {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
}
i += 1;
}
cmdline.push('"' as u16);
}
pub fn spawn(&mut self) -> Result<Child, Error> {
let mut si: STARTUPINFOEXW = unsafe { mem::zeroed() };
si.StartupInfo.cb = mem::size_of::<STARTUPINFOEXW>() as u32;
let mut attrs = ProcThreadAttributeList::with_capacity(1)?;
attrs.set_pty(*self.hpc.as_ref().unwrap())?;
si.lpAttributeList = attrs.as_mut_ptr();
let mut pi: PROCESS_INFORMATION = unsafe { mem::zeroed() };
let (mut exe, mut cmdline) = self.cmdline()?;
let cmd_os = OsString::from_wide(&cmdline);
eprintln!(
"Running: module: {} {:?}",
Path::new(&OsString::from_wide(&exe)).display(),
cmd_os
);
let res = unsafe {
CreateProcessW(
exe.as_mut_slice().as_mut_ptr(),
cmdline.as_mut_slice().as_mut_ptr(),
ptr::null_mut(),
ptr::null_mut(),
0,
EXTENDED_STARTUPINFO_PRESENT,
ptr::null_mut(), // FIXME: env
ptr::null_mut(),
&mut si.StartupInfo,
&mut pi,
)
};
if res == 0 {
let err = IoError::last_os_error();
bail!("CreateProcessW `{:?}` failed: {}", cmd_os, err);
}
// Make sure we close out the thread handle so we don't leak it;
// we do this simply by making it owned
let _main_thread = OwnedHandle { handle: pi.hThread };
let proc = OwnedHandle {
handle: pi.hProcess,
};
Ok(Child { proc })
}
}
struct ProcThreadAttributeList {
data: Vec<u8>,
}
impl ProcThreadAttributeList {
pub fn with_capacity(num_attributes: DWORD) -> Result<Self, Error> {
let mut bytes_required: usize = 0;
unsafe {
InitializeProcThreadAttributeList(
ptr::null_mut(),
num_attributes,
0,
&mut bytes_required,
)
};
let mut data = Vec::with_capacity(bytes_required);
// We have the right capacity, so force the vec to consider itself
// that length. The contents of those bytes will be maintained
// by the win32 apis used in this impl.
unsafe { data.set_len(bytes_required) };
let attr_ptr = data.as_mut_slice().as_mut_ptr() as *mut _;
let res = unsafe {
InitializeProcThreadAttributeList(attr_ptr, num_attributes, 0, &mut bytes_required)
};
ensure!(
res != 0,
"InitializeProcThreadAttributeList failed: {}",
IoError::last_os_error()
);
Ok(Self { data })
}
pub fn as_mut_ptr(&mut self) -> LPPROC_THREAD_ATTRIBUTE_LIST {
self.data.as_mut_slice().as_mut_ptr() as *mut _
}
pub fn set_pty(&mut self, con: HPCON) -> Result<(), Error> {
let res = unsafe {
UpdateProcThreadAttribute(
self.as_mut_ptr(),
0,
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE,
con,
mem::size_of::<HPCON>(),
ptr::null_mut(),
ptr::null_mut(),
)
};
ensure!(
res != 0,
"UpdateProcThreadAttribute failed: {}",
IoError::last_os_error()
);
Ok(())
}
}
impl Drop for ProcThreadAttributeList {
fn drop(&mut self) {
unsafe { DeleteProcThreadAttributeList(self.as_mut_ptr()) };
}
}
#[derive(Debug)]
pub struct Child {
proc: OwnedHandle,
}
impl Child {
pub fn try_wait(&mut self) -> IoResult<Option<ExitStatus>> {
let mut status: DWORD = 0;
let res = unsafe { GetExitCodeProcess(self.proc.handle, &mut status) };
if res != 0 {
if status == STILL_ACTIVE {
Ok(None)
} else {
Ok(Some(ExitStatus { status }))
}
} else {
Ok(None)
}
}
}
#[derive(Debug)]
pub struct ExitStatus {
status: DWORD,
}
type HPCON = HANDLE;
extern "system" {
fn CreatePseudoConsole(
size: COORD,
hInput: HANDLE,
hOutput: HANDLE,
flags: DWORD,
hpc: *mut HPCON,
) -> HRESULT;
fn ResizePseudoConsole(hpc: HPCON, size: COORD) -> HRESULT;
fn ClosePseudoConsole(hpc: HPCON);
}
struct PsuedoCon {
con: HPCON,
}
unsafe impl Send for PsuedoCon {}
unsafe impl Sync for PsuedoCon {}
impl Drop for PsuedoCon {
fn drop(&mut self) {
unsafe { ClosePseudoConsole(self.con) };
}
}
impl PsuedoCon {
fn new(size: COORD, input: &OwnedHandle, output: &OwnedHandle) -> Result<Self, Error> {
let mut con: HPCON = INVALID_HANDLE_VALUE;
let result = unsafe { CreatePseudoConsole(size, input.handle, output.handle, 0, &mut con) };
ensure!(
result == S_OK,
"failed to create psuedo console: HRESULT {}",
result
);
Ok(Self { con })
}
fn resize(&self, size: COORD) -> Result<(), Error> {
let result = unsafe { ResizePseudoConsole(self.con, size) };
ensure!(
result == S_OK,
"failed to resize console to {}x{}: HRESULT: {}",
size.X,
size.Y,
result
);
Ok(())
}
}
#[derive(Debug)]
struct OwnedHandle {
handle: HANDLE,
}
unsafe impl Send for OwnedHandle {}
impl Drop for OwnedHandle {
fn drop(&mut self) {
if self.handle != INVALID_HANDLE_VALUE && !self.handle.is_null() {
unsafe { CloseHandle(self.handle) };
}
}
}
impl OwnedHandle {
fn try_clone(&self) -> Result<Self, IoError> {
if self.handle == INVALID_HANDLE_VALUE || self.handle.is_null() {
return Ok(OwnedHandle {
handle: self.handle,
});
}
let proc = unsafe { GetCurrentProcess() };
let mut duped = INVALID_HANDLE_VALUE;
let ok = unsafe {
DuplicateHandle(
proc,
self.handle as *mut _,
proc,
&mut duped,
0,
0,
winapi::um::winnt::DUPLICATE_SAME_ACCESS,
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(OwnedHandle {
handle: duped as *mut _,
})
}
}
}
struct Inner {
con: PsuedoCon,
readable: OwnedHandle,
writable: OwnedHandle,
size: winsize,
}
impl Inner {
pub fn resize(
&mut self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
self.con.resize(COORD {
X: num_cols as i16,
Y: num_rows as i16,
})?;
self.size = winsize {
ws_row: num_rows,
ws_col: num_cols,
ws_xpixel: pixel_width,
ws_ypixel: pixel_height,
};
Ok(())
}
}
#[derive(Clone)]
pub struct MasterPty {
inner: Arc<Mutex<Inner>>,
}
pub struct SlavePty {
inner: Arc<Mutex<Inner>>,
}
#[derive(Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
pub struct winsize {
pub ws_row: u16,
pub ws_col: u16,
pub ws_xpixel: u16,
pub ws_ypixel: u16,
}
impl MasterPty {
pub fn resize(
&self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
let mut inner = self.inner.lock().unwrap();
inner.resize(num_rows, num_cols, pixel_width, pixel_height)
}
pub fn get_size(&self) -> Result<winsize, Error> {
let inner = self.inner.lock().unwrap();
Ok(inner.size.clone())
}
pub fn try_clone(&self) -> Result<Self, Error> {
// FIXME: this isn't great. Replace this with a way to
// clone the output handle and read it.
let inner = self.inner.lock().unwrap();
Ok(Self {
inner: Arc::new(Mutex::new(Inner {
con: PsuedoCon {
con: INVALID_HANDLE_VALUE,
},
readable: inner.readable.try_clone()?,
writable: inner.writable.try_clone()?,
size: inner.size,
})),
})
}
pub fn clear_nonblocking(&self) -> Result<(), Error> {
Ok(())
}
}
impl io::Write for MasterPty {
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
let mut num_wrote = 0;
let ok = unsafe {
WriteFile(
self.inner.lock().unwrap().writable.handle as *mut _,
buf.as_ptr() as *const _,
buf.len() as u32,
&mut num_wrote,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_wrote as usize)
}
}
fn flush(&mut self) -> Result<(), io::Error> {
Ok(())
}
}
impl io::Read for MasterPty {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
let mut num_read = 0;
let ok = unsafe {
ReadFile(
self.inner.lock().unwrap().readable.handle as *mut _,
buf.as_mut_ptr() as *mut _,
buf.len() as u32,
&mut num_read,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_read as usize)
}
}
}
impl SlavePty {
pub fn spawn_command(self, mut cmd: Command) -> Result<Child, Error> {
let inner = self.inner.lock().unwrap();
cmd.set_pty(
inner.writable.try_clone()?,
inner.readable.try_clone()?,
inner.con.con,
);
cmd.spawn()
}
}
fn pipe() -> Result<(OwnedHandle, OwnedHandle), Error> {
let mut read: HANDLE = INVALID_HANDLE_VALUE;
let mut write: HANDLE = INVALID_HANDLE_VALUE;
if unsafe { CreatePipe(&mut read, &mut write, ptr::null_mut(), 0) } == 0 {
bail!("CreatePipe failed: {}", IoError::last_os_error());
}
Ok((OwnedHandle { handle: read }, OwnedHandle { handle: write }))
}
pub fn openpty(
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(MasterPty, SlavePty), Error> {
let (stdin_read, stdin_write) = pipe()?;
let (stdout_read, stdout_write) = pipe()?;
let con = PsuedoCon::new(
COORD {
X: num_cols as i16,
Y: num_rows as i16,
},
&stdin_read,
&stdout_write,
)?;
let size = winsize {
ws_row: num_rows,
ws_col: num_cols,
ws_xpixel: pixel_width,
ws_ypixel: pixel_height,
};
let master = MasterPty {
inner: Arc::new(Mutex::new(Inner {
con,
readable: stdout_read,
writable: stdin_write,
size,
})),
};
let slave = SlavePty {
inner: master.inner.clone(),
};
Ok((master, slave))
}
| {
self.input.replace(input);
self.output.replace(output);
self.hpc.replace(con);
self
} | identifier_body |
conpty.rs | use failure::Error;
use std::io::{self, Error as IoError, Result as IoResult};
extern crate winapi;
use crate::pty::conpty::winapi::shared::minwindef::DWORD;
use crate::pty::conpty::winapi::shared::winerror::{HRESULT, S_OK};
use crate::pty::conpty::winapi::um::fileapi::{ReadFile, WriteFile};
use crate::pty::conpty::winapi::um::handleapi::*;
use crate::pty::conpty::winapi::um::minwinbase::STILL_ACTIVE;
use crate::pty::conpty::winapi::um::namedpipeapi::CreatePipe;
use crate::pty::conpty::winapi::um::processthreadsapi::*;
use crate::pty::conpty::winapi::um::winbase::EXTENDED_STARTUPINFO_PRESENT;
use crate::pty::conpty::winapi::um::winbase::STARTUPINFOEXW;
use crate::pty::conpty::winapi::um::wincon::COORD;
use std::env;
use std::ffi::{OsStr, OsString};
use std::mem;
use std::os::windows::ffi::OsStrExt;
use std::os::windows::ffi::OsStringExt;
use std::os::windows::raw::HANDLE;
use std::path::Path;
use std::ptr;
use std::sync::{Arc, Mutex};
const PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE: usize = 0x00020016;
#[derive(Debug)]
pub struct Command {
args: Vec<OsString>,
input: Option<OwnedHandle>,
output: Option<OwnedHandle>,
hpc: Option<HPCON>,
}
impl Command {
pub fn new<S: AsRef<OsStr>>(program: S) -> Self {
Self {
args: vec![program.as_ref().to_owned()],
input: None,
output: None,
hpc: None,
}
}
fn | (exe: &OsStr) -> OsString {
if let Some(path) = env::var_os("PATH") {
let extensions = env::var_os("PATHEXT").unwrap_or(".EXE".into());
for path in env::split_paths(&path) {
// Check for exactly the user's string in this path dir
let candidate = path.join(&exe);
if candidate.exists() {
return candidate.into_os_string();
}
// otherwise try tacking on some extensions.
// Note that this really replaces the extension in the
// user specified path, so this is potentially wrong.
for ext in env::split_paths(&extensions) {
// PATHEXT includes the leading `.`, but `with_extension`
// doesn't want that
let ext = ext.to_str().expect("PATHEXT entries must be utf8");
let path = path.join(&exe).with_extension(&ext[1..]);
if path.exists() {
return path.into_os_string();
}
}
}
}
exe.to_owned()
}
pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
// FIXME: quoting!
self.args.push(arg.as_ref().to_owned());
self
}
pub fn args<I, S>(&mut self, args: I) -> &mut Command
where
I: IntoIterator<Item = S>,
S: AsRef<OsStr>,
{
for arg in args {
self.arg(arg);
}
self
}
pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
where
K: AsRef<OsStr>,
V: AsRef<OsStr>,
{
eprintln!(
"ignoring env {:?}={:?} for child; FIXME: implement this!",
key.as_ref(),
val.as_ref()
);
self
}
fn set_pty(&mut self, input: OwnedHandle, output: OwnedHandle, con: HPCON) -> &mut Command {
self.input.replace(input);
self.output.replace(output);
self.hpc.replace(con);
self
}
fn cmdline(&self) -> Result<(Vec<u16>, Vec<u16>), Error> {
let mut cmdline = Vec::<u16>::new();
let exe = Self::search_path(&self.args[0]);
Self::append_quoted(&exe, &mut cmdline);
// Ensure that we nul terminate the module name, otherwise we'll
// ask CreateProcessW to start something random!
let mut exe: Vec<u16> = exe.encode_wide().collect();
exe.push(0);
for arg in self.args.iter().skip(1) {
cmdline.push(' ' as u16);
ensure!(
!arg.encode_wide().any(|c| c == 0),
"invalid encoding for command line argument {:?}",
arg
);
Self::append_quoted(arg, &mut cmdline);
}
// Ensure that the command line is nul terminated too!
cmdline.push(0);
Ok((exe, cmdline))
}
// Borrowed from https://github.com/hniksic/rust-subprocess/blob/873dfed165173e52907beb87118b2c0c05d8b8a1/src/popen.rs#L1117
// which in turn was translated from ArgvQuote at http://tinyurl.com/zmgtnls
fn append_quoted(arg: &OsStr, cmdline: &mut Vec<u16>) {
if !arg.is_empty()
&& !arg.encode_wide().any(|c| {
c == ' ' as u16
|| c == '\t' as u16
|| c == '\n' as u16
|| c == '\x0b' as u16
|| c == '\"' as u16
})
{
cmdline.extend(arg.encode_wide());
return;
}
cmdline.push('"' as u16);
let arg: Vec<_> = arg.encode_wide().collect();
let mut i = 0;
while i < arg.len() {
let mut num_backslashes = 0;
while i < arg.len() && arg[i] == '\\' as u16 {
i += 1;
num_backslashes += 1;
}
if i == arg.len() {
for _ in 0..num_backslashes * 2 {
cmdline.push('\\' as u16);
}
break;
} else if arg[i] == b'"' as u16 {
for _ in 0..num_backslashes * 2 + 1 {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
} else {
for _ in 0..num_backslashes {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
}
i += 1;
}
cmdline.push('"' as u16);
}
pub fn spawn(&mut self) -> Result<Child, Error> {
let mut si: STARTUPINFOEXW = unsafe { mem::zeroed() };
si.StartupInfo.cb = mem::size_of::<STARTUPINFOEXW>() as u32;
let mut attrs = ProcThreadAttributeList::with_capacity(1)?;
attrs.set_pty(*self.hpc.as_ref().unwrap())?;
si.lpAttributeList = attrs.as_mut_ptr();
let mut pi: PROCESS_INFORMATION = unsafe { mem::zeroed() };
let (mut exe, mut cmdline) = self.cmdline()?;
let cmd_os = OsString::from_wide(&cmdline);
eprintln!(
"Running: module: {} {:?}",
Path::new(&OsString::from_wide(&exe)).display(),
cmd_os
);
let res = unsafe {
CreateProcessW(
exe.as_mut_slice().as_mut_ptr(),
cmdline.as_mut_slice().as_mut_ptr(),
ptr::null_mut(),
ptr::null_mut(),
0,
EXTENDED_STARTUPINFO_PRESENT,
ptr::null_mut(), // FIXME: env
ptr::null_mut(),
&mut si.StartupInfo,
&mut pi,
)
};
if res == 0 {
let err = IoError::last_os_error();
bail!("CreateProcessW `{:?}` failed: {}", cmd_os, err);
}
// Make sure we close out the thread handle so we don't leak it;
// we do this simply by making it owned
let _main_thread = OwnedHandle { handle: pi.hThread };
let proc = OwnedHandle {
handle: pi.hProcess,
};
Ok(Child { proc })
}
}
struct ProcThreadAttributeList {
data: Vec<u8>,
}
impl ProcThreadAttributeList {
pub fn with_capacity(num_attributes: DWORD) -> Result<Self, Error> {
let mut bytes_required: usize = 0;
unsafe {
InitializeProcThreadAttributeList(
ptr::null_mut(),
num_attributes,
0,
&mut bytes_required,
)
};
let mut data = Vec::with_capacity(bytes_required);
// We have the right capacity, so force the vec to consider itself
// that length. The contents of those bytes will be maintained
// by the win32 apis used in this impl.
unsafe { data.set_len(bytes_required) };
let attr_ptr = data.as_mut_slice().as_mut_ptr() as *mut _;
let res = unsafe {
InitializeProcThreadAttributeList(attr_ptr, num_attributes, 0, &mut bytes_required)
};
ensure!(
res != 0,
"InitializeProcThreadAttributeList failed: {}",
IoError::last_os_error()
);
Ok(Self { data })
}
pub fn as_mut_ptr(&mut self) -> LPPROC_THREAD_ATTRIBUTE_LIST {
self.data.as_mut_slice().as_mut_ptr() as *mut _
}
pub fn set_pty(&mut self, con: HPCON) -> Result<(), Error> {
let res = unsafe {
UpdateProcThreadAttribute(
self.as_mut_ptr(),
0,
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE,
con,
mem::size_of::<HPCON>(),
ptr::null_mut(),
ptr::null_mut(),
)
};
ensure!(
res != 0,
"UpdateProcThreadAttribute failed: {}",
IoError::last_os_error()
);
Ok(())
}
}
impl Drop for ProcThreadAttributeList {
fn drop(&mut self) {
unsafe { DeleteProcThreadAttributeList(self.as_mut_ptr()) };
}
}
#[derive(Debug)]
pub struct Child {
proc: OwnedHandle,
}
impl Child {
pub fn try_wait(&mut self) -> IoResult<Option<ExitStatus>> {
let mut status: DWORD = 0;
let res = unsafe { GetExitCodeProcess(self.proc.handle, &mut status) };
if res != 0 {
if status == STILL_ACTIVE {
Ok(None)
} else {
Ok(Some(ExitStatus { status }))
}
} else {
Ok(None)
}
}
}
#[derive(Debug)]
pub struct ExitStatus {
status: DWORD,
}
type HPCON = HANDLE;
extern "system" {
fn CreatePseudoConsole(
size: COORD,
hInput: HANDLE,
hOutput: HANDLE,
flags: DWORD,
hpc: *mut HPCON,
) -> HRESULT;
fn ResizePseudoConsole(hpc: HPCON, size: COORD) -> HRESULT;
fn ClosePseudoConsole(hpc: HPCON);
}
struct PsuedoCon {
con: HPCON,
}
unsafe impl Send for PsuedoCon {}
unsafe impl Sync for PsuedoCon {}
impl Drop for PsuedoCon {
fn drop(&mut self) {
unsafe { ClosePseudoConsole(self.con) };
}
}
impl PsuedoCon {
fn new(size: COORD, input: &OwnedHandle, output: &OwnedHandle) -> Result<Self, Error> {
let mut con: HPCON = INVALID_HANDLE_VALUE;
let result = unsafe { CreatePseudoConsole(size, input.handle, output.handle, 0, &mut con) };
ensure!(
result == S_OK,
"failed to create psuedo console: HRESULT {}",
result
);
Ok(Self { con })
}
fn resize(&self, size: COORD) -> Result<(), Error> {
let result = unsafe { ResizePseudoConsole(self.con, size) };
ensure!(
result == S_OK,
"failed to resize console to {}x{}: HRESULT: {}",
size.X,
size.Y,
result
);
Ok(())
}
}
#[derive(Debug)]
struct OwnedHandle {
handle: HANDLE,
}
unsafe impl Send for OwnedHandle {}
impl Drop for OwnedHandle {
fn drop(&mut self) {
if self.handle != INVALID_HANDLE_VALUE && !self.handle.is_null() {
unsafe { CloseHandle(self.handle) };
}
}
}
impl OwnedHandle {
fn try_clone(&self) -> Result<Self, IoError> {
if self.handle == INVALID_HANDLE_VALUE || self.handle.is_null() {
return Ok(OwnedHandle {
handle: self.handle,
});
}
let proc = unsafe { GetCurrentProcess() };
let mut duped = INVALID_HANDLE_VALUE;
let ok = unsafe {
DuplicateHandle(
proc,
self.handle as *mut _,
proc,
&mut duped,
0,
0,
winapi::um::winnt::DUPLICATE_SAME_ACCESS,
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(OwnedHandle {
handle: duped as *mut _,
})
}
}
}
struct Inner {
con: PsuedoCon,
readable: OwnedHandle,
writable: OwnedHandle,
size: winsize,
}
impl Inner {
pub fn resize(
&mut self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
self.con.resize(COORD {
X: num_cols as i16,
Y: num_rows as i16,
})?;
self.size = winsize {
ws_row: num_rows,
ws_col: num_cols,
ws_xpixel: pixel_width,
ws_ypixel: pixel_height,
};
Ok(())
}
}
#[derive(Clone)]
pub struct MasterPty {
inner: Arc<Mutex<Inner>>,
}
pub struct SlavePty {
inner: Arc<Mutex<Inner>>,
}
#[derive(Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
pub struct winsize {
pub ws_row: u16,
pub ws_col: u16,
pub ws_xpixel: u16,
pub ws_ypixel: u16,
}
impl MasterPty {
pub fn resize(
&self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
let mut inner = self.inner.lock().unwrap();
inner.resize(num_rows, num_cols, pixel_width, pixel_height)
}
pub fn get_size(&self) -> Result<winsize, Error> {
let inner = self.inner.lock().unwrap();
Ok(inner.size.clone())
}
pub fn try_clone(&self) -> Result<Self, Error> {
// FIXME: this isn't great. Replace this with a way to
// clone the output handle and read it.
let inner = self.inner.lock().unwrap();
Ok(Self {
inner: Arc::new(Mutex::new(Inner {
con: PsuedoCon {
con: INVALID_HANDLE_VALUE,
},
readable: inner.readable.try_clone()?,
writable: inner.writable.try_clone()?,
size: inner.size,
})),
})
}
pub fn clear_nonblocking(&self) -> Result<(), Error> {
Ok(())
}
}
impl io::Write for MasterPty {
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
let mut num_wrote = 0;
let ok = unsafe {
WriteFile(
self.inner.lock().unwrap().writable.handle as *mut _,
buf.as_ptr() as *const _,
buf.len() as u32,
&mut num_wrote,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_wrote as usize)
}
}
fn flush(&mut self) -> Result<(), io::Error> {
Ok(())
}
}
impl io::Read for MasterPty {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
let mut num_read = 0;
let ok = unsafe {
ReadFile(
self.inner.lock().unwrap().readable.handle as *mut _,
buf.as_mut_ptr() as *mut _,
buf.len() as u32,
&mut num_read,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_read as usize)
}
}
}
impl SlavePty {
pub fn spawn_command(self, mut cmd: Command) -> Result<Child, Error> {
let inner = self.inner.lock().unwrap();
cmd.set_pty(
inner.writable.try_clone()?,
inner.readable.try_clone()?,
inner.con.con,
);
cmd.spawn()
}
}
fn pipe() -> Result<(OwnedHandle, OwnedHandle), Error> {
let mut read: HANDLE = INVALID_HANDLE_VALUE;
let mut write: HANDLE = INVALID_HANDLE_VALUE;
if unsafe { CreatePipe(&mut read, &mut write, ptr::null_mut(), 0) } == 0 {
bail!("CreatePipe failed: {}", IoError::last_os_error());
}
Ok((OwnedHandle { handle: read }, OwnedHandle { handle: write }))
}
pub fn openpty(
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(MasterPty, SlavePty), Error> {
let (stdin_read, stdin_write) = pipe()?;
let (stdout_read, stdout_write) = pipe()?;
let con = PsuedoCon::new(
COORD {
X: num_cols as i16,
Y: num_rows as i16,
},
&stdin_read,
&stdout_write,
)?;
let size = winsize {
ws_row: num_rows,
ws_col: num_cols,
ws_xpixel: pixel_width,
ws_ypixel: pixel_height,
};
let master = MasterPty {
inner: Arc::new(Mutex::new(Inner {
con,
readable: stdout_read,
writable: stdin_write,
size,
})),
};
let slave = SlavePty {
inner: master.inner.clone(),
};
Ok((master, slave))
}
| search_path | identifier_name |
conpty.rs | use failure::Error;
use std::io::{self, Error as IoError, Result as IoResult};
extern crate winapi;
use crate::pty::conpty::winapi::shared::minwindef::DWORD;
use crate::pty::conpty::winapi::shared::winerror::{HRESULT, S_OK};
use crate::pty::conpty::winapi::um::fileapi::{ReadFile, WriteFile};
use crate::pty::conpty::winapi::um::handleapi::*;
use crate::pty::conpty::winapi::um::minwinbase::STILL_ACTIVE;
use crate::pty::conpty::winapi::um::namedpipeapi::CreatePipe;
use crate::pty::conpty::winapi::um::processthreadsapi::*;
use crate::pty::conpty::winapi::um::winbase::EXTENDED_STARTUPINFO_PRESENT;
use crate::pty::conpty::winapi::um::winbase::STARTUPINFOEXW;
use crate::pty::conpty::winapi::um::wincon::COORD;
use std::env;
use std::ffi::{OsStr, OsString};
use std::mem;
use std::os::windows::ffi::OsStrExt;
use std::os::windows::ffi::OsStringExt;
use std::os::windows::raw::HANDLE;
use std::path::Path;
use std::ptr;
use std::sync::{Arc, Mutex};
const PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE: usize = 0x00020016;
#[derive(Debug)]
pub struct Command {
args: Vec<OsString>,
input: Option<OwnedHandle>,
output: Option<OwnedHandle>,
hpc: Option<HPCON>,
}
impl Command {
pub fn new<S: AsRef<OsStr>>(program: S) -> Self {
Self {
args: vec![program.as_ref().to_owned()],
input: None,
output: None,
hpc: None,
}
}
fn search_path(exe: &OsStr) -> OsString {
if let Some(path) = env::var_os("PATH") {
let extensions = env::var_os("PATHEXT").unwrap_or(".EXE".into());
for path in env::split_paths(&path) {
// Check for exactly the user's string in this path dir
let candidate = path.join(&exe);
if candidate.exists() {
return candidate.into_os_string();
}
// otherwise try tacking on some extensions.
// Note that this really replaces the extension in the
// user specified path, so this is potentially wrong.
for ext in env::split_paths(&extensions) {
// PATHEXT includes the leading `.`, but `with_extension`
// doesn't want that
let ext = ext.to_str().expect("PATHEXT entries must be utf8");
let path = path.join(&exe).with_extension(&ext[1..]);
if path.exists() {
return path.into_os_string();
}
}
}
}
exe.to_owned()
}
pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
// FIXME: quoting!
self.args.push(arg.as_ref().to_owned());
self
}
pub fn args<I, S>(&mut self, args: I) -> &mut Command
where
I: IntoIterator<Item = S>,
S: AsRef<OsStr>,
{
for arg in args {
self.arg(arg);
}
self
}
pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
where
K: AsRef<OsStr>,
V: AsRef<OsStr>,
{
eprintln!(
"ignoring env {:?}={:?} for child; FIXME: implement this!",
key.as_ref(),
val.as_ref()
);
self
}
fn set_pty(&mut self, input: OwnedHandle, output: OwnedHandle, con: HPCON) -> &mut Command {
self.input.replace(input);
self.output.replace(output);
self.hpc.replace(con);
self
}
fn cmdline(&self) -> Result<(Vec<u16>, Vec<u16>), Error> {
let mut cmdline = Vec::<u16>::new();
let exe = Self::search_path(&self.args[0]);
Self::append_quoted(&exe, &mut cmdline);
// Ensure that we nul terminate the module name, otherwise we'll
// ask CreateProcessW to start something random!
let mut exe: Vec<u16> = exe.encode_wide().collect();
exe.push(0);
for arg in self.args.iter().skip(1) {
cmdline.push(' ' as u16);
ensure!(
!arg.encode_wide().any(|c| c == 0),
"invalid encoding for command line argument {:?}",
arg
);
Self::append_quoted(arg, &mut cmdline);
}
// Ensure that the command line is nul terminated too!
cmdline.push(0);
Ok((exe, cmdline))
}
// Borrowed from https://github.com/hniksic/rust-subprocess/blob/873dfed165173e52907beb87118b2c0c05d8b8a1/src/popen.rs#L1117
// which in turn was translated from ArgvQuote at http://tinyurl.com/zmgtnls
fn append_quoted(arg: &OsStr, cmdline: &mut Vec<u16>) {
if !arg.is_empty()
&& !arg.encode_wide().any(|c| {
c == ' ' as u16
|| c == '\t' as u16
|| c == '\n' as u16
|| c == '\x0b' as u16
|| c == '\"' as u16
})
{
cmdline.extend(arg.encode_wide());
return;
}
cmdline.push('"' as u16);
let arg: Vec<_> = arg.encode_wide().collect();
let mut i = 0;
while i < arg.len() {
let mut num_backslashes = 0;
while i < arg.len() && arg[i] == '\\' as u16 {
i += 1;
num_backslashes += 1;
}
if i == arg.len() {
for _ in 0..num_backslashes * 2 {
cmdline.push('\\' as u16);
}
break;
} else if arg[i] == b'"' as u16 {
for _ in 0..num_backslashes * 2 + 1 {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
} else {
for _ in 0..num_backslashes {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
}
i += 1;
}
cmdline.push('"' as u16);
}
pub fn spawn(&mut self) -> Result<Child, Error> {
let mut si: STARTUPINFOEXW = unsafe { mem::zeroed() };
si.StartupInfo.cb = mem::size_of::<STARTUPINFOEXW>() as u32;
let mut attrs = ProcThreadAttributeList::with_capacity(1)?;
attrs.set_pty(*self.hpc.as_ref().unwrap())?;
si.lpAttributeList = attrs.as_mut_ptr();
let mut pi: PROCESS_INFORMATION = unsafe { mem::zeroed() };
let (mut exe, mut cmdline) = self.cmdline()?;
let cmd_os = OsString::from_wide(&cmdline);
eprintln!(
"Running: module: {} {:?}",
Path::new(&OsString::from_wide(&exe)).display(),
cmd_os
);
let res = unsafe {
CreateProcessW(
exe.as_mut_slice().as_mut_ptr(),
cmdline.as_mut_slice().as_mut_ptr(),
ptr::null_mut(),
ptr::null_mut(),
0,
EXTENDED_STARTUPINFO_PRESENT,
ptr::null_mut(), // FIXME: env
ptr::null_mut(),
&mut si.StartupInfo,
&mut pi,
)
};
if res == 0 {
let err = IoError::last_os_error();
bail!("CreateProcessW `{:?}` failed: {}", cmd_os, err);
}
// Make sure we close out the thread handle so we don't leak it;
// we do this simply by making it owned
let _main_thread = OwnedHandle { handle: pi.hThread };
let proc = OwnedHandle {
handle: pi.hProcess,
};
Ok(Child { proc })
}
}
struct ProcThreadAttributeList {
data: Vec<u8>,
}
impl ProcThreadAttributeList {
pub fn with_capacity(num_attributes: DWORD) -> Result<Self, Error> {
let mut bytes_required: usize = 0;
unsafe {
InitializeProcThreadAttributeList(
ptr::null_mut(),
num_attributes,
0,
&mut bytes_required,
)
};
let mut data = Vec::with_capacity(bytes_required);
// We have the right capacity, so force the vec to consider itself
// that length. The contents of those bytes will be maintained
// by the win32 apis used in this impl.
unsafe { data.set_len(bytes_required) };
let attr_ptr = data.as_mut_slice().as_mut_ptr() as *mut _;
let res = unsafe {
InitializeProcThreadAttributeList(attr_ptr, num_attributes, 0, &mut bytes_required)
};
ensure!(
res != 0,
"InitializeProcThreadAttributeList failed: {}",
IoError::last_os_error()
);
Ok(Self { data })
}
pub fn as_mut_ptr(&mut self) -> LPPROC_THREAD_ATTRIBUTE_LIST {
self.data.as_mut_slice().as_mut_ptr() as *mut _
}
pub fn set_pty(&mut self, con: HPCON) -> Result<(), Error> {
let res = unsafe {
UpdateProcThreadAttribute(
self.as_mut_ptr(),
0,
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE,
con,
mem::size_of::<HPCON>(),
ptr::null_mut(),
ptr::null_mut(),
)
};
ensure!(
res != 0,
"UpdateProcThreadAttribute failed: {}",
IoError::last_os_error()
);
Ok(())
}
}
impl Drop for ProcThreadAttributeList {
fn drop(&mut self) {
unsafe { DeleteProcThreadAttributeList(self.as_mut_ptr()) };
}
}
#[derive(Debug)]
pub struct Child {
proc: OwnedHandle,
}
impl Child {
pub fn try_wait(&mut self) -> IoResult<Option<ExitStatus>> {
let mut status: DWORD = 0;
let res = unsafe { GetExitCodeProcess(self.proc.handle, &mut status) };
if res != 0 {
if status == STILL_ACTIVE {
Ok(None)
} else {
Ok(Some(ExitStatus { status }))
}
} else {
Ok(None)
}
}
}
#[derive(Debug)]
pub struct ExitStatus {
status: DWORD,
}
type HPCON = HANDLE;
extern "system" {
fn CreatePseudoConsole(
size: COORD,
hInput: HANDLE,
hOutput: HANDLE,
flags: DWORD,
hpc: *mut HPCON,
) -> HRESULT;
fn ResizePseudoConsole(hpc: HPCON, size: COORD) -> HRESULT;
fn ClosePseudoConsole(hpc: HPCON);
}
struct PsuedoCon {
con: HPCON,
}
unsafe impl Send for PsuedoCon {}
unsafe impl Sync for PsuedoCon {}
impl Drop for PsuedoCon {
fn drop(&mut self) {
unsafe { ClosePseudoConsole(self.con) };
}
}
impl PsuedoCon {
fn new(size: COORD, input: &OwnedHandle, output: &OwnedHandle) -> Result<Self, Error> {
let mut con: HPCON = INVALID_HANDLE_VALUE;
let result = unsafe { CreatePseudoConsole(size, input.handle, output.handle, 0, &mut con) };
ensure!(
result == S_OK,
"failed to create psuedo console: HRESULT {}",
result
);
Ok(Self { con })
}
fn resize(&self, size: COORD) -> Result<(), Error> {
let result = unsafe { ResizePseudoConsole(self.con, size) };
ensure!(
result == S_OK,
"failed to resize console to {}x{}: HRESULT: {}",
size.X,
size.Y,
result
);
Ok(())
}
}
#[derive(Debug)]
struct OwnedHandle {
handle: HANDLE,
}
unsafe impl Send for OwnedHandle {}
impl Drop for OwnedHandle {
fn drop(&mut self) {
if self.handle != INVALID_HANDLE_VALUE && !self.handle.is_null() |
}
}
impl OwnedHandle {
fn try_clone(&self) -> Result<Self, IoError> {
if self.handle == INVALID_HANDLE_VALUE || self.handle.is_null() {
return Ok(OwnedHandle {
handle: self.handle,
});
}
let proc = unsafe { GetCurrentProcess() };
let mut duped = INVALID_HANDLE_VALUE;
let ok = unsafe {
DuplicateHandle(
proc,
self.handle as *mut _,
proc,
&mut duped,
0,
0,
winapi::um::winnt::DUPLICATE_SAME_ACCESS,
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(OwnedHandle {
handle: duped as *mut _,
})
}
}
}
struct Inner {
con: PsuedoCon,
readable: OwnedHandle,
writable: OwnedHandle,
size: winsize,
}
impl Inner {
pub fn resize(
&mut self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
self.con.resize(COORD {
X: num_cols as i16,
Y: num_rows as i16,
})?;
self.size = winsize {
ws_row: num_rows,
ws_col: num_cols,
ws_xpixel: pixel_width,
ws_ypixel: pixel_height,
};
Ok(())
}
}
#[derive(Clone)]
pub struct MasterPty {
inner: Arc<Mutex<Inner>>,
}
pub struct SlavePty {
inner: Arc<Mutex<Inner>>,
}
#[derive(Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
pub struct winsize {
pub ws_row: u16,
pub ws_col: u16,
pub ws_xpixel: u16,
pub ws_ypixel: u16,
}
impl MasterPty {
pub fn resize(
&self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
let mut inner = self.inner.lock().unwrap();
inner.resize(num_rows, num_cols, pixel_width, pixel_height)
}
pub fn get_size(&self) -> Result<winsize, Error> {
let inner = self.inner.lock().unwrap();
Ok(inner.size.clone())
}
pub fn try_clone(&self) -> Result<Self, Error> {
// FIXME: this isn't great. Replace this with a way to
// clone the output handle and read it.
let inner = self.inner.lock().unwrap();
Ok(Self {
inner: Arc::new(Mutex::new(Inner {
con: PsuedoCon {
con: INVALID_HANDLE_VALUE,
},
readable: inner.readable.try_clone()?,
writable: inner.writable.try_clone()?,
size: inner.size,
})),
})
}
pub fn clear_nonblocking(&self) -> Result<(), Error> {
Ok(())
}
}
impl io::Write for MasterPty {
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
let mut num_wrote = 0;
let ok = unsafe {
WriteFile(
self.inner.lock().unwrap().writable.handle as *mut _,
buf.as_ptr() as *const _,
buf.len() as u32,
&mut num_wrote,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_wrote as usize)
}
}
fn flush(&mut self) -> Result<(), io::Error> {
Ok(())
}
}
impl io::Read for MasterPty {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
let mut num_read = 0;
let ok = unsafe {
ReadFile(
self.inner.lock().unwrap().readable.handle as *mut _,
buf.as_mut_ptr() as *mut _,
buf.len() as u32,
&mut num_read,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_read as usize)
}
}
}
impl SlavePty {
pub fn spawn_command(self, mut cmd: Command) -> Result<Child, Error> {
let inner = self.inner.lock().unwrap();
cmd.set_pty(
inner.writable.try_clone()?,
inner.readable.try_clone()?,
inner.con.con,
);
cmd.spawn()
}
}
fn pipe() -> Result<(OwnedHandle, OwnedHandle), Error> {
let mut read: HANDLE = INVALID_HANDLE_VALUE;
let mut write: HANDLE = INVALID_HANDLE_VALUE;
if unsafe { CreatePipe(&mut read, &mut write, ptr::null_mut(), 0) } == 0 {
bail!("CreatePipe failed: {}", IoError::last_os_error());
}
Ok((OwnedHandle { handle: read }, OwnedHandle { handle: write }))
}
pub fn openpty(
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(MasterPty, SlavePty), Error> {
let (stdin_read, stdin_write) = pipe()?;
let (stdout_read, stdout_write) = pipe()?;
let con = PsuedoCon::new(
COORD {
X: num_cols as i16,
Y: num_rows as i16,
},
&stdin_read,
&stdout_write,
)?;
let size = winsize {
ws_row: num_rows,
ws_col: num_cols,
ws_xpixel: pixel_width,
ws_ypixel: pixel_height,
};
let master = MasterPty {
inner: Arc::new(Mutex::new(Inner {
con,
readable: stdout_read,
writable: stdin_write,
size,
})),
};
let slave = SlavePty {
inner: master.inner.clone(),
};
Ok((master, slave))
}
| {
unsafe { CloseHandle(self.handle) };
} | conditional_block |
conpty.rs | use failure::Error;
use std::io::{self, Error as IoError, Result as IoResult};
extern crate winapi;
use crate::pty::conpty::winapi::shared::minwindef::DWORD;
use crate::pty::conpty::winapi::shared::winerror::{HRESULT, S_OK};
use crate::pty::conpty::winapi::um::fileapi::{ReadFile, WriteFile};
use crate::pty::conpty::winapi::um::handleapi::*;
use crate::pty::conpty::winapi::um::minwinbase::STILL_ACTIVE;
use crate::pty::conpty::winapi::um::namedpipeapi::CreatePipe;
use crate::pty::conpty::winapi::um::processthreadsapi::*;
use crate::pty::conpty::winapi::um::winbase::EXTENDED_STARTUPINFO_PRESENT;
use crate::pty::conpty::winapi::um::winbase::STARTUPINFOEXW;
use crate::pty::conpty::winapi::um::wincon::COORD;
use std::env;
use std::ffi::{OsStr, OsString};
use std::mem;
use std::os::windows::ffi::OsStrExt;
use std::os::windows::ffi::OsStringExt;
use std::os::windows::raw::HANDLE;
use std::path::Path;
use std::ptr;
use std::sync::{Arc, Mutex};
const PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE: usize = 0x00020016;
#[derive(Debug)]
pub struct Command {
args: Vec<OsString>,
input: Option<OwnedHandle>,
output: Option<OwnedHandle>,
hpc: Option<HPCON>,
}
impl Command {
pub fn new<S: AsRef<OsStr>>(program: S) -> Self {
Self {
args: vec![program.as_ref().to_owned()],
input: None,
output: None,
hpc: None,
}
}
fn search_path(exe: &OsStr) -> OsString {
if let Some(path) = env::var_os("PATH") {
let extensions = env::var_os("PATHEXT").unwrap_or(".EXE".into());
for path in env::split_paths(&path) {
// Check for exactly the user's string in this path dir
let candidate = path.join(&exe);
if candidate.exists() {
return candidate.into_os_string();
}
// otherwise try tacking on some extensions.
// Note that this really replaces the extension in the
// user specified path, so this is potentially wrong.
for ext in env::split_paths(&extensions) {
// PATHEXT includes the leading `.`, but `with_extension`
// doesn't want that
let ext = ext.to_str().expect("PATHEXT entries must be utf8");
let path = path.join(&exe).with_extension(&ext[1..]);
if path.exists() {
return path.into_os_string();
}
}
}
}
exe.to_owned()
}
pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
// FIXME: quoting!
self.args.push(arg.as_ref().to_owned());
self
}
pub fn args<I, S>(&mut self, args: I) -> &mut Command
where
I: IntoIterator<Item = S>,
S: AsRef<OsStr>,
{
for arg in args {
self.arg(arg);
}
self
}
pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
where
K: AsRef<OsStr>,
V: AsRef<OsStr>,
{
eprintln!(
"ignoring env {:?}={:?} for child; FIXME: implement this!",
key.as_ref(),
val.as_ref()
);
self
}
fn set_pty(&mut self, input: OwnedHandle, output: OwnedHandle, con: HPCON) -> &mut Command {
self.input.replace(input);
self.output.replace(output);
self.hpc.replace(con);
self
}
fn cmdline(&self) -> Result<(Vec<u16>, Vec<u16>), Error> {
let mut cmdline = Vec::<u16>::new();
let exe = Self::search_path(&self.args[0]);
Self::append_quoted(&exe, &mut cmdline);
// Ensure that we nul terminate the module name, otherwise we'll
// ask CreateProcessW to start something random!
let mut exe: Vec<u16> = exe.encode_wide().collect();
exe.push(0);
for arg in self.args.iter().skip(1) {
cmdline.push(' ' as u16);
ensure!(
!arg.encode_wide().any(|c| c == 0),
"invalid encoding for command line argument {:?}",
arg
);
Self::append_quoted(arg, &mut cmdline);
}
// Ensure that the command line is nul terminated too!
cmdline.push(0);
Ok((exe, cmdline))
}
// Borrowed from https://github.com/hniksic/rust-subprocess/blob/873dfed165173e52907beb87118b2c0c05d8b8a1/src/popen.rs#L1117
// which in turn was translated from ArgvQuote at http://tinyurl.com/zmgtnls
fn append_quoted(arg: &OsStr, cmdline: &mut Vec<u16>) {
if !arg.is_empty()
&& !arg.encode_wide().any(|c| {
c == ' ' as u16
|| c == '\t' as u16
|| c == '\n' as u16
|| c == '\x0b' as u16
|| c == '\"' as u16
})
{
cmdline.extend(arg.encode_wide());
return;
}
cmdline.push('"' as u16);
let arg: Vec<_> = arg.encode_wide().collect();
let mut i = 0;
while i < arg.len() {
let mut num_backslashes = 0;
while i < arg.len() && arg[i] == '\\' as u16 {
i += 1;
num_backslashes += 1;
}
if i == arg.len() {
for _ in 0..num_backslashes * 2 {
cmdline.push('\\' as u16);
}
break;
} else if arg[i] == b'"' as u16 {
for _ in 0..num_backslashes * 2 + 1 {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
} else {
for _ in 0..num_backslashes {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
}
i += 1;
}
cmdline.push('"' as u16);
}
pub fn spawn(&mut self) -> Result<Child, Error> {
let mut si: STARTUPINFOEXW = unsafe { mem::zeroed() };
si.StartupInfo.cb = mem::size_of::<STARTUPINFOEXW>() as u32;
let mut attrs = ProcThreadAttributeList::with_capacity(1)?;
attrs.set_pty(*self.hpc.as_ref().unwrap())?;
si.lpAttributeList = attrs.as_mut_ptr();
let mut pi: PROCESS_INFORMATION = unsafe { mem::zeroed() };
let (mut exe, mut cmdline) = self.cmdline()?;
let cmd_os = OsString::from_wide(&cmdline);
eprintln!(
"Running: module: {} {:?}",
Path::new(&OsString::from_wide(&exe)).display(),
cmd_os
);
let res = unsafe {
CreateProcessW(
exe.as_mut_slice().as_mut_ptr(),
cmdline.as_mut_slice().as_mut_ptr(),
ptr::null_mut(),
ptr::null_mut(),
0,
EXTENDED_STARTUPINFO_PRESENT,
ptr::null_mut(), // FIXME: env
ptr::null_mut(),
&mut si.StartupInfo,
&mut pi,
)
};
if res == 0 {
let err = IoError::last_os_error();
bail!("CreateProcessW `{:?}` failed: {}", cmd_os, err);
}
// Make sure we close out the thread handle so we don't leak it;
// we do this simply by making it owned
let _main_thread = OwnedHandle { handle: pi.hThread };
let proc = OwnedHandle {
handle: pi.hProcess,
};
Ok(Child { proc })
}
}
struct ProcThreadAttributeList {
data: Vec<u8>,
}
impl ProcThreadAttributeList {
pub fn with_capacity(num_attributes: DWORD) -> Result<Self, Error> {
let mut bytes_required: usize = 0; | &mut bytes_required,
)
};
let mut data = Vec::with_capacity(bytes_required);
// We have the right capacity, so force the vec to consider itself
// that length. The contents of those bytes will be maintained
// by the win32 apis used in this impl.
unsafe { data.set_len(bytes_required) };
let attr_ptr = data.as_mut_slice().as_mut_ptr() as *mut _;
let res = unsafe {
InitializeProcThreadAttributeList(attr_ptr, num_attributes, 0, &mut bytes_required)
};
ensure!(
res != 0,
"InitializeProcThreadAttributeList failed: {}",
IoError::last_os_error()
);
Ok(Self { data })
}
pub fn as_mut_ptr(&mut self) -> LPPROC_THREAD_ATTRIBUTE_LIST {
self.data.as_mut_slice().as_mut_ptr() as *mut _
}
pub fn set_pty(&mut self, con: HPCON) -> Result<(), Error> {
let res = unsafe {
UpdateProcThreadAttribute(
self.as_mut_ptr(),
0,
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE,
con,
mem::size_of::<HPCON>(),
ptr::null_mut(),
ptr::null_mut(),
)
};
ensure!(
res != 0,
"UpdateProcThreadAttribute failed: {}",
IoError::last_os_error()
);
Ok(())
}
}
impl Drop for ProcThreadAttributeList {
fn drop(&mut self) {
unsafe { DeleteProcThreadAttributeList(self.as_mut_ptr()) };
}
}
#[derive(Debug)]
pub struct Child {
proc: OwnedHandle,
}
impl Child {
pub fn try_wait(&mut self) -> IoResult<Option<ExitStatus>> {
let mut status: DWORD = 0;
let res = unsafe { GetExitCodeProcess(self.proc.handle, &mut status) };
if res != 0 {
if status == STILL_ACTIVE {
Ok(None)
} else {
Ok(Some(ExitStatus { status }))
}
} else {
Ok(None)
}
}
}
#[derive(Debug)]
pub struct ExitStatus {
status: DWORD,
}
type HPCON = HANDLE;
extern "system" {
fn CreatePseudoConsole(
size: COORD,
hInput: HANDLE,
hOutput: HANDLE,
flags: DWORD,
hpc: *mut HPCON,
) -> HRESULT;
fn ResizePseudoConsole(hpc: HPCON, size: COORD) -> HRESULT;
fn ClosePseudoConsole(hpc: HPCON);
}
struct PsuedoCon {
con: HPCON,
}
unsafe impl Send for PsuedoCon {}
unsafe impl Sync for PsuedoCon {}
impl Drop for PsuedoCon {
fn drop(&mut self) {
unsafe { ClosePseudoConsole(self.con) };
}
}
impl PsuedoCon {
fn new(size: COORD, input: &OwnedHandle, output: &OwnedHandle) -> Result<Self, Error> {
let mut con: HPCON = INVALID_HANDLE_VALUE;
let result = unsafe { CreatePseudoConsole(size, input.handle, output.handle, 0, &mut con) };
ensure!(
result == S_OK,
"failed to create psuedo console: HRESULT {}",
result
);
Ok(Self { con })
}
fn resize(&self, size: COORD) -> Result<(), Error> {
let result = unsafe { ResizePseudoConsole(self.con, size) };
ensure!(
result == S_OK,
"failed to resize console to {}x{}: HRESULT: {}",
size.X,
size.Y,
result
);
Ok(())
}
}
#[derive(Debug)]
struct OwnedHandle {
handle: HANDLE,
}
unsafe impl Send for OwnedHandle {}
impl Drop for OwnedHandle {
fn drop(&mut self) {
if self.handle != INVALID_HANDLE_VALUE && !self.handle.is_null() {
unsafe { CloseHandle(self.handle) };
}
}
}
impl OwnedHandle {
fn try_clone(&self) -> Result<Self, IoError> {
if self.handle == INVALID_HANDLE_VALUE || self.handle.is_null() {
return Ok(OwnedHandle {
handle: self.handle,
});
}
let proc = unsafe { GetCurrentProcess() };
let mut duped = INVALID_HANDLE_VALUE;
let ok = unsafe {
DuplicateHandle(
proc,
self.handle as *mut _,
proc,
&mut duped,
0,
0,
winapi::um::winnt::DUPLICATE_SAME_ACCESS,
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(OwnedHandle {
handle: duped as *mut _,
})
}
}
}
struct Inner {
con: PsuedoCon,
readable: OwnedHandle,
writable: OwnedHandle,
size: winsize,
}
impl Inner {
pub fn resize(
&mut self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
self.con.resize(COORD {
X: num_cols as i16,
Y: num_rows as i16,
})?;
self.size = winsize {
ws_row: num_rows,
ws_col: num_cols,
ws_xpixel: pixel_width,
ws_ypixel: pixel_height,
};
Ok(())
}
}
#[derive(Clone)]
pub struct MasterPty {
inner: Arc<Mutex<Inner>>,
}
pub struct SlavePty {
inner: Arc<Mutex<Inner>>,
}
#[derive(Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
pub struct winsize {
pub ws_row: u16,
pub ws_col: u16,
pub ws_xpixel: u16,
pub ws_ypixel: u16,
}
impl MasterPty {
pub fn resize(
&self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
let mut inner = self.inner.lock().unwrap();
inner.resize(num_rows, num_cols, pixel_width, pixel_height)
}
pub fn get_size(&self) -> Result<winsize, Error> {
let inner = self.inner.lock().unwrap();
Ok(inner.size.clone())
}
pub fn try_clone(&self) -> Result<Self, Error> {
// FIXME: this isn't great. Replace this with a way to
// clone the output handle and read it.
let inner = self.inner.lock().unwrap();
Ok(Self {
inner: Arc::new(Mutex::new(Inner {
con: PsuedoCon {
con: INVALID_HANDLE_VALUE,
},
readable: inner.readable.try_clone()?,
writable: inner.writable.try_clone()?,
size: inner.size,
})),
})
}
pub fn clear_nonblocking(&self) -> Result<(), Error> {
Ok(())
}
}
impl io::Write for MasterPty {
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
let mut num_wrote = 0;
let ok = unsafe {
WriteFile(
self.inner.lock().unwrap().writable.handle as *mut _,
buf.as_ptr() as *const _,
buf.len() as u32,
&mut num_wrote,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_wrote as usize)
}
}
fn flush(&mut self) -> Result<(), io::Error> {
Ok(())
}
}
impl io::Read for MasterPty {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
let mut num_read = 0;
let ok = unsafe {
ReadFile(
self.inner.lock().unwrap().readable.handle as *mut _,
buf.as_mut_ptr() as *mut _,
buf.len() as u32,
&mut num_read,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_read as usize)
}
}
}
impl SlavePty {
pub fn spawn_command(self, mut cmd: Command) -> Result<Child, Error> {
let inner = self.inner.lock().unwrap();
cmd.set_pty(
inner.writable.try_clone()?,
inner.readable.try_clone()?,
inner.con.con,
);
cmd.spawn()
}
}
fn pipe() -> Result<(OwnedHandle, OwnedHandle), Error> {
let mut read: HANDLE = INVALID_HANDLE_VALUE;
let mut write: HANDLE = INVALID_HANDLE_VALUE;
if unsafe { CreatePipe(&mut read, &mut write, ptr::null_mut(), 0) } == 0 {
bail!("CreatePipe failed: {}", IoError::last_os_error());
}
Ok((OwnedHandle { handle: read }, OwnedHandle { handle: write }))
}
pub fn openpty(
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(MasterPty, SlavePty), Error> {
let (stdin_read, stdin_write) = pipe()?;
let (stdout_read, stdout_write) = pipe()?;
let con = PsuedoCon::new(
COORD {
X: num_cols as i16,
Y: num_rows as i16,
},
&stdin_read,
&stdout_write,
)?;
let size = winsize {
ws_row: num_rows,
ws_col: num_cols,
ws_xpixel: pixel_width,
ws_ypixel: pixel_height,
};
let master = MasterPty {
inner: Arc::new(Mutex::new(Inner {
con,
readable: stdout_read,
writable: stdin_write,
size,
})),
};
let slave = SlavePty {
inner: master.inner.clone(),
};
Ok((master, slave))
} | unsafe {
InitializeProcThreadAttributeList(
ptr::null_mut(),
num_attributes,
0, | random_line_split |
lfp_session.py |
import os
import probe_functions as ProbeF
import pdc_functions as PDCF
import _pickle as cPickle
import pandas as pd
import numpy as np
from functools import reduce
import dynet_statespace as dsspace
import dynet_con as dcon
import xarray as xr
import matplotlib.pyplot as plt
from functools import reduce
class LFPSession(object):
"""
Class to access, store, and retrieve LFP session data, apply pre-processing and estimate iPDC
"""
def __init__(self,cache,session_id,result_path):
"""
Initialize the class based on AllenBrainSDK session
:param cache: cache from EcephysProjectCache.from_warehouse(manifest=manifest_path)
:param session_id: ID for allenSDK session
:param result_path: Path to save the results
"""
self.session_id = session_id
# Add the resultpath folder for this session #### be careful about this variable when saving and loading (both Paths)
if not os.path.exists(os.path.join(result_path, str(self.session_id))):
os.mkdir(os.path.join(result_path, str(self.session_id)))
self.result_path = os.path.join(result_path, str(self.session_id))
# check if the LFP session already exist, load that session preprocessing info
try:
self.load_session()
except FileNotFoundError:
# self.cond_name = cond_name
self.preprocess = [] # any preprocessing is done? list of the preprocessing params
self.RF = False # Channel info is stored?
self.CSD = False # CSD plots for layer assignment are done before?
self.ROIs = {} # empty dictionary indicating cortical ROI (VIS areas) and their relative probes
self.session = cache.get_session_data(session_id) # Get allenSDK session
# variables for running time only
self.probes = dict.fromkeys(self.session.probes.index.values) # Get the probes for this session, make a dictionary maybe
self.loaded_cond = None #Load LFP option
self.layer_selected = False # if the loaded LFP is spatially down-sampled
## Class methods read/write the LFPSession from/to file (note: only preprocessing info is important)
def save_session(self):
"""
Saves session and preprocessing information to a .obj file using cPickle
:return: file path/name
"""
filename = os.path.join(self.result_path, 'LFPSession_{}.obj'.format(self.session_id))
filehandler = open(filename, "wb")
# Do not save the loaded LFP matrices since they are too big
temp = self
temp.probes = dict.fromkeys(temp.probes.keys())
temp.loaded_cond = None
temp.layer_selected = False
cPickle.dump(temp.__dict__, filehandler)
filehandler.close()
return filename
def load_session(self): # be careful about this -> result_path
filename = os.path.join(self.result_path, 'LFPSession_{}.obj'.format(self.session_id))
file = open(filename, 'rb')
dataPickle = file.read()
file.close()
self.__dict__ = cPickle.loads(dataPickle)
def __str__(self):
return str(self.__dict__).replace(", '", ",\n '")
## Processing methods
def preprocessing(self,cond_name='drifting_gratings', down_sample_rate=5, pre_stim = 1, do_RF=False, do_CSD=False, do_probe=False):
"""
Runs the preprocessing on the session with the input parameters, if it has not been run before.
:param cond_name: condition name to be preprocessed
:param do_RF: do receptive field mapping plots? Attention: this may take a while if set True, note it is not RF mappning based on permutation
:param down_sample_rate:
:param pre_stim: prestimulus time in sec
:return:
"""
# first indicate if the
preproc_dict = {
'cond_name': cond_name,
'srate': down_sample_rate,
'prestim': pre_stim,
}
# Attention: remove the zero conditions
if not search_preproc(self.preprocess,preproc_dict):
for probe_id in self.probes.keys():
# Load lfp data
lfp =self.session.get_lfp(probe_id)
# First extract probe info and save
if do_RF:
ProbeF.extract_probeinfo(self.session, lfp, probe_id, self.result_path, do_RF)
self.RF = True
elif not self.RF or do_probe:
ProbeF.extract_probeinfo(self.session, lfp, probe_id, self.result_path, False)
# CSD plot for the probe
if (not self.CSD) and do_CSD:
ProbeF.CSD_plots(self.session, lfp, probe_id, self.result_path)
# Extract and prepare the data for a condition
if cond_name is not None:
ROI = ProbeF.prepare_condition(self.session, self.session_id, lfp, probe_id, cond_name, self.result_path, pre_stim, down_sample_rate)
self.ROIs[ROI] = probe_id
# Add the pre-process params as a dictionary to the list of preprocessed data
if cond_name is not None:
self.preprocess.append(preproc_dict)
if (not self.CSD) and do_CSD:
self.CSD = True
if not self.RF or do_probe:
self.RF = True
# Save the session after preprocessing
self.save_session()
def load_LFPprobes(self, cond_dict):
"""
loads in the preprocessed LFP signal
:param cond_dict: a dictionary with the preprocessing params
:return: Updates the self.probes values
"""
preprocess_ind = search_preproc(self.preprocess, cond_dict)
if not preprocess_ind: # checks if the condition is previously run
|
cond = self.preprocess[preprocess_ind[0]]
for probe_id in self.probes.keys():
# first prepare the file name
filename = os.path.join(self.result_path, 'PrepData', '{}_{}{}_pres{}s.pkl'.format(
probe_id, cond['cond_name'], int(cond['srate']),cond['prestim']))
# second load each probe and add it to the ROI list
self.probes[probe_id] = ProbeF.LFPprobe.from_file(filename)
self.loaded_cond = cond['cond_name']
def layer_selection(self, Filename=None):
"""
This will be done on the loaded_cond data
:return:
"""
if Filename==None:
Filename = os.path.join(self.result_path,'PrepData','Cortical_Layers.xlsx')
try:
layer_table = pd.read_excel(Filename)
# set the layer names as index of the dataframe
layer_table.set_index('Layers', inplace=True)
except OSError:
# if the layer file did not exist then return with an error
print("Prepare the cortical layer files first as PrepData/Cortical_Layers.xlsx")
return
for probe_id in self.probes.keys():
print(probe_id)
#ProbeF.layer_reduction(self.probes[probe_id].Y, probe_id, self.result_path)
channel_id = ProbeF.layer_selection(layer_table, probe_id, self.result_path)
# select the LFP of those channels, and relabel the xarray dimensions
if len(channel_id) > 0:
self.probes[probe_id].Y = self.probes[probe_id].Y.sel(channel=channel_id.to_list())
else:
self.probes[probe_id].Y = []
self.layer_selected = True
def pdc_analysis(self, ROI_list=None, Mord=10, ff=.99, pdc_method='iPDC', stim_params=None, Freqs=np.array(range(1, 101)), preproc_params=None, redo = False):
"""
Calculates time- and frequency-resolved functional connectivity between the LFP signals based on STOK algorithm
:param ROI_list: list of ROIs to be considered for this analysis
:param Mord: Model order for ARMA model
:param ff: filter factor between 0 and 1
:param pdc_method: check the pydynet toolbox for that
:param stim_params: Parameters of stimulus to be used to pool the data
:param Freqs: a numpy array uncluding the Frequencies for connectivity analysis
:return:
"""
if ROI_list is None:
ROI_list = ['VISp']
if stim_params is None:
stim_params = []
#----------------------------------------------------------------------------
# Check if the PDC exist, just load it
# analysis params
PDCparam_dict = {
'ROI_list': ROI_list,
'Mord': Mord,
'ff': ff,
'pdc_method': pdc_method,
'stim_param': stim_params
}
filename = PDCF.search_PDC(self.session_id, self.result_path, PDCparam_dict, preproc_params)
if os.path.isfile(filename) and not redo:
# load the file and return it
file = open(filename, 'rb')
PDC_dict = cPickle.load(file)
file.close()
return PDC_dict
#----------------------------------------------------------------------------
# load the preprocessed LFPs and down sample spatially by selecting 6 layers
self.load_LFPprobes(preproc_params)
self.layer_selection()
# select the conditions and pool their trials together
Result_pool = self.pool_data(preproc_params=preproc_params, stim_params= stim_params, ROI_list = ROI_list)
Y = Result_pool['Y']
Srate = Result_pool['Srate']
# pull together and ROI-layer index
srate = np.unique(np.array(list(Srate.values())))
if len(srate) != 1:
print("Sampling rates do not match between probes, please check the preprocessing!")
return
# Put the data from all ROIs together for PDC calculations
Y_temp = np.concatenate(list(Y.values()), axis=1) # second dimension is the channels
Y_temp = np.moveaxis(Y_temp, -1, 0)
YS = list(Y_temp.shape)
Y_pool = Y_temp.reshape([YS[0] * YS[1], YS[2], YS[3]])
# remove possible zero and NaN values (trials)
nzero_trl = Y_pool[:, :, 10] != 0
nzero_trl_ind = reduce((lambda x, y: np.logical_or(x, y)), nzero_trl.transpose())
nNan_trl_ind = np.isnan(Y_pool).sum(axis=2).sum(axis=1) == 0
Y_pooled = Y_pool[nzero_trl_ind & nNan_trl_ind, :, :]
# iPDC matrix
KF = dsspace.dynet_SSM_STOK(Y_pooled, p=Mord, ff=ff)
iPDC = dcon.dynet_ar2pdc(KF, srate, Freqs, metric=pdc_method, univ=1, flow=2, PSD =1)
# iPDC to xarray
Time = Y['VISp'].time.values
ROI_ls = np.array(Result_pool['ROI_labels']).reshape(np.prod(np.array(Result_pool['ROI_labels']).shape))
iPDC_xr = xr.DataArray(iPDC, dims=['target', 'source', 'freq' , 'time'],
coords=dict(target= ROI_ls, source= ROI_ls, freq=Freqs, time=Time))
# ROIs for output
ROIs = list(Y.keys())
chnl_ids = np.array(Result_pool['channel_ids']).reshape(np.prod(np.array(Result_pool['channel_ids']).shape))
prb_ids = np.array(Result_pool['probe_ids']).reshape(np.prod(np.array(Result_pool['probe_ids']).shape))
# save and return the output
PDC_dict = {'session_id':self.session_id, 'KF': KF, 'ROIs': ROIs, 'PDC': iPDC_xr,
'probe_info': {'probe_ids': prb_ids, 'channel_ids': chnl_ids}, 'PDCparam_dict': PDCparam_dict, 'preproc_dict': preproc_params}
PDCF.save_PDC(PDC_dict, self.result_path)
# save?
return PDC_dict
def pool_data(self, preproc_params=None, stim_params= None, ROI_list = None):
# select the conditions and pool their trials together
Y = {} # to prepare the data for PDC analysis
Srate = {} # to make sure that Srates match
ROI_labels = []
channel_ids = []
probe_ids = []
# All ROIs in this session
All_ROIs = [(self.probes[x].ROI, x) for x in self.probes.keys()]
for ROI in ROI_list:
# find the ROIs and the one with Layer assignment
ch_ind = [i for i, y in enumerate([x[0] for x in All_ROIs]) if y == ROI]
if bool(ch_ind): # in case of multiple recordings from the same ROI, I only labeled the one with better data
temp = [len(self.probes[All_ROIs[x][1]].Y)>0 for x in ch_ind]
Emp_ind = np.where(np.array(temp))[0]# find empty probes -> because no layer was assigned
if len(Emp_ind)>0:
ch_ind = ch_ind[Emp_ind[0]]
#ch_ind = ch_ind[temp.index(True)]
else:
ch_ind = []
if bool(ch_ind) or (ch_ind==0): #if there is a probe
probe_id = All_ROIs[ch_ind][1]
cnd_info = self.probes[probe_id].cnd_info
Cnds_inds = []
for k in stim_params.keys():
Cnds = [cnd_info[k] == x for x in stim_params[k]]
if len(Cnds) > 1:
Cnds_temp = reduce((lambda x, y: np.logical_or(x, y)), [c.to_numpy() for c in Cnds])
Cnds_inds.append(Cnds_temp)
else:
Cnds_inds.append(Cnds)
Cnds_final = np.array(reduce((lambda x, y: np.logical_and(x, y)), Cnds_inds))
Cnds_inds_final = cnd_info['stimulus_condition_id'].to_numpy()[Cnds_final.squeeze()]
# Prepare for output
Y[ROI] = self.probes[probe_id].Y.sel(cnd_id=Cnds_inds_final)
Srate[ROI] = self.probes[probe_id].srate
ROI_labels.append(['{}_L{}'.format(ROI, i) for i in range(1, 7)])
channel_ids.append(Y[ROI].channel.values)
probe_ids.append([probe_id for l in range(1, 7)])
# Set other outputs
Time = Y['VISp'].time.values
ROIs = list(Y.keys())
return {'Y': Y, 'Srate': Srate, 'ROI_labels':ROI_labels, 'channel_ids':channel_ids, 'probe_ids':probe_ids}
def plot_LFPs(self, preproc_params=None, stim_params= None, ROI_list = None, TimeWin=None):
self.load_LFPprobes(preproc_params)
self.layer_selection()
Result_pool = self.pool_data(preproc_params=preproc_params, stim_params=stim_params, ROI_list=ROI_list)
figure_path = os.path.join(self.result_path, 'Average_LFP_{}_downs{}.png'.format(
preproc_params['cond_name'], int(preproc_params['srate'])))
colors = ROIColors('layers')
LFP_plot(Result_pool['Y'],TimeWin, colors, figure_path)
# Return averaged Y
return dict((x,y.mean(axis=(0,3))) for x,y in Result_pool['Y'].items())
def search_preproc(list_pre, dic_pre):
"""
Search if the preprocessing with the current parameters has been run before
:param list_pre: self.preprocess
:param dic_pre: dictionary with new params
:return: The index of pre-processes with current params
"""
result = []
for x in list_pre:
shared_items = [x[k] == dic_pre[k] for k in x if k in dic_pre]
result.append(sum(shared_items)==len(dic_pre))
return [i for i, x in enumerate(result) if x]
# maybe also searches if the files exist?
class ROIColors(object):
"""
A Class that defines uniform colorings for ROIs and layers for visualization
"""
def __init__(self,color_type='uni'):
"""
Initializes the colors class
:param color_type: 'uni'/'layers' indicate if it should return only one color per ROI ('Uni')
or 6 colors per ROI, for 6 layers('Layers')
"""
roi_colors_rgb = {'VISp': [.43, .25, .63], 'VISl': [0.03, 0.29, 0.48], 'VISrl': [0.26, 0.68, 0.76],
'VISal': [0.65, 0.46, 0.11], 'VISpm': [1, .7, .3], 'VISam': [0.8, 0.11, 0.11]}
self.ROI_names = {'VISp': 'V1', 'VISl': 'LM', 'VISrl': 'RL', 'VISal': 'AL', 'VISpm': 'PM', 'VISam': 'AM'}
if color_type == 'uni':
self.roi_colors_rgb = roi_colors_rgb
self.roi_colors_hex = dict((x, '#%02x%02x%02x' % (int(v[0] * 255), int(v[1] * 255), int(v[2] * 255))) for x, v in
roi_colors_rgb.items())
elif color_type =='layers':
offset = np.arange(-.25,.26,.1)
roi_colors_rgb_layers = dict(
(x, np.array([np.minimum(np.maximum(v + x, 0), 1) for x in offset])) for x, v in roi_colors_rgb.items())
self.roi_colors_rgb = roi_colors_rgb_layers
self.roi_colors_hex = dict((x,['#%02x%02x%02x' % (int(v[0]*255), int(v[1]*255), int(v[2]*255)) for v in k])
for x,k in roi_colors_rgb_layers.items())
else:
print ('Wrong color type')
return
self.color_type = color_type
def LFP_plot(Y, TimeWin, colors, figure_path):
"""
A general function to plot LFP averages
:param Y: LFP data with dimensions :trials x layers x time x conditions
:param TimeWin:
:param colors:
:param figure_path:
:return:
"""
nroi = len(Y.keys())
fig, axs = plt.subplots(nrows=nroi, ncols=1, figsize=(6, 2 * nroi), sharex=True)
# ordered ROIs: for uniformity puporse
ordered_rois = ['VISp','VISl','VISrl','VISal','VISpm','VISam']
ROIs = list(filter(lambda x: (x in list(Y.keys())), ordered_rois))
# for each ROI plot mean and SEM
for i in range(0, nroi):
roi = ROIs[i]
T = Y[roi].time.values
T_ind = np.where((T >= TimeWin[0]) & (T <= TimeWin[1]))[0]
y = Y[roi].isel(time=T_ind)
y = np.moveaxis(y.__array__(), -1, 0)
dims = y.shape
y2 = y.reshape(dims[0] * dims[1], dims[2], dims[3])
MEAN = np.nanmean(y2, axis=0).transpose()
SEM = (np.nanstd(y2, axis=0) / (y2.shape[0] ** .5)).transpose()
offset = abs(MEAN).max(axis=(0, 1))
yticks = np.zeros([MEAN.shape[1],1])
for l in range(0, MEAN.shape[1]):
MEAN_plot = MEAN[:, l] - (offset * l)
axs[i].plot(T[T_ind], MEAN_plot,
linewidth=1, label='L{}'.format(l), color=colors.roi_colors_hex[roi][l])
axs[i].fill_between(T[T_ind], MEAN[:, l] - (offset * l) + SEM[:, l], MEAN[:, l] - (offset * l) - SEM[:, l],
alpha=.5, color=colors.roi_colors_hex[roi][l])
yticks[l]= MEAN_plot[T[T_ind]<0].mean()
axs[i].set_title(colors.ROI_names[roi])
axs[i].set_yticks(yticks)
axs[i].set_yticklabels(['L{}'.format(i+1) for i in range(0, MEAN.shape[1])])
axs[i].axvline(x=0, linewidth=1, linestyle='--', color='k')
axs[i].grid(True)
if i == nroi - 1:
axs[i].set_xlabel('Time(S)',fontweight='bold')
axs[i].set_xlim(TimeWin[0], TimeWin[1])
#axs[i].legend(loc='right')
plt.savefig(figure_path, bbox_inches='tight', dpi=300)
plt.close(fig)
def aggregate_LFP_ROI(Y_list):
"""
:param Y_list:
:return:
"""
ROIs_All = reduce(lambda x, y: list(set().union(x, y)), [x.keys() for x in Y_list.values()])
Y_ROI_all = {'session_ids': Y_list.keys(),
'ROIs': ROIs_All,
'Y': {}}
# first indicate the ROIs in the list
for roi in ROIs_All:
s_ids = np.where(np.array([list(x.keys()).count(roi) > 0 for x in Y_list.values()]))[0]
# -for animals with that ROI: make a list and concatenate them-
LFP_temp = [Y_list[list(Y_list.keys())[x]][roi] for x in s_ids]
# -time indexes with non NaN values and round them 3 digit to be uniform-
NNan_ind = [np.logical_not(np.isnan(x.time.values)) for x in LFP_temp]
NNan_ind = reduce(lambda x, y: np.logical_and(x[:min(len(x), len(y))], y[:min(len(x), len(y))]), NNan_ind)
LFP_temp2 = []
for lfp in LFP_temp: # loop over animals
lfp.time.values = np.round(lfp.time.values, 3)
lfp.channel.values = np.arange(0,len(lfp.channel.values))
LFP_temp2.append(lfp.isel(time=np.where(NNan_ind)[0]))
# -calculate average over animals-??
#Y_ROI_all['Y'][roi] = np.array(LFP_temp2).mean(axis=0)
Y_temp = np.expand_dims(np.array(LFP_temp2),axis=3)
Y_ROI_all['Y'][roi] = xr.DataArray(Y_temp, dims=['trial', 'channel', 'time', 'cnd_id'],
coords=dict(trial=range(0, Y_temp.shape[0]), channel=lfp.channel.values, time=lfp.time.values[:Y_temp.shape[2]], cnd_id=[1]))
return Y_ROI_all | print("no preprocessing with these parameters is done")
return | conditional_block |
lfp_session.py | import os
import probe_functions as ProbeF
import pdc_functions as PDCF
import _pickle as cPickle
import pandas as pd
import numpy as np
from functools import reduce
import dynet_statespace as dsspace
import dynet_con as dcon
import xarray as xr
import matplotlib.pyplot as plt
from functools import reduce
class LFPSession(object):
"""
Class to access, store, and retrieve LFP session data, apply pre-processing and estimate iPDC
"""
def __init__(self,cache,session_id,result_path):
"""
Initialize the class based on AllenBrainSDK session
:param cache: cache from EcephysProjectCache.from_warehouse(manifest=manifest_path)
:param session_id: ID for allenSDK session
:param result_path: Path to save the results
"""
self.session_id = session_id
# Add the resultpath folder for this session #### be careful about this variable when saving and loading (both Paths)
if not os.path.exists(os.path.join(result_path, str(self.session_id))):
os.mkdir(os.path.join(result_path, str(self.session_id)))
self.result_path = os.path.join(result_path, str(self.session_id))
# check if the LFP session already exist, load that session preprocessing info
try:
self.load_session()
except FileNotFoundError:
# self.cond_name = cond_name
self.preprocess = [] # any preprocessing is done? list of the preprocessing params
self.RF = False # Channel info is stored?
self.CSD = False # CSD plots for layer assignment are done before?
self.ROIs = {} # empty dictionary indicating cortical ROI (VIS areas) and their relative probes
self.session = cache.get_session_data(session_id) # Get allenSDK session
# variables for running time only
self.probes = dict.fromkeys(self.session.probes.index.values) # Get the probes for this session, make a dictionary maybe
self.loaded_cond = None #Load LFP option
self.layer_selected = False # if the loaded LFP is spatially down-sampled
## Class methods read/write the LFPSession from/to file (note: only preprocessing info is important)
def save_session(self):
"""
Saves session and preprocessing information to a .obj file using cPickle
:return: file path/name
""" | temp.probes = dict.fromkeys(temp.probes.keys())
temp.loaded_cond = None
temp.layer_selected = False
cPickle.dump(temp.__dict__, filehandler)
filehandler.close()
return filename
def load_session(self): # be careful about this -> result_path
filename = os.path.join(self.result_path, 'LFPSession_{}.obj'.format(self.session_id))
file = open(filename, 'rb')
dataPickle = file.read()
file.close()
self.__dict__ = cPickle.loads(dataPickle)
def __str__(self):
return str(self.__dict__).replace(", '", ",\n '")
## Processing methods
def preprocessing(self,cond_name='drifting_gratings', down_sample_rate=5, pre_stim = 1, do_RF=False, do_CSD=False, do_probe=False):
"""
Runs the preprocessing on the session with the input parameters, if it has not been run before.
:param cond_name: condition name to be preprocessed
:param do_RF: do receptive field mapping plots? Attention: this may take a while if set True, note it is not RF mappning based on permutation
:param down_sample_rate:
:param pre_stim: prestimulus time in sec
:return:
"""
# first indicate if the
preproc_dict = {
'cond_name': cond_name,
'srate': down_sample_rate,
'prestim': pre_stim,
}
# Attention: remove the zero conditions
if not search_preproc(self.preprocess,preproc_dict):
for probe_id in self.probes.keys():
# Load lfp data
lfp =self.session.get_lfp(probe_id)
# First extract probe info and save
if do_RF:
ProbeF.extract_probeinfo(self.session, lfp, probe_id, self.result_path, do_RF)
self.RF = True
elif not self.RF or do_probe:
ProbeF.extract_probeinfo(self.session, lfp, probe_id, self.result_path, False)
# CSD plot for the probe
if (not self.CSD) and do_CSD:
ProbeF.CSD_plots(self.session, lfp, probe_id, self.result_path)
# Extract and prepare the data for a condition
if cond_name is not None:
ROI = ProbeF.prepare_condition(self.session, self.session_id, lfp, probe_id, cond_name, self.result_path, pre_stim, down_sample_rate)
self.ROIs[ROI] = probe_id
# Add the pre-process params as a dictionary to the list of preprocessed data
if cond_name is not None:
self.preprocess.append(preproc_dict)
if (not self.CSD) and do_CSD:
self.CSD = True
if not self.RF or do_probe:
self.RF = True
# Save the session after preprocessing
self.save_session()
def load_LFPprobes(self, cond_dict):
"""
loads in the preprocessed LFP signal
:param cond_dict: a dictionary with the preprocessing params
:return: Updates the self.probes values
"""
preprocess_ind = search_preproc(self.preprocess, cond_dict)
if not preprocess_ind: # checks if the condition is previously run
print("no preprocessing with these parameters is done")
return
cond = self.preprocess[preprocess_ind[0]]
for probe_id in self.probes.keys():
# first prepare the file name
filename = os.path.join(self.result_path, 'PrepData', '{}_{}{}_pres{}s.pkl'.format(
probe_id, cond['cond_name'], int(cond['srate']),cond['prestim']))
# second load each probe and add it to the ROI list
self.probes[probe_id] = ProbeF.LFPprobe.from_file(filename)
self.loaded_cond = cond['cond_name']
def layer_selection(self, Filename=None):
"""
This will be done on the loaded_cond data
:return:
"""
if Filename==None:
Filename = os.path.join(self.result_path,'PrepData','Cortical_Layers.xlsx')
try:
layer_table = pd.read_excel(Filename)
# set the layer names as index of the dataframe
layer_table.set_index('Layers', inplace=True)
except OSError:
# if the layer file did not exist then return with an error
print("Prepare the cortical layer files first as PrepData/Cortical_Layers.xlsx")
return
for probe_id in self.probes.keys():
print(probe_id)
#ProbeF.layer_reduction(self.probes[probe_id].Y, probe_id, self.result_path)
channel_id = ProbeF.layer_selection(layer_table, probe_id, self.result_path)
# select the LFP of those channels, and relabel the xarray dimensions
if len(channel_id) > 0:
self.probes[probe_id].Y = self.probes[probe_id].Y.sel(channel=channel_id.to_list())
else:
self.probes[probe_id].Y = []
self.layer_selected = True
def pdc_analysis(self, ROI_list=None, Mord=10, ff=.99, pdc_method='iPDC', stim_params=None, Freqs=np.array(range(1, 101)), preproc_params=None, redo = False):
"""
Calculates time- and frequency-resolved functional connectivity between the LFP signals based on STOK algorithm
:param ROI_list: list of ROIs to be considered for this analysis
:param Mord: Model order for ARMA model
:param ff: filter factor between 0 and 1
:param pdc_method: check the pydynet toolbox for that
:param stim_params: Parameters of stimulus to be used to pool the data
:param Freqs: a numpy array uncluding the Frequencies for connectivity analysis
:return:
"""
if ROI_list is None:
ROI_list = ['VISp']
if stim_params is None:
stim_params = []
#----------------------------------------------------------------------------
# Check if the PDC exist, just load it
# analysis params
PDCparam_dict = {
'ROI_list': ROI_list,
'Mord': Mord,
'ff': ff,
'pdc_method': pdc_method,
'stim_param': stim_params
}
filename = PDCF.search_PDC(self.session_id, self.result_path, PDCparam_dict, preproc_params)
if os.path.isfile(filename) and not redo:
# load the file and return it
file = open(filename, 'rb')
PDC_dict = cPickle.load(file)
file.close()
return PDC_dict
#----------------------------------------------------------------------------
# load the preprocessed LFPs and down sample spatially by selecting 6 layers
self.load_LFPprobes(preproc_params)
self.layer_selection()
# select the conditions and pool their trials together
Result_pool = self.pool_data(preproc_params=preproc_params, stim_params= stim_params, ROI_list = ROI_list)
Y = Result_pool['Y']
Srate = Result_pool['Srate']
# pull together and ROI-layer index
srate = np.unique(np.array(list(Srate.values())))
if len(srate) != 1:
print("Sampling rates do not match between probes, please check the preprocessing!")
return
# Put the data from all ROIs together for PDC calculations
Y_temp = np.concatenate(list(Y.values()), axis=1) # second dimension is the channels
Y_temp = np.moveaxis(Y_temp, -1, 0)
YS = list(Y_temp.shape)
Y_pool = Y_temp.reshape([YS[0] * YS[1], YS[2], YS[3]])
# remove possible zero and NaN values (trials)
nzero_trl = Y_pool[:, :, 10] != 0
nzero_trl_ind = reduce((lambda x, y: np.logical_or(x, y)), nzero_trl.transpose())
nNan_trl_ind = np.isnan(Y_pool).sum(axis=2).sum(axis=1) == 0
Y_pooled = Y_pool[nzero_trl_ind & nNan_trl_ind, :, :]
# iPDC matrix
KF = dsspace.dynet_SSM_STOK(Y_pooled, p=Mord, ff=ff)
iPDC = dcon.dynet_ar2pdc(KF, srate, Freqs, metric=pdc_method, univ=1, flow=2, PSD =1)
# iPDC to xarray
Time = Y['VISp'].time.values
ROI_ls = np.array(Result_pool['ROI_labels']).reshape(np.prod(np.array(Result_pool['ROI_labels']).shape))
iPDC_xr = xr.DataArray(iPDC, dims=['target', 'source', 'freq' , 'time'],
coords=dict(target= ROI_ls, source= ROI_ls, freq=Freqs, time=Time))
# ROIs for output
ROIs = list(Y.keys())
chnl_ids = np.array(Result_pool['channel_ids']).reshape(np.prod(np.array(Result_pool['channel_ids']).shape))
prb_ids = np.array(Result_pool['probe_ids']).reshape(np.prod(np.array(Result_pool['probe_ids']).shape))
# save and return the output
PDC_dict = {'session_id':self.session_id, 'KF': KF, 'ROIs': ROIs, 'PDC': iPDC_xr,
'probe_info': {'probe_ids': prb_ids, 'channel_ids': chnl_ids}, 'PDCparam_dict': PDCparam_dict, 'preproc_dict': preproc_params}
PDCF.save_PDC(PDC_dict, self.result_path)
# save?
return PDC_dict
def pool_data(self, preproc_params=None, stim_params= None, ROI_list = None):
# select the conditions and pool their trials together
Y = {} # to prepare the data for PDC analysis
Srate = {} # to make sure that Srates match
ROI_labels = []
channel_ids = []
probe_ids = []
# All ROIs in this session
All_ROIs = [(self.probes[x].ROI, x) for x in self.probes.keys()]
for ROI in ROI_list:
# find the ROIs and the one with Layer assignment
ch_ind = [i for i, y in enumerate([x[0] for x in All_ROIs]) if y == ROI]
if bool(ch_ind): # in case of multiple recordings from the same ROI, I only labeled the one with better data
temp = [len(self.probes[All_ROIs[x][1]].Y)>0 for x in ch_ind]
Emp_ind = np.where(np.array(temp))[0]# find empty probes -> because no layer was assigned
if len(Emp_ind)>0:
ch_ind = ch_ind[Emp_ind[0]]
#ch_ind = ch_ind[temp.index(True)]
else:
ch_ind = []
if bool(ch_ind) or (ch_ind==0): #if there is a probe
probe_id = All_ROIs[ch_ind][1]
cnd_info = self.probes[probe_id].cnd_info
Cnds_inds = []
for k in stim_params.keys():
Cnds = [cnd_info[k] == x for x in stim_params[k]]
if len(Cnds) > 1:
Cnds_temp = reduce((lambda x, y: np.logical_or(x, y)), [c.to_numpy() for c in Cnds])
Cnds_inds.append(Cnds_temp)
else:
Cnds_inds.append(Cnds)
Cnds_final = np.array(reduce((lambda x, y: np.logical_and(x, y)), Cnds_inds))
Cnds_inds_final = cnd_info['stimulus_condition_id'].to_numpy()[Cnds_final.squeeze()]
# Prepare for output
Y[ROI] = self.probes[probe_id].Y.sel(cnd_id=Cnds_inds_final)
Srate[ROI] = self.probes[probe_id].srate
ROI_labels.append(['{}_L{}'.format(ROI, i) for i in range(1, 7)])
channel_ids.append(Y[ROI].channel.values)
probe_ids.append([probe_id for l in range(1, 7)])
# Set other outputs
Time = Y['VISp'].time.values
ROIs = list(Y.keys())
return {'Y': Y, 'Srate': Srate, 'ROI_labels':ROI_labels, 'channel_ids':channel_ids, 'probe_ids':probe_ids}
def plot_LFPs(self, preproc_params=None, stim_params= None, ROI_list = None, TimeWin=None):
self.load_LFPprobes(preproc_params)
self.layer_selection()
Result_pool = self.pool_data(preproc_params=preproc_params, stim_params=stim_params, ROI_list=ROI_list)
figure_path = os.path.join(self.result_path, 'Average_LFP_{}_downs{}.png'.format(
preproc_params['cond_name'], int(preproc_params['srate'])))
colors = ROIColors('layers')
LFP_plot(Result_pool['Y'],TimeWin, colors, figure_path)
# Return averaged Y
return dict((x,y.mean(axis=(0,3))) for x,y in Result_pool['Y'].items())
def search_preproc(list_pre, dic_pre):
"""
Search if the preprocessing with the current parameters has been run before
:param list_pre: self.preprocess
:param dic_pre: dictionary with new params
:return: The index of pre-processes with current params
"""
result = []
for x in list_pre:
shared_items = [x[k] == dic_pre[k] for k in x if k in dic_pre]
result.append(sum(shared_items)==len(dic_pre))
return [i for i, x in enumerate(result) if x]
# maybe also searches if the files exist?
class ROIColors(object):
"""
A Class that defines uniform colorings for ROIs and layers for visualization
"""
def __init__(self,color_type='uni'):
"""
Initializes the colors class
:param color_type: 'uni'/'layers' indicate if it should return only one color per ROI ('Uni')
or 6 colors per ROI, for 6 layers('Layers')
"""
roi_colors_rgb = {'VISp': [.43, .25, .63], 'VISl': [0.03, 0.29, 0.48], 'VISrl': [0.26, 0.68, 0.76],
'VISal': [0.65, 0.46, 0.11], 'VISpm': [1, .7, .3], 'VISam': [0.8, 0.11, 0.11]}
self.ROI_names = {'VISp': 'V1', 'VISl': 'LM', 'VISrl': 'RL', 'VISal': 'AL', 'VISpm': 'PM', 'VISam': 'AM'}
if color_type == 'uni':
self.roi_colors_rgb = roi_colors_rgb
self.roi_colors_hex = dict((x, '#%02x%02x%02x' % (int(v[0] * 255), int(v[1] * 255), int(v[2] * 255))) for x, v in
roi_colors_rgb.items())
elif color_type =='layers':
offset = np.arange(-.25,.26,.1)
roi_colors_rgb_layers = dict(
(x, np.array([np.minimum(np.maximum(v + x, 0), 1) for x in offset])) for x, v in roi_colors_rgb.items())
self.roi_colors_rgb = roi_colors_rgb_layers
self.roi_colors_hex = dict((x,['#%02x%02x%02x' % (int(v[0]*255), int(v[1]*255), int(v[2]*255)) for v in k])
for x,k in roi_colors_rgb_layers.items())
else:
print ('Wrong color type')
return
self.color_type = color_type
def LFP_plot(Y, TimeWin, colors, figure_path):
"""
A general function to plot LFP averages
:param Y: LFP data with dimensions :trials x layers x time x conditions
:param TimeWin:
:param colors:
:param figure_path:
:return:
"""
nroi = len(Y.keys())
fig, axs = plt.subplots(nrows=nroi, ncols=1, figsize=(6, 2 * nroi), sharex=True)
# ordered ROIs: for uniformity puporse
ordered_rois = ['VISp','VISl','VISrl','VISal','VISpm','VISam']
ROIs = list(filter(lambda x: (x in list(Y.keys())), ordered_rois))
# for each ROI plot mean and SEM
for i in range(0, nroi):
roi = ROIs[i]
T = Y[roi].time.values
T_ind = np.where((T >= TimeWin[0]) & (T <= TimeWin[1]))[0]
y = Y[roi].isel(time=T_ind)
y = np.moveaxis(y.__array__(), -1, 0)
dims = y.shape
y2 = y.reshape(dims[0] * dims[1], dims[2], dims[3])
MEAN = np.nanmean(y2, axis=0).transpose()
SEM = (np.nanstd(y2, axis=0) / (y2.shape[0] ** .5)).transpose()
offset = abs(MEAN).max(axis=(0, 1))
yticks = np.zeros([MEAN.shape[1],1])
for l in range(0, MEAN.shape[1]):
MEAN_plot = MEAN[:, l] - (offset * l)
axs[i].plot(T[T_ind], MEAN_plot,
linewidth=1, label='L{}'.format(l), color=colors.roi_colors_hex[roi][l])
axs[i].fill_between(T[T_ind], MEAN[:, l] - (offset * l) + SEM[:, l], MEAN[:, l] - (offset * l) - SEM[:, l],
alpha=.5, color=colors.roi_colors_hex[roi][l])
yticks[l]= MEAN_plot[T[T_ind]<0].mean()
axs[i].set_title(colors.ROI_names[roi])
axs[i].set_yticks(yticks)
axs[i].set_yticklabels(['L{}'.format(i+1) for i in range(0, MEAN.shape[1])])
axs[i].axvline(x=0, linewidth=1, linestyle='--', color='k')
axs[i].grid(True)
if i == nroi - 1:
axs[i].set_xlabel('Time(S)',fontweight='bold')
axs[i].set_xlim(TimeWin[0], TimeWin[1])
#axs[i].legend(loc='right')
plt.savefig(figure_path, bbox_inches='tight', dpi=300)
plt.close(fig)
def aggregate_LFP_ROI(Y_list):
"""
:param Y_list:
:return:
"""
ROIs_All = reduce(lambda x, y: list(set().union(x, y)), [x.keys() for x in Y_list.values()])
Y_ROI_all = {'session_ids': Y_list.keys(),
'ROIs': ROIs_All,
'Y': {}}
# first indicate the ROIs in the list
for roi in ROIs_All:
s_ids = np.where(np.array([list(x.keys()).count(roi) > 0 for x in Y_list.values()]))[0]
# -for animals with that ROI: make a list and concatenate them-
LFP_temp = [Y_list[list(Y_list.keys())[x]][roi] for x in s_ids]
# -time indexes with non NaN values and round them 3 digit to be uniform-
NNan_ind = [np.logical_not(np.isnan(x.time.values)) for x in LFP_temp]
NNan_ind = reduce(lambda x, y: np.logical_and(x[:min(len(x), len(y))], y[:min(len(x), len(y))]), NNan_ind)
LFP_temp2 = []
for lfp in LFP_temp: # loop over animals
lfp.time.values = np.round(lfp.time.values, 3)
lfp.channel.values = np.arange(0,len(lfp.channel.values))
LFP_temp2.append(lfp.isel(time=np.where(NNan_ind)[0]))
# -calculate average over animals-??
#Y_ROI_all['Y'][roi] = np.array(LFP_temp2).mean(axis=0)
Y_temp = np.expand_dims(np.array(LFP_temp2),axis=3)
Y_ROI_all['Y'][roi] = xr.DataArray(Y_temp, dims=['trial', 'channel', 'time', 'cnd_id'],
coords=dict(trial=range(0, Y_temp.shape[0]), channel=lfp.channel.values, time=lfp.time.values[:Y_temp.shape[2]], cnd_id=[1]))
return Y_ROI_all | filename = os.path.join(self.result_path, 'LFPSession_{}.obj'.format(self.session_id))
filehandler = open(filename, "wb")
# Do not save the loaded LFP matrices since they are too big
temp = self | random_line_split |
lfp_session.py |
import os
import probe_functions as ProbeF
import pdc_functions as PDCF
import _pickle as cPickle
import pandas as pd
import numpy as np
from functools import reduce
import dynet_statespace as dsspace
import dynet_con as dcon
import xarray as xr
import matplotlib.pyplot as plt
from functools import reduce
class LFPSession(object):
"""
Class to access, store, and retrieve LFP session data, apply pre-processing and estimate iPDC
"""
def __init__(self,cache,session_id,result_path):
"""
Initialize the class based on AllenBrainSDK session
:param cache: cache from EcephysProjectCache.from_warehouse(manifest=manifest_path)
:param session_id: ID for allenSDK session
:param result_path: Path to save the results
"""
self.session_id = session_id
# Add the resultpath folder for this session #### be careful about this variable when saving and loading (both Paths)
if not os.path.exists(os.path.join(result_path, str(self.session_id))):
os.mkdir(os.path.join(result_path, str(self.session_id)))
self.result_path = os.path.join(result_path, str(self.session_id))
# check if the LFP session already exist, load that session preprocessing info
try:
self.load_session()
except FileNotFoundError:
# self.cond_name = cond_name
self.preprocess = [] # any preprocessing is done? list of the preprocessing params
self.RF = False # Channel info is stored?
self.CSD = False # CSD plots for layer assignment are done before?
self.ROIs = {} # empty dictionary indicating cortical ROI (VIS areas) and their relative probes
self.session = cache.get_session_data(session_id) # Get allenSDK session
# variables for running time only
self.probes = dict.fromkeys(self.session.probes.index.values) # Get the probes for this session, make a dictionary maybe
self.loaded_cond = None #Load LFP option
self.layer_selected = False # if the loaded LFP is spatially down-sampled
## Class methods read/write the LFPSession from/to file (note: only preprocessing info is important)
def save_session(self):
"""
Saves session and preprocessing information to a .obj file using cPickle
:return: file path/name
"""
filename = os.path.join(self.result_path, 'LFPSession_{}.obj'.format(self.session_id))
filehandler = open(filename, "wb")
# Do not save the loaded LFP matrices since they are too big
temp = self
temp.probes = dict.fromkeys(temp.probes.keys())
temp.loaded_cond = None
temp.layer_selected = False
cPickle.dump(temp.__dict__, filehandler)
filehandler.close()
return filename
def load_session(self): # be careful about this -> result_path
filename = os.path.join(self.result_path, 'LFPSession_{}.obj'.format(self.session_id))
file = open(filename, 'rb')
dataPickle = file.read()
file.close()
self.__dict__ = cPickle.loads(dataPickle)
def __str__(self):
return str(self.__dict__).replace(", '", ",\n '")
## Processing methods
def preprocessing(self,cond_name='drifting_gratings', down_sample_rate=5, pre_stim = 1, do_RF=False, do_CSD=False, do_probe=False):
"""
Runs the preprocessing on the session with the input parameters, if it has not been run before.
:param cond_name: condition name to be preprocessed
:param do_RF: do receptive field mapping plots? Attention: this may take a while if set True, note it is not RF mappning based on permutation
:param down_sample_rate:
:param pre_stim: prestimulus time in sec
:return:
"""
# first indicate if the
preproc_dict = {
'cond_name': cond_name,
'srate': down_sample_rate,
'prestim': pre_stim,
}
# Attention: remove the zero conditions
if not search_preproc(self.preprocess,preproc_dict):
for probe_id in self.probes.keys():
# Load lfp data
lfp =self.session.get_lfp(probe_id)
# First extract probe info and save
if do_RF:
ProbeF.extract_probeinfo(self.session, lfp, probe_id, self.result_path, do_RF)
self.RF = True
elif not self.RF or do_probe:
ProbeF.extract_probeinfo(self.session, lfp, probe_id, self.result_path, False)
# CSD plot for the probe
if (not self.CSD) and do_CSD:
ProbeF.CSD_plots(self.session, lfp, probe_id, self.result_path)
# Extract and prepare the data for a condition
if cond_name is not None:
ROI = ProbeF.prepare_condition(self.session, self.session_id, lfp, probe_id, cond_name, self.result_path, pre_stim, down_sample_rate)
self.ROIs[ROI] = probe_id
# Add the pre-process params as a dictionary to the list of preprocessed data
if cond_name is not None:
self.preprocess.append(preproc_dict)
if (not self.CSD) and do_CSD:
self.CSD = True
if not self.RF or do_probe:
self.RF = True
# Save the session after preprocessing
self.save_session()
def load_LFPprobes(self, cond_dict):
"""
loads in the preprocessed LFP signal
:param cond_dict: a dictionary with the preprocessing params
:return: Updates the self.probes values
"""
preprocess_ind = search_preproc(self.preprocess, cond_dict)
if not preprocess_ind: # checks if the condition is previously run
print("no preprocessing with these parameters is done")
return
cond = self.preprocess[preprocess_ind[0]]
for probe_id in self.probes.keys():
# first prepare the file name
filename = os.path.join(self.result_path, 'PrepData', '{}_{}{}_pres{}s.pkl'.format(
probe_id, cond['cond_name'], int(cond['srate']),cond['prestim']))
# second load each probe and add it to the ROI list
self.probes[probe_id] = ProbeF.LFPprobe.from_file(filename)
self.loaded_cond = cond['cond_name']
def layer_selection(self, Filename=None):
"""
This will be done on the loaded_cond data
:return:
"""
if Filename==None:
Filename = os.path.join(self.result_path,'PrepData','Cortical_Layers.xlsx')
try:
layer_table = pd.read_excel(Filename)
# set the layer names as index of the dataframe
layer_table.set_index('Layers', inplace=True)
except OSError:
# if the layer file did not exist then return with an error
print("Prepare the cortical layer files first as PrepData/Cortical_Layers.xlsx")
return
for probe_id in self.probes.keys():
print(probe_id)
#ProbeF.layer_reduction(self.probes[probe_id].Y, probe_id, self.result_path)
channel_id = ProbeF.layer_selection(layer_table, probe_id, self.result_path)
# select the LFP of those channels, and relabel the xarray dimensions
if len(channel_id) > 0:
self.probes[probe_id].Y = self.probes[probe_id].Y.sel(channel=channel_id.to_list())
else:
self.probes[probe_id].Y = []
self.layer_selected = True
def pdc_analysis(self, ROI_list=None, Mord=10, ff=.99, pdc_method='iPDC', stim_params=None, Freqs=np.array(range(1, 101)), preproc_params=None, redo = False):
"""
Calculates time- and frequency-resolved functional connectivity between the LFP signals based on STOK algorithm
:param ROI_list: list of ROIs to be considered for this analysis
:param Mord: Model order for ARMA model
:param ff: filter factor between 0 and 1
:param pdc_method: check the pydynet toolbox for that
:param stim_params: Parameters of stimulus to be used to pool the data
:param Freqs: a numpy array uncluding the Frequencies for connectivity analysis
:return:
"""
if ROI_list is None:
ROI_list = ['VISp']
if stim_params is None:
stim_params = []
#----------------------------------------------------------------------------
# Check if the PDC exist, just load it
# analysis params
PDCparam_dict = {
'ROI_list': ROI_list,
'Mord': Mord,
'ff': ff,
'pdc_method': pdc_method,
'stim_param': stim_params
}
filename = PDCF.search_PDC(self.session_id, self.result_path, PDCparam_dict, preproc_params)
if os.path.isfile(filename) and not redo:
# load the file and return it
file = open(filename, 'rb')
PDC_dict = cPickle.load(file)
file.close()
return PDC_dict
#----------------------------------------------------------------------------
# load the preprocessed LFPs and down sample spatially by selecting 6 layers
self.load_LFPprobes(preproc_params)
self.layer_selection()
# select the conditions and pool their trials together
Result_pool = self.pool_data(preproc_params=preproc_params, stim_params= stim_params, ROI_list = ROI_list)
Y = Result_pool['Y']
Srate = Result_pool['Srate']
# pull together and ROI-layer index
srate = np.unique(np.array(list(Srate.values())))
if len(srate) != 1:
print("Sampling rates do not match between probes, please check the preprocessing!")
return
# Put the data from all ROIs together for PDC calculations
Y_temp = np.concatenate(list(Y.values()), axis=1) # second dimension is the channels
Y_temp = np.moveaxis(Y_temp, -1, 0)
YS = list(Y_temp.shape)
Y_pool = Y_temp.reshape([YS[0] * YS[1], YS[2], YS[3]])
# remove possible zero and NaN values (trials)
nzero_trl = Y_pool[:, :, 10] != 0
nzero_trl_ind = reduce((lambda x, y: np.logical_or(x, y)), nzero_trl.transpose())
nNan_trl_ind = np.isnan(Y_pool).sum(axis=2).sum(axis=1) == 0
Y_pooled = Y_pool[nzero_trl_ind & nNan_trl_ind, :, :]
# iPDC matrix
KF = dsspace.dynet_SSM_STOK(Y_pooled, p=Mord, ff=ff)
iPDC = dcon.dynet_ar2pdc(KF, srate, Freqs, metric=pdc_method, univ=1, flow=2, PSD =1)
# iPDC to xarray
Time = Y['VISp'].time.values
ROI_ls = np.array(Result_pool['ROI_labels']).reshape(np.prod(np.array(Result_pool['ROI_labels']).shape))
iPDC_xr = xr.DataArray(iPDC, dims=['target', 'source', 'freq' , 'time'],
coords=dict(target= ROI_ls, source= ROI_ls, freq=Freqs, time=Time))
# ROIs for output
ROIs = list(Y.keys())
chnl_ids = np.array(Result_pool['channel_ids']).reshape(np.prod(np.array(Result_pool['channel_ids']).shape))
prb_ids = np.array(Result_pool['probe_ids']).reshape(np.prod(np.array(Result_pool['probe_ids']).shape))
# save and return the output
PDC_dict = {'session_id':self.session_id, 'KF': KF, 'ROIs': ROIs, 'PDC': iPDC_xr,
'probe_info': {'probe_ids': prb_ids, 'channel_ids': chnl_ids}, 'PDCparam_dict': PDCparam_dict, 'preproc_dict': preproc_params}
PDCF.save_PDC(PDC_dict, self.result_path)
# save?
return PDC_dict
def pool_data(self, preproc_params=None, stim_params= None, ROI_list = None):
# select the conditions and pool their trials together
Y = {} # to prepare the data for PDC analysis
Srate = {} # to make sure that Srates match
ROI_labels = []
channel_ids = []
probe_ids = []
# All ROIs in this session
All_ROIs = [(self.probes[x].ROI, x) for x in self.probes.keys()]
for ROI in ROI_list:
# find the ROIs and the one with Layer assignment
ch_ind = [i for i, y in enumerate([x[0] for x in All_ROIs]) if y == ROI]
if bool(ch_ind): # in case of multiple recordings from the same ROI, I only labeled the one with better data
temp = [len(self.probes[All_ROIs[x][1]].Y)>0 for x in ch_ind]
Emp_ind = np.where(np.array(temp))[0]# find empty probes -> because no layer was assigned
if len(Emp_ind)>0:
ch_ind = ch_ind[Emp_ind[0]]
#ch_ind = ch_ind[temp.index(True)]
else:
ch_ind = []
if bool(ch_ind) or (ch_ind==0): #if there is a probe
probe_id = All_ROIs[ch_ind][1]
cnd_info = self.probes[probe_id].cnd_info
Cnds_inds = []
for k in stim_params.keys():
Cnds = [cnd_info[k] == x for x in stim_params[k]]
if len(Cnds) > 1:
Cnds_temp = reduce((lambda x, y: np.logical_or(x, y)), [c.to_numpy() for c in Cnds])
Cnds_inds.append(Cnds_temp)
else:
Cnds_inds.append(Cnds)
Cnds_final = np.array(reduce((lambda x, y: np.logical_and(x, y)), Cnds_inds))
Cnds_inds_final = cnd_info['stimulus_condition_id'].to_numpy()[Cnds_final.squeeze()]
# Prepare for output
Y[ROI] = self.probes[probe_id].Y.sel(cnd_id=Cnds_inds_final)
Srate[ROI] = self.probes[probe_id].srate
ROI_labels.append(['{}_L{}'.format(ROI, i) for i in range(1, 7)])
channel_ids.append(Y[ROI].channel.values)
probe_ids.append([probe_id for l in range(1, 7)])
# Set other outputs
Time = Y['VISp'].time.values
ROIs = list(Y.keys())
return {'Y': Y, 'Srate': Srate, 'ROI_labels':ROI_labels, 'channel_ids':channel_ids, 'probe_ids':probe_ids}
def plot_LFPs(self, preproc_params=None, stim_params= None, ROI_list = None, TimeWin=None):
self.load_LFPprobes(preproc_params)
self.layer_selection()
Result_pool = self.pool_data(preproc_params=preproc_params, stim_params=stim_params, ROI_list=ROI_list)
figure_path = os.path.join(self.result_path, 'Average_LFP_{}_downs{}.png'.format(
preproc_params['cond_name'], int(preproc_params['srate'])))
colors = ROIColors('layers')
LFP_plot(Result_pool['Y'],TimeWin, colors, figure_path)
# Return averaged Y
return dict((x,y.mean(axis=(0,3))) for x,y in Result_pool['Y'].items())
def search_preproc(list_pre, dic_pre):
"""
Search if the preprocessing with the current parameters has been run before
:param list_pre: self.preprocess
:param dic_pre: dictionary with new params
:return: The index of pre-processes with current params
"""
result = []
for x in list_pre:
shared_items = [x[k] == dic_pre[k] for k in x if k in dic_pre]
result.append(sum(shared_items)==len(dic_pre))
return [i for i, x in enumerate(result) if x]
# maybe also searches if the files exist?
class ROIColors(object):
"""
A Class that defines uniform colorings for ROIs and layers for visualization
"""
def __init__(self,color_type='uni'):
|
def LFP_plot(Y, TimeWin, colors, figure_path):
"""
A general function to plot LFP averages
:param Y: LFP data with dimensions :trials x layers x time x conditions
:param TimeWin:
:param colors:
:param figure_path:
:return:
"""
nroi = len(Y.keys())
fig, axs = plt.subplots(nrows=nroi, ncols=1, figsize=(6, 2 * nroi), sharex=True)
# ordered ROIs: for uniformity puporse
ordered_rois = ['VISp','VISl','VISrl','VISal','VISpm','VISam']
ROIs = list(filter(lambda x: (x in list(Y.keys())), ordered_rois))
# for each ROI plot mean and SEM
for i in range(0, nroi):
roi = ROIs[i]
T = Y[roi].time.values
T_ind = np.where((T >= TimeWin[0]) & (T <= TimeWin[1]))[0]
y = Y[roi].isel(time=T_ind)
y = np.moveaxis(y.__array__(), -1, 0)
dims = y.shape
y2 = y.reshape(dims[0] * dims[1], dims[2], dims[3])
MEAN = np.nanmean(y2, axis=0).transpose()
SEM = (np.nanstd(y2, axis=0) / (y2.shape[0] ** .5)).transpose()
offset = abs(MEAN).max(axis=(0, 1))
yticks = np.zeros([MEAN.shape[1],1])
for l in range(0, MEAN.shape[1]):
MEAN_plot = MEAN[:, l] - (offset * l)
axs[i].plot(T[T_ind], MEAN_plot,
linewidth=1, label='L{}'.format(l), color=colors.roi_colors_hex[roi][l])
axs[i].fill_between(T[T_ind], MEAN[:, l] - (offset * l) + SEM[:, l], MEAN[:, l] - (offset * l) - SEM[:, l],
alpha=.5, color=colors.roi_colors_hex[roi][l])
yticks[l]= MEAN_plot[T[T_ind]<0].mean()
axs[i].set_title(colors.ROI_names[roi])
axs[i].set_yticks(yticks)
axs[i].set_yticklabels(['L{}'.format(i+1) for i in range(0, MEAN.shape[1])])
axs[i].axvline(x=0, linewidth=1, linestyle='--', color='k')
axs[i].grid(True)
if i == nroi - 1:
axs[i].set_xlabel('Time(S)',fontweight='bold')
axs[i].set_xlim(TimeWin[0], TimeWin[1])
#axs[i].legend(loc='right')
plt.savefig(figure_path, bbox_inches='tight', dpi=300)
plt.close(fig)
def aggregate_LFP_ROI(Y_list):
"""
:param Y_list:
:return:
"""
ROIs_All = reduce(lambda x, y: list(set().union(x, y)), [x.keys() for x in Y_list.values()])
Y_ROI_all = {'session_ids': Y_list.keys(),
'ROIs': ROIs_All,
'Y': {}}
# first indicate the ROIs in the list
for roi in ROIs_All:
s_ids = np.where(np.array([list(x.keys()).count(roi) > 0 for x in Y_list.values()]))[0]
# -for animals with that ROI: make a list and concatenate them-
LFP_temp = [Y_list[list(Y_list.keys())[x]][roi] for x in s_ids]
# -time indexes with non NaN values and round them 3 digit to be uniform-
NNan_ind = [np.logical_not(np.isnan(x.time.values)) for x in LFP_temp]
NNan_ind = reduce(lambda x, y: np.logical_and(x[:min(len(x), len(y))], y[:min(len(x), len(y))]), NNan_ind)
LFP_temp2 = []
for lfp in LFP_temp: # loop over animals
lfp.time.values = np.round(lfp.time.values, 3)
lfp.channel.values = np.arange(0,len(lfp.channel.values))
LFP_temp2.append(lfp.isel(time=np.where(NNan_ind)[0]))
# -calculate average over animals-??
#Y_ROI_all['Y'][roi] = np.array(LFP_temp2).mean(axis=0)
Y_temp = np.expand_dims(np.array(LFP_temp2),axis=3)
Y_ROI_all['Y'][roi] = xr.DataArray(Y_temp, dims=['trial', 'channel', 'time', 'cnd_id'],
coords=dict(trial=range(0, Y_temp.shape[0]), channel=lfp.channel.values, time=lfp.time.values[:Y_temp.shape[2]], cnd_id=[1]))
return Y_ROI_all | """
Initializes the colors class
:param color_type: 'uni'/'layers' indicate if it should return only one color per ROI ('Uni')
or 6 colors per ROI, for 6 layers('Layers')
"""
roi_colors_rgb = {'VISp': [.43, .25, .63], 'VISl': [0.03, 0.29, 0.48], 'VISrl': [0.26, 0.68, 0.76],
'VISal': [0.65, 0.46, 0.11], 'VISpm': [1, .7, .3], 'VISam': [0.8, 0.11, 0.11]}
self.ROI_names = {'VISp': 'V1', 'VISl': 'LM', 'VISrl': 'RL', 'VISal': 'AL', 'VISpm': 'PM', 'VISam': 'AM'}
if color_type == 'uni':
self.roi_colors_rgb = roi_colors_rgb
self.roi_colors_hex = dict((x, '#%02x%02x%02x' % (int(v[0] * 255), int(v[1] * 255), int(v[2] * 255))) for x, v in
roi_colors_rgb.items())
elif color_type =='layers':
offset = np.arange(-.25,.26,.1)
roi_colors_rgb_layers = dict(
(x, np.array([np.minimum(np.maximum(v + x, 0), 1) for x in offset])) for x, v in roi_colors_rgb.items())
self.roi_colors_rgb = roi_colors_rgb_layers
self.roi_colors_hex = dict((x,['#%02x%02x%02x' % (int(v[0]*255), int(v[1]*255), int(v[2]*255)) for v in k])
for x,k in roi_colors_rgb_layers.items())
else:
print ('Wrong color type')
return
self.color_type = color_type | identifier_body |
lfp_session.py |
import os
import probe_functions as ProbeF
import pdc_functions as PDCF
import _pickle as cPickle
import pandas as pd
import numpy as np
from functools import reduce
import dynet_statespace as dsspace
import dynet_con as dcon
import xarray as xr
import matplotlib.pyplot as plt
from functools import reduce
class LFPSession(object):
"""
Class to access, store, and retrieve LFP session data, apply pre-processing and estimate iPDC
"""
def __init__(self,cache,session_id,result_path):
"""
Initialize the class based on AllenBrainSDK session
:param cache: cache from EcephysProjectCache.from_warehouse(manifest=manifest_path)
:param session_id: ID for allenSDK session
:param result_path: Path to save the results
"""
self.session_id = session_id
# Add the resultpath folder for this session #### be careful about this variable when saving and loading (both Paths)
if not os.path.exists(os.path.join(result_path, str(self.session_id))):
os.mkdir(os.path.join(result_path, str(self.session_id)))
self.result_path = os.path.join(result_path, str(self.session_id))
# check if the LFP session already exist, load that session preprocessing info
try:
self.load_session()
except FileNotFoundError:
# self.cond_name = cond_name
self.preprocess = [] # any preprocessing is done? list of the preprocessing params
self.RF = False # Channel info is stored?
self.CSD = False # CSD plots for layer assignment are done before?
self.ROIs = {} # empty dictionary indicating cortical ROI (VIS areas) and their relative probes
self.session = cache.get_session_data(session_id) # Get allenSDK session
# variables for running time only
self.probes = dict.fromkeys(self.session.probes.index.values) # Get the probes for this session, make a dictionary maybe
self.loaded_cond = None #Load LFP option
self.layer_selected = False # if the loaded LFP is spatially down-sampled
## Class methods read/write the LFPSession from/to file (note: only preprocessing info is important)
def save_session(self):
"""
Saves session and preprocessing information to a .obj file using cPickle
:return: file path/name
"""
filename = os.path.join(self.result_path, 'LFPSession_{}.obj'.format(self.session_id))
filehandler = open(filename, "wb")
# Do not save the loaded LFP matrices since they are too big
temp = self
temp.probes = dict.fromkeys(temp.probes.keys())
temp.loaded_cond = None
temp.layer_selected = False
cPickle.dump(temp.__dict__, filehandler)
filehandler.close()
return filename
def load_session(self): # be careful about this -> result_path
filename = os.path.join(self.result_path, 'LFPSession_{}.obj'.format(self.session_id))
file = open(filename, 'rb')
dataPickle = file.read()
file.close()
self.__dict__ = cPickle.loads(dataPickle)
def __str__(self):
return str(self.__dict__).replace(", '", ",\n '")
## Processing methods
def preprocessing(self,cond_name='drifting_gratings', down_sample_rate=5, pre_stim = 1, do_RF=False, do_CSD=False, do_probe=False):
"""
Runs the preprocessing on the session with the input parameters, if it has not been run before.
:param cond_name: condition name to be preprocessed
:param do_RF: do receptive field mapping plots? Attention: this may take a while if set True, note it is not RF mappning based on permutation
:param down_sample_rate:
:param pre_stim: prestimulus time in sec
:return:
"""
# first indicate if the
preproc_dict = {
'cond_name': cond_name,
'srate': down_sample_rate,
'prestim': pre_stim,
}
# Attention: remove the zero conditions
if not search_preproc(self.preprocess,preproc_dict):
for probe_id in self.probes.keys():
# Load lfp data
lfp =self.session.get_lfp(probe_id)
# First extract probe info and save
if do_RF:
ProbeF.extract_probeinfo(self.session, lfp, probe_id, self.result_path, do_RF)
self.RF = True
elif not self.RF or do_probe:
ProbeF.extract_probeinfo(self.session, lfp, probe_id, self.result_path, False)
# CSD plot for the probe
if (not self.CSD) and do_CSD:
ProbeF.CSD_plots(self.session, lfp, probe_id, self.result_path)
# Extract and prepare the data for a condition
if cond_name is not None:
ROI = ProbeF.prepare_condition(self.session, self.session_id, lfp, probe_id, cond_name, self.result_path, pre_stim, down_sample_rate)
self.ROIs[ROI] = probe_id
# Add the pre-process params as a dictionary to the list of preprocessed data
if cond_name is not None:
self.preprocess.append(preproc_dict)
if (not self.CSD) and do_CSD:
self.CSD = True
if not self.RF or do_probe:
self.RF = True
# Save the session after preprocessing
self.save_session()
def load_LFPprobes(self, cond_dict):
"""
loads in the preprocessed LFP signal
:param cond_dict: a dictionary with the preprocessing params
:return: Updates the self.probes values
"""
preprocess_ind = search_preproc(self.preprocess, cond_dict)
if not preprocess_ind: # checks if the condition is previously run
print("no preprocessing with these parameters is done")
return
cond = self.preprocess[preprocess_ind[0]]
for probe_id in self.probes.keys():
# first prepare the file name
filename = os.path.join(self.result_path, 'PrepData', '{}_{}{}_pres{}s.pkl'.format(
probe_id, cond['cond_name'], int(cond['srate']),cond['prestim']))
# second load each probe and add it to the ROI list
self.probes[probe_id] = ProbeF.LFPprobe.from_file(filename)
self.loaded_cond = cond['cond_name']
def | (self, Filename=None):
"""
This will be done on the loaded_cond data
:return:
"""
if Filename==None:
Filename = os.path.join(self.result_path,'PrepData','Cortical_Layers.xlsx')
try:
layer_table = pd.read_excel(Filename)
# set the layer names as index of the dataframe
layer_table.set_index('Layers', inplace=True)
except OSError:
# if the layer file did not exist then return with an error
print("Prepare the cortical layer files first as PrepData/Cortical_Layers.xlsx")
return
for probe_id in self.probes.keys():
print(probe_id)
#ProbeF.layer_reduction(self.probes[probe_id].Y, probe_id, self.result_path)
channel_id = ProbeF.layer_selection(layer_table, probe_id, self.result_path)
# select the LFP of those channels, and relabel the xarray dimensions
if len(channel_id) > 0:
self.probes[probe_id].Y = self.probes[probe_id].Y.sel(channel=channel_id.to_list())
else:
self.probes[probe_id].Y = []
self.layer_selected = True
def pdc_analysis(self, ROI_list=None, Mord=10, ff=.99, pdc_method='iPDC', stim_params=None, Freqs=np.array(range(1, 101)), preproc_params=None, redo = False):
"""
Calculates time- and frequency-resolved functional connectivity between the LFP signals based on STOK algorithm
:param ROI_list: list of ROIs to be considered for this analysis
:param Mord: Model order for ARMA model
:param ff: filter factor between 0 and 1
:param pdc_method: check the pydynet toolbox for that
:param stim_params: Parameters of stimulus to be used to pool the data
:param Freqs: a numpy array uncluding the Frequencies for connectivity analysis
:return:
"""
if ROI_list is None:
ROI_list = ['VISp']
if stim_params is None:
stim_params = []
#----------------------------------------------------------------------------
# Check if the PDC exist, just load it
# analysis params
PDCparam_dict = {
'ROI_list': ROI_list,
'Mord': Mord,
'ff': ff,
'pdc_method': pdc_method,
'stim_param': stim_params
}
filename = PDCF.search_PDC(self.session_id, self.result_path, PDCparam_dict, preproc_params)
if os.path.isfile(filename) and not redo:
# load the file and return it
file = open(filename, 'rb')
PDC_dict = cPickle.load(file)
file.close()
return PDC_dict
#----------------------------------------------------------------------------
# load the preprocessed LFPs and down sample spatially by selecting 6 layers
self.load_LFPprobes(preproc_params)
self.layer_selection()
# select the conditions and pool their trials together
Result_pool = self.pool_data(preproc_params=preproc_params, stim_params= stim_params, ROI_list = ROI_list)
Y = Result_pool['Y']
Srate = Result_pool['Srate']
# pull together and ROI-layer index
srate = np.unique(np.array(list(Srate.values())))
if len(srate) != 1:
print("Sampling rates do not match between probes, please check the preprocessing!")
return
# Put the data from all ROIs together for PDC calculations
Y_temp = np.concatenate(list(Y.values()), axis=1) # second dimension is the channels
Y_temp = np.moveaxis(Y_temp, -1, 0)
YS = list(Y_temp.shape)
Y_pool = Y_temp.reshape([YS[0] * YS[1], YS[2], YS[3]])
# remove possible zero and NaN values (trials)
nzero_trl = Y_pool[:, :, 10] != 0
nzero_trl_ind = reduce((lambda x, y: np.logical_or(x, y)), nzero_trl.transpose())
nNan_trl_ind = np.isnan(Y_pool).sum(axis=2).sum(axis=1) == 0
Y_pooled = Y_pool[nzero_trl_ind & nNan_trl_ind, :, :]
# iPDC matrix
KF = dsspace.dynet_SSM_STOK(Y_pooled, p=Mord, ff=ff)
iPDC = dcon.dynet_ar2pdc(KF, srate, Freqs, metric=pdc_method, univ=1, flow=2, PSD =1)
# iPDC to xarray
Time = Y['VISp'].time.values
ROI_ls = np.array(Result_pool['ROI_labels']).reshape(np.prod(np.array(Result_pool['ROI_labels']).shape))
iPDC_xr = xr.DataArray(iPDC, dims=['target', 'source', 'freq' , 'time'],
coords=dict(target= ROI_ls, source= ROI_ls, freq=Freqs, time=Time))
# ROIs for output
ROIs = list(Y.keys())
chnl_ids = np.array(Result_pool['channel_ids']).reshape(np.prod(np.array(Result_pool['channel_ids']).shape))
prb_ids = np.array(Result_pool['probe_ids']).reshape(np.prod(np.array(Result_pool['probe_ids']).shape))
# save and return the output
PDC_dict = {'session_id':self.session_id, 'KF': KF, 'ROIs': ROIs, 'PDC': iPDC_xr,
'probe_info': {'probe_ids': prb_ids, 'channel_ids': chnl_ids}, 'PDCparam_dict': PDCparam_dict, 'preproc_dict': preproc_params}
PDCF.save_PDC(PDC_dict, self.result_path)
# save?
return PDC_dict
def pool_data(self, preproc_params=None, stim_params= None, ROI_list = None):
# select the conditions and pool their trials together
Y = {} # to prepare the data for PDC analysis
Srate = {} # to make sure that Srates match
ROI_labels = []
channel_ids = []
probe_ids = []
# All ROIs in this session
All_ROIs = [(self.probes[x].ROI, x) for x in self.probes.keys()]
for ROI in ROI_list:
# find the ROIs and the one with Layer assignment
ch_ind = [i for i, y in enumerate([x[0] for x in All_ROIs]) if y == ROI]
if bool(ch_ind): # in case of multiple recordings from the same ROI, I only labeled the one with better data
temp = [len(self.probes[All_ROIs[x][1]].Y)>0 for x in ch_ind]
Emp_ind = np.where(np.array(temp))[0]# find empty probes -> because no layer was assigned
if len(Emp_ind)>0:
ch_ind = ch_ind[Emp_ind[0]]
#ch_ind = ch_ind[temp.index(True)]
else:
ch_ind = []
if bool(ch_ind) or (ch_ind==0): #if there is a probe
probe_id = All_ROIs[ch_ind][1]
cnd_info = self.probes[probe_id].cnd_info
Cnds_inds = []
for k in stim_params.keys():
Cnds = [cnd_info[k] == x for x in stim_params[k]]
if len(Cnds) > 1:
Cnds_temp = reduce((lambda x, y: np.logical_or(x, y)), [c.to_numpy() for c in Cnds])
Cnds_inds.append(Cnds_temp)
else:
Cnds_inds.append(Cnds)
Cnds_final = np.array(reduce((lambda x, y: np.logical_and(x, y)), Cnds_inds))
Cnds_inds_final = cnd_info['stimulus_condition_id'].to_numpy()[Cnds_final.squeeze()]
# Prepare for output
Y[ROI] = self.probes[probe_id].Y.sel(cnd_id=Cnds_inds_final)
Srate[ROI] = self.probes[probe_id].srate
ROI_labels.append(['{}_L{}'.format(ROI, i) for i in range(1, 7)])
channel_ids.append(Y[ROI].channel.values)
probe_ids.append([probe_id for l in range(1, 7)])
# Set other outputs
Time = Y['VISp'].time.values
ROIs = list(Y.keys())
return {'Y': Y, 'Srate': Srate, 'ROI_labels':ROI_labels, 'channel_ids':channel_ids, 'probe_ids':probe_ids}
def plot_LFPs(self, preproc_params=None, stim_params= None, ROI_list = None, TimeWin=None):
self.load_LFPprobes(preproc_params)
self.layer_selection()
Result_pool = self.pool_data(preproc_params=preproc_params, stim_params=stim_params, ROI_list=ROI_list)
figure_path = os.path.join(self.result_path, 'Average_LFP_{}_downs{}.png'.format(
preproc_params['cond_name'], int(preproc_params['srate'])))
colors = ROIColors('layers')
LFP_plot(Result_pool['Y'],TimeWin, colors, figure_path)
# Return averaged Y
return dict((x,y.mean(axis=(0,3))) for x,y in Result_pool['Y'].items())
def search_preproc(list_pre, dic_pre):
"""
Search if the preprocessing with the current parameters has been run before
:param list_pre: self.preprocess
:param dic_pre: dictionary with new params
:return: The index of pre-processes with current params
"""
result = []
for x in list_pre:
shared_items = [x[k] == dic_pre[k] for k in x if k in dic_pre]
result.append(sum(shared_items)==len(dic_pre))
return [i for i, x in enumerate(result) if x]
# maybe also searches if the files exist?
class ROIColors(object):
"""
A Class that defines uniform colorings for ROIs and layers for visualization
"""
def __init__(self,color_type='uni'):
"""
Initializes the colors class
:param color_type: 'uni'/'layers' indicate if it should return only one color per ROI ('Uni')
or 6 colors per ROI, for 6 layers('Layers')
"""
roi_colors_rgb = {'VISp': [.43, .25, .63], 'VISl': [0.03, 0.29, 0.48], 'VISrl': [0.26, 0.68, 0.76],
'VISal': [0.65, 0.46, 0.11], 'VISpm': [1, .7, .3], 'VISam': [0.8, 0.11, 0.11]}
self.ROI_names = {'VISp': 'V1', 'VISl': 'LM', 'VISrl': 'RL', 'VISal': 'AL', 'VISpm': 'PM', 'VISam': 'AM'}
if color_type == 'uni':
self.roi_colors_rgb = roi_colors_rgb
self.roi_colors_hex = dict((x, '#%02x%02x%02x' % (int(v[0] * 255), int(v[1] * 255), int(v[2] * 255))) for x, v in
roi_colors_rgb.items())
elif color_type =='layers':
offset = np.arange(-.25,.26,.1)
roi_colors_rgb_layers = dict(
(x, np.array([np.minimum(np.maximum(v + x, 0), 1) for x in offset])) for x, v in roi_colors_rgb.items())
self.roi_colors_rgb = roi_colors_rgb_layers
self.roi_colors_hex = dict((x,['#%02x%02x%02x' % (int(v[0]*255), int(v[1]*255), int(v[2]*255)) for v in k])
for x,k in roi_colors_rgb_layers.items())
else:
print ('Wrong color type')
return
self.color_type = color_type
def LFP_plot(Y, TimeWin, colors, figure_path):
"""
A general function to plot LFP averages
:param Y: LFP data with dimensions :trials x layers x time x conditions
:param TimeWin:
:param colors:
:param figure_path:
:return:
"""
nroi = len(Y.keys())
fig, axs = plt.subplots(nrows=nroi, ncols=1, figsize=(6, 2 * nroi), sharex=True)
# ordered ROIs: for uniformity puporse
ordered_rois = ['VISp','VISl','VISrl','VISal','VISpm','VISam']
ROIs = list(filter(lambda x: (x in list(Y.keys())), ordered_rois))
# for each ROI plot mean and SEM
for i in range(0, nroi):
roi = ROIs[i]
T = Y[roi].time.values
T_ind = np.where((T >= TimeWin[0]) & (T <= TimeWin[1]))[0]
y = Y[roi].isel(time=T_ind)
y = np.moveaxis(y.__array__(), -1, 0)
dims = y.shape
y2 = y.reshape(dims[0] * dims[1], dims[2], dims[3])
MEAN = np.nanmean(y2, axis=0).transpose()
SEM = (np.nanstd(y2, axis=0) / (y2.shape[0] ** .5)).transpose()
offset = abs(MEAN).max(axis=(0, 1))
yticks = np.zeros([MEAN.shape[1],1])
for l in range(0, MEAN.shape[1]):
MEAN_plot = MEAN[:, l] - (offset * l)
axs[i].plot(T[T_ind], MEAN_plot,
linewidth=1, label='L{}'.format(l), color=colors.roi_colors_hex[roi][l])
axs[i].fill_between(T[T_ind], MEAN[:, l] - (offset * l) + SEM[:, l], MEAN[:, l] - (offset * l) - SEM[:, l],
alpha=.5, color=colors.roi_colors_hex[roi][l])
yticks[l]= MEAN_plot[T[T_ind]<0].mean()
axs[i].set_title(colors.ROI_names[roi])
axs[i].set_yticks(yticks)
axs[i].set_yticklabels(['L{}'.format(i+1) for i in range(0, MEAN.shape[1])])
axs[i].axvline(x=0, linewidth=1, linestyle='--', color='k')
axs[i].grid(True)
if i == nroi - 1:
axs[i].set_xlabel('Time(S)',fontweight='bold')
axs[i].set_xlim(TimeWin[0], TimeWin[1])
#axs[i].legend(loc='right')
plt.savefig(figure_path, bbox_inches='tight', dpi=300)
plt.close(fig)
def aggregate_LFP_ROI(Y_list):
"""
:param Y_list:
:return:
"""
ROIs_All = reduce(lambda x, y: list(set().union(x, y)), [x.keys() for x in Y_list.values()])
Y_ROI_all = {'session_ids': Y_list.keys(),
'ROIs': ROIs_All,
'Y': {}}
# first indicate the ROIs in the list
for roi in ROIs_All:
s_ids = np.where(np.array([list(x.keys()).count(roi) > 0 for x in Y_list.values()]))[0]
# -for animals with that ROI: make a list and concatenate them-
LFP_temp = [Y_list[list(Y_list.keys())[x]][roi] for x in s_ids]
# -time indexes with non NaN values and round them 3 digit to be uniform-
NNan_ind = [np.logical_not(np.isnan(x.time.values)) for x in LFP_temp]
NNan_ind = reduce(lambda x, y: np.logical_and(x[:min(len(x), len(y))], y[:min(len(x), len(y))]), NNan_ind)
LFP_temp2 = []
for lfp in LFP_temp: # loop over animals
lfp.time.values = np.round(lfp.time.values, 3)
lfp.channel.values = np.arange(0,len(lfp.channel.values))
LFP_temp2.append(lfp.isel(time=np.where(NNan_ind)[0]))
# -calculate average over animals-??
#Y_ROI_all['Y'][roi] = np.array(LFP_temp2).mean(axis=0)
Y_temp = np.expand_dims(np.array(LFP_temp2),axis=3)
Y_ROI_all['Y'][roi] = xr.DataArray(Y_temp, dims=['trial', 'channel', 'time', 'cnd_id'],
coords=dict(trial=range(0, Y_temp.shape[0]), channel=lfp.channel.values, time=lfp.time.values[:Y_temp.shape[2]], cnd_id=[1]))
return Y_ROI_all | layer_selection | identifier_name |
helpers.rs | //! A module with ide helpers for high-level ide features.
pub mod import_assets;
pub mod insert_use;
pub mod merge_imports;
pub mod rust_doc;
pub mod generated_lints;
use std::collections::VecDeque;
use base_db::FileId;
use either::Either;
use hir::{Crate, Enum, ItemInNs, MacroDef, Module, ModuleDef, Name, ScopeDef, Semantics, Trait};
use syntax::{
ast::{self, make, LoopBodyOwner},
AstNode, Direction, SyntaxElement, SyntaxKind, SyntaxToken, TokenAtOffset, WalkEvent, T,
};
use crate::RootDatabase;
pub fn item_name(db: &RootDatabase, item: ItemInNs) -> Option<Name> {
match item {
ItemInNs::Types(module_def_id) => ModuleDef::from(module_def_id).name(db),
ItemInNs::Values(module_def_id) => ModuleDef::from(module_def_id).name(db),
ItemInNs::Macros(macro_def_id) => MacroDef::from(macro_def_id).name(db),
}
}
/// Resolves the path at the cursor token as a derive macro if it inside a token tree of a derive attribute.
pub fn try_resolve_derive_input_at(
sema: &Semantics<RootDatabase>,
derive_attr: &ast::Attr,
cursor: &SyntaxToken,
) -> Option<MacroDef> {
use itertools::Itertools;
if cursor.kind() != T![ident] {
return None;
}
let tt = match derive_attr.as_simple_call() {
Some((name, tt))
if name == "derive" && tt.syntax().text_range().contains_range(cursor.text_range()) =>
{
tt
}
_ => return None,
};
let tokens: Vec<_> = cursor
.siblings_with_tokens(Direction::Prev)
.flat_map(SyntaxElement::into_token)
.take_while(|tok| tok.kind() != T!['('] && tok.kind() != T![,])
.collect();
let path = ast::Path::parse(&tokens.into_iter().rev().join("")).ok()?;
match sema.scope(tt.syntax()).speculative_resolve(&path) {
Some(hir::PathResolution::Macro(makro)) if makro.kind() == hir::MacroKind::Derive => {
Some(makro)
}
_ => None,
}
}
/// Picks the token with the highest rank returned by the passed in function.
pub fn pick_best_token(
tokens: TokenAtOffset<SyntaxToken>,
f: impl Fn(SyntaxKind) -> usize,
) -> Option<SyntaxToken> {
tokens.max_by_key(move |t| f(t.kind()))
}
/// Converts the mod path struct into its ast representation.
pub fn | (path: &hir::ModPath) -> ast::Path {
let _p = profile::span("mod_path_to_ast");
let mut segments = Vec::new();
let mut is_abs = false;
match path.kind {
hir::PathKind::Plain => {}
hir::PathKind::Super(0) => segments.push(make::path_segment_self()),
hir::PathKind::Super(n) => segments.extend((0..n).map(|_| make::path_segment_super())),
hir::PathKind::DollarCrate(_) | hir::PathKind::Crate => {
segments.push(make::path_segment_crate())
}
hir::PathKind::Abs => is_abs = true,
}
segments.extend(
path.segments()
.iter()
.map(|segment| make::path_segment(make::name_ref(&segment.to_string()))),
);
make::path_from_segments(segments, is_abs)
}
/// Iterates all `ModuleDef`s and `Impl` blocks of the given file.
pub fn visit_file_defs(
sema: &Semantics<RootDatabase>,
file_id: FileId,
cb: &mut dyn FnMut(Either<hir::ModuleDef, hir::Impl>),
) {
let db = sema.db;
let module = match sema.to_module_def(file_id) {
Some(it) => it,
None => return,
};
let mut defs: VecDeque<_> = module.declarations(db).into();
while let Some(def) = defs.pop_front() {
if let ModuleDef::Module(submodule) = def {
if let hir::ModuleSource::Module(_) = submodule.definition_source(db).value {
defs.extend(submodule.declarations(db));
submodule.impl_defs(db).into_iter().for_each(|impl_| cb(Either::Right(impl_)));
}
}
cb(Either::Left(def));
}
module.impl_defs(db).into_iter().for_each(|impl_| cb(Either::Right(impl_)));
}
/// Helps with finding well-know things inside the standard library. This is
/// somewhat similar to the known paths infra inside hir, but it different; We
/// want to make sure that IDE specific paths don't become interesting inside
/// the compiler itself as well.
///
/// Note that, by default, rust-analyzer tests **do not** include core or std
/// libraries. If you are writing tests for functionality using [`FamousDefs`],
/// you'd want to include minicore (see `test_utils::MiniCore`) declaration at
/// the start of your tests:
///
/// ```
/// //- minicore: iterator, ord, derive
/// ```
pub struct FamousDefs<'a, 'b>(pub &'a Semantics<'b, RootDatabase>, pub Option<Crate>);
#[allow(non_snake_case)]
impl FamousDefs<'_, '_> {
pub fn std(&self) -> Option<Crate> {
self.find_crate("std")
}
pub fn core(&self) -> Option<Crate> {
self.find_crate("core")
}
pub fn core_cmp_Ord(&self) -> Option<Trait> {
self.find_trait("core:cmp:Ord")
}
pub fn core_convert_From(&self) -> Option<Trait> {
self.find_trait("core:convert:From")
}
pub fn core_convert_Into(&self) -> Option<Trait> {
self.find_trait("core:convert:Into")
}
pub fn core_option_Option(&self) -> Option<Enum> {
self.find_enum("core:option:Option")
}
pub fn core_result_Result(&self) -> Option<Enum> {
self.find_enum("core:result:Result")
}
pub fn core_default_Default(&self) -> Option<Trait> {
self.find_trait("core:default:Default")
}
pub fn core_iter_Iterator(&self) -> Option<Trait> {
self.find_trait("core:iter:traits:iterator:Iterator")
}
pub fn core_iter_IntoIterator(&self) -> Option<Trait> {
self.find_trait("core:iter:traits:collect:IntoIterator")
}
pub fn core_iter(&self) -> Option<Module> {
self.find_module("core:iter")
}
pub fn core_ops_Deref(&self) -> Option<Trait> {
self.find_trait("core:ops:Deref")
}
fn find_trait(&self, path: &str) -> Option<Trait> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Trait(it)) => Some(it),
_ => None,
}
}
fn find_enum(&self, path: &str) -> Option<Enum> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Adt(hir::Adt::Enum(it))) => Some(it),
_ => None,
}
}
fn find_module(&self, path: &str) -> Option<Module> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Module(it)) => Some(it),
_ => None,
}
}
fn find_crate(&self, name: &str) -> Option<Crate> {
let krate = self.1?;
let db = self.0.db;
let res =
krate.dependencies(db).into_iter().find(|dep| dep.name.to_string() == name)?.krate;
Some(res)
}
fn find_def(&self, path: &str) -> Option<ScopeDef> {
let db = self.0.db;
let mut path = path.split(':');
let trait_ = path.next_back()?;
let std_crate = path.next()?;
let std_crate = self.find_crate(std_crate)?;
let mut module = std_crate.root_module(db);
for segment in path {
module = module.children(db).find_map(|child| {
let name = child.name(db)?;
if name.to_string() == segment {
Some(child)
} else {
None
}
})?;
}
let def =
module.scope(db, None).into_iter().find(|(name, _def)| name.to_string() == trait_)?.1;
Some(def)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SnippetCap {
_private: (),
}
impl SnippetCap {
pub const fn new(allow_snippets: bool) -> Option<SnippetCap> {
if allow_snippets {
Some(SnippetCap { _private: () })
} else {
None
}
}
}
/// Calls `cb` on each expression inside `expr` that is at "tail position".
/// Does not walk into `break` or `return` expressions.
pub fn for_each_tail_expr(expr: &ast::Expr, cb: &mut dyn FnMut(&ast::Expr)) {
match expr {
ast::Expr::BlockExpr(b) => {
if let Some(e) = b.tail_expr() {
for_each_tail_expr(&e, cb);
}
}
ast::Expr::EffectExpr(e) => match e.effect() {
ast::Effect::Label(label) => {
for_each_break_expr(Some(label), e.block_expr(), &mut |b| {
cb(&ast::Expr::BreakExpr(b))
});
if let Some(b) = e.block_expr() {
for_each_tail_expr(&ast::Expr::BlockExpr(b), cb);
}
}
ast::Effect::Unsafe(_) => {
if let Some(e) = e.block_expr().and_then(|b| b.tail_expr()) {
for_each_tail_expr(&e, cb);
}
}
ast::Effect::Async(_) | ast::Effect::Try(_) | ast::Effect::Const(_) => cb(expr),
},
ast::Expr::IfExpr(if_) => {
let mut if_ = if_.clone();
loop {
if let Some(block) = if_.then_branch() {
for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
}
match if_.else_branch() {
Some(ast::ElseBranch::IfExpr(it)) => if_ = it,
Some(ast::ElseBranch::Block(block)) => {
for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
break;
}
None => break,
}
}
}
ast::Expr::LoopExpr(l) => {
for_each_break_expr(l.label(), l.loop_body(), &mut |b| cb(&ast::Expr::BreakExpr(b)))
}
ast::Expr::MatchExpr(m) => {
if let Some(arms) = m.match_arm_list() {
arms.arms().filter_map(|arm| arm.expr()).for_each(|e| for_each_tail_expr(&e, cb));
}
}
ast::Expr::ArrayExpr(_)
| ast::Expr::AwaitExpr(_)
| ast::Expr::BinExpr(_)
| ast::Expr::BoxExpr(_)
| ast::Expr::BreakExpr(_)
| ast::Expr::CallExpr(_)
| ast::Expr::CastExpr(_)
| ast::Expr::ClosureExpr(_)
| ast::Expr::ContinueExpr(_)
| ast::Expr::FieldExpr(_)
| ast::Expr::ForExpr(_)
| ast::Expr::IndexExpr(_)
| ast::Expr::Literal(_)
| ast::Expr::MacroCall(_)
| ast::Expr::MacroStmts(_)
| ast::Expr::MethodCallExpr(_)
| ast::Expr::ParenExpr(_)
| ast::Expr::PathExpr(_)
| ast::Expr::PrefixExpr(_)
| ast::Expr::RangeExpr(_)
| ast::Expr::RecordExpr(_)
| ast::Expr::RefExpr(_)
| ast::Expr::ReturnExpr(_)
| ast::Expr::TryExpr(_)
| ast::Expr::TupleExpr(_)
| ast::Expr::WhileExpr(_)
| ast::Expr::YieldExpr(_) => cb(expr),
}
}
/// Calls `cb` on each break expr inside of `body` that is applicable for the given label.
pub fn for_each_break_expr(
label: Option<ast::Label>,
body: Option<ast::BlockExpr>,
cb: &mut dyn FnMut(ast::BreakExpr),
) {
let label = label.and_then(|lbl| lbl.lifetime());
let mut depth = 0;
if let Some(b) = body {
let preorder = &mut b.syntax().preorder();
let ev_as_expr = |ev| match ev {
WalkEvent::Enter(it) => Some(WalkEvent::Enter(ast::Expr::cast(it)?)),
WalkEvent::Leave(it) => Some(WalkEvent::Leave(ast::Expr::cast(it)?)),
};
let eq_label = |lt: Option<ast::Lifetime>| {
lt.zip(label.as_ref()).map_or(false, |(lt, lbl)| lt.text() == lbl.text())
};
while let Some(node) = preorder.find_map(ev_as_expr) {
match node {
WalkEvent::Enter(expr) => match expr {
ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_) => {
depth += 1
}
ast::Expr::EffectExpr(e) if e.label().is_some() => depth += 1,
ast::Expr::BreakExpr(b)
if (depth == 0 && b.lifetime().is_none()) || eq_label(b.lifetime()) =>
{
cb(b);
}
_ => (),
},
WalkEvent::Leave(expr) => match expr {
ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_) => {
depth -= 1
}
ast::Expr::EffectExpr(e) if e.label().is_some() => depth -= 1,
_ => (),
},
}
}
}
}
| mod_path_to_ast | identifier_name |
helpers.rs | //! A module with ide helpers for high-level ide features.
pub mod import_assets;
pub mod insert_use;
pub mod merge_imports;
pub mod rust_doc;
pub mod generated_lints;
use std::collections::VecDeque;
use base_db::FileId;
use either::Either;
use hir::{Crate, Enum, ItemInNs, MacroDef, Module, ModuleDef, Name, ScopeDef, Semantics, Trait};
use syntax::{
ast::{self, make, LoopBodyOwner},
AstNode, Direction, SyntaxElement, SyntaxKind, SyntaxToken, TokenAtOffset, WalkEvent, T,
};
use crate::RootDatabase;
pub fn item_name(db: &RootDatabase, item: ItemInNs) -> Option<Name> {
match item {
ItemInNs::Types(module_def_id) => ModuleDef::from(module_def_id).name(db),
ItemInNs::Values(module_def_id) => ModuleDef::from(module_def_id).name(db),
ItemInNs::Macros(macro_def_id) => MacroDef::from(macro_def_id).name(db),
}
}
/// Resolves the path at the cursor token as a derive macro if it inside a token tree of a derive attribute.
pub fn try_resolve_derive_input_at(
sema: &Semantics<RootDatabase>,
derive_attr: &ast::Attr,
cursor: &SyntaxToken,
) -> Option<MacroDef> {
use itertools::Itertools;
if cursor.kind() != T![ident] {
return None;
}
let tt = match derive_attr.as_simple_call() {
Some((name, tt))
if name == "derive" && tt.syntax().text_range().contains_range(cursor.text_range()) =>
{
tt
}
_ => return None,
};
let tokens: Vec<_> = cursor
.siblings_with_tokens(Direction::Prev)
.flat_map(SyntaxElement::into_token)
.take_while(|tok| tok.kind() != T!['('] && tok.kind() != T![,])
.collect();
let path = ast::Path::parse(&tokens.into_iter().rev().join("")).ok()?;
match sema.scope(tt.syntax()).speculative_resolve(&path) {
Some(hir::PathResolution::Macro(makro)) if makro.kind() == hir::MacroKind::Derive => {
Some(makro)
}
_ => None,
}
}
/// Picks the token with the highest rank returned by the passed in function.
pub fn pick_best_token(
tokens: TokenAtOffset<SyntaxToken>,
f: impl Fn(SyntaxKind) -> usize,
) -> Option<SyntaxToken> {
tokens.max_by_key(move |t| f(t.kind()))
}
/// Converts the mod path struct into its ast representation.
pub fn mod_path_to_ast(path: &hir::ModPath) -> ast::Path {
let _p = profile::span("mod_path_to_ast");
let mut segments = Vec::new();
let mut is_abs = false;
match path.kind {
hir::PathKind::Plain => {}
hir::PathKind::Super(0) => segments.push(make::path_segment_self()),
hir::PathKind::Super(n) => segments.extend((0..n).map(|_| make::path_segment_super())),
hir::PathKind::DollarCrate(_) | hir::PathKind::Crate => {
segments.push(make::path_segment_crate())
}
hir::PathKind::Abs => is_abs = true,
}
segments.extend(
path.segments()
.iter()
.map(|segment| make::path_segment(make::name_ref(&segment.to_string()))),
);
make::path_from_segments(segments, is_abs)
}
/// Iterates all `ModuleDef`s and `Impl` blocks of the given file.
pub fn visit_file_defs(
sema: &Semantics<RootDatabase>,
file_id: FileId,
cb: &mut dyn FnMut(Either<hir::ModuleDef, hir::Impl>),
) {
let db = sema.db;
let module = match sema.to_module_def(file_id) {
Some(it) => it,
None => return,
};
let mut defs: VecDeque<_> = module.declarations(db).into();
while let Some(def) = defs.pop_front() {
if let ModuleDef::Module(submodule) = def {
if let hir::ModuleSource::Module(_) = submodule.definition_source(db).value {
defs.extend(submodule.declarations(db));
submodule.impl_defs(db).into_iter().for_each(|impl_| cb(Either::Right(impl_)));
}
}
cb(Either::Left(def));
}
module.impl_defs(db).into_iter().for_each(|impl_| cb(Either::Right(impl_)));
}
/// Helps with finding well-know things inside the standard library. This is
/// somewhat similar to the known paths infra inside hir, but it different; We
/// want to make sure that IDE specific paths don't become interesting inside
/// the compiler itself as well.
///
/// Note that, by default, rust-analyzer tests **do not** include core or std
/// libraries. If you are writing tests for functionality using [`FamousDefs`],
/// you'd want to include minicore (see `test_utils::MiniCore`) declaration at
/// the start of your tests:
///
/// ```
/// //- minicore: iterator, ord, derive
/// ```
pub struct FamousDefs<'a, 'b>(pub &'a Semantics<'b, RootDatabase>, pub Option<Crate>);
#[allow(non_snake_case)]
impl FamousDefs<'_, '_> {
pub fn std(&self) -> Option<Crate> {
self.find_crate("std")
}
pub fn core(&self) -> Option<Crate> {
self.find_crate("core")
}
pub fn core_cmp_Ord(&self) -> Option<Trait> {
self.find_trait("core:cmp:Ord")
}
pub fn core_convert_From(&self) -> Option<Trait> {
self.find_trait("core:convert:From")
}
pub fn core_convert_Into(&self) -> Option<Trait> {
self.find_trait("core:convert:Into")
}
pub fn core_option_Option(&self) -> Option<Enum> {
self.find_enum("core:option:Option")
}
pub fn core_result_Result(&self) -> Option<Enum> {
self.find_enum("core:result:Result")
}
pub fn core_default_Default(&self) -> Option<Trait> {
self.find_trait("core:default:Default")
}
pub fn core_iter_Iterator(&self) -> Option<Trait> {
self.find_trait("core:iter:traits:iterator:Iterator")
}
pub fn core_iter_IntoIterator(&self) -> Option<Trait> {
self.find_trait("core:iter:traits:collect:IntoIterator")
}
pub fn core_iter(&self) -> Option<Module> {
self.find_module("core:iter")
}
pub fn core_ops_Deref(&self) -> Option<Trait> {
self.find_trait("core:ops:Deref")
}
fn find_trait(&self, path: &str) -> Option<Trait> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Trait(it)) => Some(it),
_ => None,
}
}
fn find_enum(&self, path: &str) -> Option<Enum> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Adt(hir::Adt::Enum(it))) => Some(it),
_ => None,
}
}
fn find_module(&self, path: &str) -> Option<Module> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Module(it)) => Some(it),
_ => None,
}
}
fn find_crate(&self, name: &str) -> Option<Crate> {
let krate = self.1?;
let db = self.0.db;
let res =
krate.dependencies(db).into_iter().find(|dep| dep.name.to_string() == name)?.krate; | let mut path = path.split(':');
let trait_ = path.next_back()?;
let std_crate = path.next()?;
let std_crate = self.find_crate(std_crate)?;
let mut module = std_crate.root_module(db);
for segment in path {
module = module.children(db).find_map(|child| {
let name = child.name(db)?;
if name.to_string() == segment {
Some(child)
} else {
None
}
})?;
}
let def =
module.scope(db, None).into_iter().find(|(name, _def)| name.to_string() == trait_)?.1;
Some(def)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SnippetCap {
_private: (),
}
impl SnippetCap {
pub const fn new(allow_snippets: bool) -> Option<SnippetCap> {
if allow_snippets {
Some(SnippetCap { _private: () })
} else {
None
}
}
}
/// Calls `cb` on each expression inside `expr` that is at "tail position".
/// Does not walk into `break` or `return` expressions.
pub fn for_each_tail_expr(expr: &ast::Expr, cb: &mut dyn FnMut(&ast::Expr)) {
match expr {
ast::Expr::BlockExpr(b) => {
if let Some(e) = b.tail_expr() {
for_each_tail_expr(&e, cb);
}
}
ast::Expr::EffectExpr(e) => match e.effect() {
ast::Effect::Label(label) => {
for_each_break_expr(Some(label), e.block_expr(), &mut |b| {
cb(&ast::Expr::BreakExpr(b))
});
if let Some(b) = e.block_expr() {
for_each_tail_expr(&ast::Expr::BlockExpr(b), cb);
}
}
ast::Effect::Unsafe(_) => {
if let Some(e) = e.block_expr().and_then(|b| b.tail_expr()) {
for_each_tail_expr(&e, cb);
}
}
ast::Effect::Async(_) | ast::Effect::Try(_) | ast::Effect::Const(_) => cb(expr),
},
ast::Expr::IfExpr(if_) => {
let mut if_ = if_.clone();
loop {
if let Some(block) = if_.then_branch() {
for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
}
match if_.else_branch() {
Some(ast::ElseBranch::IfExpr(it)) => if_ = it,
Some(ast::ElseBranch::Block(block)) => {
for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
break;
}
None => break,
}
}
}
ast::Expr::LoopExpr(l) => {
for_each_break_expr(l.label(), l.loop_body(), &mut |b| cb(&ast::Expr::BreakExpr(b)))
}
ast::Expr::MatchExpr(m) => {
if let Some(arms) = m.match_arm_list() {
arms.arms().filter_map(|arm| arm.expr()).for_each(|e| for_each_tail_expr(&e, cb));
}
}
ast::Expr::ArrayExpr(_)
| ast::Expr::AwaitExpr(_)
| ast::Expr::BinExpr(_)
| ast::Expr::BoxExpr(_)
| ast::Expr::BreakExpr(_)
| ast::Expr::CallExpr(_)
| ast::Expr::CastExpr(_)
| ast::Expr::ClosureExpr(_)
| ast::Expr::ContinueExpr(_)
| ast::Expr::FieldExpr(_)
| ast::Expr::ForExpr(_)
| ast::Expr::IndexExpr(_)
| ast::Expr::Literal(_)
| ast::Expr::MacroCall(_)
| ast::Expr::MacroStmts(_)
| ast::Expr::MethodCallExpr(_)
| ast::Expr::ParenExpr(_)
| ast::Expr::PathExpr(_)
| ast::Expr::PrefixExpr(_)
| ast::Expr::RangeExpr(_)
| ast::Expr::RecordExpr(_)
| ast::Expr::RefExpr(_)
| ast::Expr::ReturnExpr(_)
| ast::Expr::TryExpr(_)
| ast::Expr::TupleExpr(_)
| ast::Expr::WhileExpr(_)
| ast::Expr::YieldExpr(_) => cb(expr),
}
}
/// Calls `cb` on each break expr inside of `body` that is applicable for the given label.
pub fn for_each_break_expr(
label: Option<ast::Label>,
body: Option<ast::BlockExpr>,
cb: &mut dyn FnMut(ast::BreakExpr),
) {
let label = label.and_then(|lbl| lbl.lifetime());
let mut depth = 0;
if let Some(b) = body {
let preorder = &mut b.syntax().preorder();
let ev_as_expr = |ev| match ev {
WalkEvent::Enter(it) => Some(WalkEvent::Enter(ast::Expr::cast(it)?)),
WalkEvent::Leave(it) => Some(WalkEvent::Leave(ast::Expr::cast(it)?)),
};
let eq_label = |lt: Option<ast::Lifetime>| {
lt.zip(label.as_ref()).map_or(false, |(lt, lbl)| lt.text() == lbl.text())
};
while let Some(node) = preorder.find_map(ev_as_expr) {
match node {
WalkEvent::Enter(expr) => match expr {
ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_) => {
depth += 1
}
ast::Expr::EffectExpr(e) if e.label().is_some() => depth += 1,
ast::Expr::BreakExpr(b)
if (depth == 0 && b.lifetime().is_none()) || eq_label(b.lifetime()) =>
{
cb(b);
}
_ => (),
},
WalkEvent::Leave(expr) => match expr {
ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_) => {
depth -= 1
}
ast::Expr::EffectExpr(e) if e.label().is_some() => depth -= 1,
_ => (),
},
}
}
}
} | Some(res)
}
fn find_def(&self, path: &str) -> Option<ScopeDef> {
let db = self.0.db; | random_line_split |
helpers.rs | //! A module with ide helpers for high-level ide features.
pub mod import_assets;
pub mod insert_use;
pub mod merge_imports;
pub mod rust_doc;
pub mod generated_lints;
use std::collections::VecDeque;
use base_db::FileId;
use either::Either;
use hir::{Crate, Enum, ItemInNs, MacroDef, Module, ModuleDef, Name, ScopeDef, Semantics, Trait};
use syntax::{
ast::{self, make, LoopBodyOwner},
AstNode, Direction, SyntaxElement, SyntaxKind, SyntaxToken, TokenAtOffset, WalkEvent, T,
};
use crate::RootDatabase;
pub fn item_name(db: &RootDatabase, item: ItemInNs) -> Option<Name> {
match item {
ItemInNs::Types(module_def_id) => ModuleDef::from(module_def_id).name(db),
ItemInNs::Values(module_def_id) => ModuleDef::from(module_def_id).name(db),
ItemInNs::Macros(macro_def_id) => MacroDef::from(macro_def_id).name(db),
}
}
/// Resolves the path at the cursor token as a derive macro if it inside a token tree of a derive attribute.
pub fn try_resolve_derive_input_at(
sema: &Semantics<RootDatabase>,
derive_attr: &ast::Attr,
cursor: &SyntaxToken,
) -> Option<MacroDef> {
use itertools::Itertools;
if cursor.kind() != T![ident] {
return None;
}
let tt = match derive_attr.as_simple_call() {
Some((name, tt))
if name == "derive" && tt.syntax().text_range().contains_range(cursor.text_range()) =>
{
tt
}
_ => return None,
};
let tokens: Vec<_> = cursor
.siblings_with_tokens(Direction::Prev)
.flat_map(SyntaxElement::into_token)
.take_while(|tok| tok.kind() != T!['('] && tok.kind() != T![,])
.collect();
let path = ast::Path::parse(&tokens.into_iter().rev().join("")).ok()?;
match sema.scope(tt.syntax()).speculative_resolve(&path) {
Some(hir::PathResolution::Macro(makro)) if makro.kind() == hir::MacroKind::Derive => {
Some(makro)
}
_ => None,
}
}
/// Picks the token with the highest rank returned by the passed in function.
pub fn pick_best_token(
tokens: TokenAtOffset<SyntaxToken>,
f: impl Fn(SyntaxKind) -> usize,
) -> Option<SyntaxToken> {
tokens.max_by_key(move |t| f(t.kind()))
}
/// Converts the mod path struct into its ast representation.
pub fn mod_path_to_ast(path: &hir::ModPath) -> ast::Path {
let _p = profile::span("mod_path_to_ast");
let mut segments = Vec::new();
let mut is_abs = false;
match path.kind {
hir::PathKind::Plain => {}
hir::PathKind::Super(0) => segments.push(make::path_segment_self()),
hir::PathKind::Super(n) => segments.extend((0..n).map(|_| make::path_segment_super())),
hir::PathKind::DollarCrate(_) | hir::PathKind::Crate => {
segments.push(make::path_segment_crate())
}
hir::PathKind::Abs => is_abs = true,
}
segments.extend(
path.segments()
.iter()
.map(|segment| make::path_segment(make::name_ref(&segment.to_string()))),
);
make::path_from_segments(segments, is_abs)
}
/// Iterates all `ModuleDef`s and `Impl` blocks of the given file.
pub fn visit_file_defs(
sema: &Semantics<RootDatabase>,
file_id: FileId,
cb: &mut dyn FnMut(Either<hir::ModuleDef, hir::Impl>),
) {
let db = sema.db;
let module = match sema.to_module_def(file_id) {
Some(it) => it,
None => return,
};
let mut defs: VecDeque<_> = module.declarations(db).into();
while let Some(def) = defs.pop_front() {
if let ModuleDef::Module(submodule) = def {
if let hir::ModuleSource::Module(_) = submodule.definition_source(db).value {
defs.extend(submodule.declarations(db));
submodule.impl_defs(db).into_iter().for_each(|impl_| cb(Either::Right(impl_)));
}
}
cb(Either::Left(def));
}
module.impl_defs(db).into_iter().for_each(|impl_| cb(Either::Right(impl_)));
}
/// Helps with finding well-know things inside the standard library. This is
/// somewhat similar to the known paths infra inside hir, but it different; We
/// want to make sure that IDE specific paths don't become interesting inside
/// the compiler itself as well.
///
/// Note that, by default, rust-analyzer tests **do not** include core or std
/// libraries. If you are writing tests for functionality using [`FamousDefs`],
/// you'd want to include minicore (see `test_utils::MiniCore`) declaration at
/// the start of your tests:
///
/// ```
/// //- minicore: iterator, ord, derive
/// ```
pub struct FamousDefs<'a, 'b>(pub &'a Semantics<'b, RootDatabase>, pub Option<Crate>);
#[allow(non_snake_case)]
impl FamousDefs<'_, '_> {
pub fn std(&self) -> Option<Crate> {
self.find_crate("std")
}
pub fn core(&self) -> Option<Crate> {
self.find_crate("core")
}
pub fn core_cmp_Ord(&self) -> Option<Trait> {
self.find_trait("core:cmp:Ord")
}
pub fn core_convert_From(&self) -> Option<Trait> |
pub fn core_convert_Into(&self) -> Option<Trait> {
self.find_trait("core:convert:Into")
}
pub fn core_option_Option(&self) -> Option<Enum> {
self.find_enum("core:option:Option")
}
pub fn core_result_Result(&self) -> Option<Enum> {
self.find_enum("core:result:Result")
}
pub fn core_default_Default(&self) -> Option<Trait> {
self.find_trait("core:default:Default")
}
pub fn core_iter_Iterator(&self) -> Option<Trait> {
self.find_trait("core:iter:traits:iterator:Iterator")
}
pub fn core_iter_IntoIterator(&self) -> Option<Trait> {
self.find_trait("core:iter:traits:collect:IntoIterator")
}
pub fn core_iter(&self) -> Option<Module> {
self.find_module("core:iter")
}
pub fn core_ops_Deref(&self) -> Option<Trait> {
self.find_trait("core:ops:Deref")
}
fn find_trait(&self, path: &str) -> Option<Trait> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Trait(it)) => Some(it),
_ => None,
}
}
fn find_enum(&self, path: &str) -> Option<Enum> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Adt(hir::Adt::Enum(it))) => Some(it),
_ => None,
}
}
fn find_module(&self, path: &str) -> Option<Module> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Module(it)) => Some(it),
_ => None,
}
}
fn find_crate(&self, name: &str) -> Option<Crate> {
let krate = self.1?;
let db = self.0.db;
let res =
krate.dependencies(db).into_iter().find(|dep| dep.name.to_string() == name)?.krate;
Some(res)
}
fn find_def(&self, path: &str) -> Option<ScopeDef> {
let db = self.0.db;
let mut path = path.split(':');
let trait_ = path.next_back()?;
let std_crate = path.next()?;
let std_crate = self.find_crate(std_crate)?;
let mut module = std_crate.root_module(db);
for segment in path {
module = module.children(db).find_map(|child| {
let name = child.name(db)?;
if name.to_string() == segment {
Some(child)
} else {
None
}
})?;
}
let def =
module.scope(db, None).into_iter().find(|(name, _def)| name.to_string() == trait_)?.1;
Some(def)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SnippetCap {
_private: (),
}
impl SnippetCap {
pub const fn new(allow_snippets: bool) -> Option<SnippetCap> {
if allow_snippets {
Some(SnippetCap { _private: () })
} else {
None
}
}
}
/// Calls `cb` on each expression inside `expr` that is at "tail position".
/// Does not walk into `break` or `return` expressions.
pub fn for_each_tail_expr(expr: &ast::Expr, cb: &mut dyn FnMut(&ast::Expr)) {
match expr {
ast::Expr::BlockExpr(b) => {
if let Some(e) = b.tail_expr() {
for_each_tail_expr(&e, cb);
}
}
ast::Expr::EffectExpr(e) => match e.effect() {
ast::Effect::Label(label) => {
for_each_break_expr(Some(label), e.block_expr(), &mut |b| {
cb(&ast::Expr::BreakExpr(b))
});
if let Some(b) = e.block_expr() {
for_each_tail_expr(&ast::Expr::BlockExpr(b), cb);
}
}
ast::Effect::Unsafe(_) => {
if let Some(e) = e.block_expr().and_then(|b| b.tail_expr()) {
for_each_tail_expr(&e, cb);
}
}
ast::Effect::Async(_) | ast::Effect::Try(_) | ast::Effect::Const(_) => cb(expr),
},
ast::Expr::IfExpr(if_) => {
let mut if_ = if_.clone();
loop {
if let Some(block) = if_.then_branch() {
for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
}
match if_.else_branch() {
Some(ast::ElseBranch::IfExpr(it)) => if_ = it,
Some(ast::ElseBranch::Block(block)) => {
for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
break;
}
None => break,
}
}
}
ast::Expr::LoopExpr(l) => {
for_each_break_expr(l.label(), l.loop_body(), &mut |b| cb(&ast::Expr::BreakExpr(b)))
}
ast::Expr::MatchExpr(m) => {
if let Some(arms) = m.match_arm_list() {
arms.arms().filter_map(|arm| arm.expr()).for_each(|e| for_each_tail_expr(&e, cb));
}
}
ast::Expr::ArrayExpr(_)
| ast::Expr::AwaitExpr(_)
| ast::Expr::BinExpr(_)
| ast::Expr::BoxExpr(_)
| ast::Expr::BreakExpr(_)
| ast::Expr::CallExpr(_)
| ast::Expr::CastExpr(_)
| ast::Expr::ClosureExpr(_)
| ast::Expr::ContinueExpr(_)
| ast::Expr::FieldExpr(_)
| ast::Expr::ForExpr(_)
| ast::Expr::IndexExpr(_)
| ast::Expr::Literal(_)
| ast::Expr::MacroCall(_)
| ast::Expr::MacroStmts(_)
| ast::Expr::MethodCallExpr(_)
| ast::Expr::ParenExpr(_)
| ast::Expr::PathExpr(_)
| ast::Expr::PrefixExpr(_)
| ast::Expr::RangeExpr(_)
| ast::Expr::RecordExpr(_)
| ast::Expr::RefExpr(_)
| ast::Expr::ReturnExpr(_)
| ast::Expr::TryExpr(_)
| ast::Expr::TupleExpr(_)
| ast::Expr::WhileExpr(_)
| ast::Expr::YieldExpr(_) => cb(expr),
}
}
/// Calls `cb` on each break expr inside of `body` that is applicable for the given label.
pub fn for_each_break_expr(
label: Option<ast::Label>,
body: Option<ast::BlockExpr>,
cb: &mut dyn FnMut(ast::BreakExpr),
) {
let label = label.and_then(|lbl| lbl.lifetime());
let mut depth = 0;
if let Some(b) = body {
let preorder = &mut b.syntax().preorder();
let ev_as_expr = |ev| match ev {
WalkEvent::Enter(it) => Some(WalkEvent::Enter(ast::Expr::cast(it)?)),
WalkEvent::Leave(it) => Some(WalkEvent::Leave(ast::Expr::cast(it)?)),
};
let eq_label = |lt: Option<ast::Lifetime>| {
lt.zip(label.as_ref()).map_or(false, |(lt, lbl)| lt.text() == lbl.text())
};
while let Some(node) = preorder.find_map(ev_as_expr) {
match node {
WalkEvent::Enter(expr) => match expr {
ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_) => {
depth += 1
}
ast::Expr::EffectExpr(e) if e.label().is_some() => depth += 1,
ast::Expr::BreakExpr(b)
if (depth == 0 && b.lifetime().is_none()) || eq_label(b.lifetime()) =>
{
cb(b);
}
_ => (),
},
WalkEvent::Leave(expr) => match expr {
ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_) => {
depth -= 1
}
ast::Expr::EffectExpr(e) if e.label().is_some() => depth -= 1,
_ => (),
},
}
}
}
}
| {
self.find_trait("core:convert:From")
} | identifier_body |
nanolog.go | // Copyright 2017 Scott Mansfield
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package nanolog is a package to speed up your logging.
//
// The format string is inspired by the full fledged fmt.Fprintf function. The
// codes are unique to this package, so normal fmt documentation is not be applicable.
//
// The format string is similar to fmt in that it uses the percent sign (a.k.a.
// the modulo operator) to signify the start of a format code. The reader is
// greedy, meaning that the parser will attempt to read as much as it can for a
// code before it stops. E.g. if you have a generic int in the middle of your
// format string immediately followed by the number 1 and a space ("%i1 "), the
// parser may complain saying that it encountered an invalid code. To fix this,
// use curly braces after the percent sign to surround the code: "%{i}1 ".
//
// Kinds and their corresponding format codes:
//
// Kind | Code
// --------------|-------------
// Bool | b
// Int | i
// Int8 | i8
// Int16 | i16
// Int32 | i32
// Int64 | i64
// Uint | u
// Uint8 | u8
// Uint16 | u16
// Uint32 | u32
// Uint64 | u64
// Uintptr |
// Float32 | f32
// Float64 | f64
// Complex64 | c64
// Complex128 | c128
// Array |
// Chan |
// Func |
// Interface |
// Map |
// Ptr |
// Slice |
// String | s
// Struct |
// UnsafePointer |
//
// The file format has two categories of data:
//
// 1. Log line information to reconstruct logs later
// 2. Actual log entries
//
// The differentiation is done with the entryType, which is prefixed on to the record.
//
// The log line records are formatted as follows:
//
// - type: 1 byte - ETLogLine (1)
// - id: 4 bytes - little endian uint32
// - # of string segs: 4 bytes - little endian uint32
// - kinds: (#segs - 1) bytes, each being a reflect.Kind
// - segments:
// - string length: 4 bytes - little endian uint32
// - string data: ^length bytes
//
// The log entry records are formatted as follows:
//
// - type: 1 byte - ETLogEntry (2)
// - line id: 4 bytes - little endian uint32
// - data+: var bytes - all the corresponding data for the kinds in the log line entry
//
// The data is serialized as follows:
//
// - Bool: 1 byte
// - False: 0 or True: 1
//
// - String: 4 + len(string) bytes
// - Length: 4 bytes - little endian uint32
// - String bytes: Length bytes
//
// - int family:
// - int: 8 bytes - int64 as little endian uint64
// - int8: 1 byte
// - int16: 2 bytes - int16 as little endian uint16
// - int32: 4 bytes - int32 as little endian uint32
// - int64: 8 bytes - int64 as little endian uint64
//
// - uint family:
// - uint: 8 bytes - little endian uint64
// - uint8: 1 byte
// - uint16: 2 bytes - little endian uint16
// - uint32: 4 bytes - little endian uint32
// - uint64: 8 bytes - little endian uint64
//
// - float32:
// - 4 bytes as little endian uint32 from float32 bits
//
// - float64:
// - 8 bytes as little endian uint64 from float64 bits
//
// - complex64:
// - Real: 4 bytes as little endian uint32 from float32 bits
// - Complex: 4 bytes as little endian uint32 from float32 bits
//
// - complex128:
// - Real: 8 bytes as little endian uint64 from float64 bits
// - Complex: 8 bytes as little endian uint64 from float64 bits
package nanolog
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
"reflect"
"strings"
"sync"
"sync/atomic"
"unicode/utf8"
)
// MaxLoggers is the maximum number of different loggers that are allowed
const MaxLoggers = 10240
// Handle is a simple handle to an internal logging data structure
// LogHandles are returned by the AddLogger method and used by the Log method to
// actually log data.
type Handle uint32
// EntryType is an enum that represents the record headers in the output files to
// differentiate between log lines and log entries
type EntryType byte
const (
// ETInvalid is an invalid EntryType
ETInvalid EntryType = iota
// ETLogLine means the log line data for a single call to AddLogger is ahead
ETLogLine
// ETLogEntry means the log data for a single call to Log is ahead
ETLogEntry
)
// Logger is the internal struct representing the runtime state of the loggers.
// The Segs field is not used during logging; it is only used in the inflate
// utility but is kept during execution in case it is needed for debugging
type Logger struct {
Kinds []reflect.Kind
Segs []string
}
var defaultLogWriter = New()
type LogWriter interface {
// SetWriter will set up efficient writing for the log to the output stream given.
// A raw IO stream is best. The first time SetWriter is called any logs that were
// created or posted before the call will be sent to the writer all in one go.
SetWriter(new io.Writer) error
// Flush ensures all log entries written up to this point are written to the underlying io.Writer
Flush() error
// AddLogger initializes a logger and returns a handle for future logging
AddLogger(fmt string) Handle
// Log logs to the output stream
Log(handle Handle, args ...interface{}) error
// Debug dump of information about a handle
DebugDump(handle Handle) string
}
type logWriter struct {
initBuf *bytes.Buffer
w *bufio.Writer
firstSet bool
writeLock sync.Locker
loggers []Logger
curLoggersIdx *uint32
}
// New creates a new LogWriter
func New() LogWriter {
initBuf := &bytes.Buffer{}
return &logWriter{
initBuf: initBuf,
w: bufio.NewWriter(initBuf),
firstSet: true,
writeLock: new(sync.Mutex),
loggers: make([]Logger, MaxLoggers),
curLoggersIdx: new(uint32),
}
}
// SetWriter calls LogWriter.SetWriter on the default log writer.
func SetWriter(new io.Writer) error {
return defaultLogWriter.SetWriter(new)
}
func (lw *logWriter) SetWriter(new io.Writer) error {
// grab write lock to ensure no problems
lw.writeLock.Lock()
defer lw.writeLock.Unlock()
if err := lw.w.Flush(); err != nil {
return err
}
lw.w = bufio.NewWriter(new)
if lw.firstSet {
lw.firstSet = false
if _, err := lw.initBuf.WriteTo(lw.w); err != nil {
return err
}
}
return nil
}
// Flush calls LogWriter.Flush on the default log writer.
func Flush() error {
return defaultLogWriter.Flush()
}
func (lw *logWriter) Flush() error {
// grab write lock to ensure no prblems
lw.writeLock.Lock()
defer lw.writeLock.Unlock()
return lw.w.Flush()
}
// AddLogger calls LogWriter.AddLogger on the default log writer.
func AddLogger(fmt string) Handle {
return defaultLogWriter.AddLogger(fmt)
} |
if idx >= MaxLoggers {
panic("Too many loggers")
}
l := parseLogLine(fmt)
lw.loggers[idx] = l
lw.writeLogLineHeader(idx, l.Kinds, l.Segs)
return Handle(idx)
}
func parseLogLine(gold string) Logger {
// make a copy we can destroy
tmp := gold
f := &tmp
var kinds []reflect.Kind
var segs []string
var curseg []rune
for len(*f) > 0 {
if r := next(f); r != '%' {
curseg = append(curseg, r)
continue
}
// Literal % sign
if peek(f) == '%' {
next(f)
curseg = append(curseg, '%')
continue
}
segs = append(segs, string(curseg))
curseg = curseg[:0]
var requireBrace bool
// Optional curly braces around format
r := next(f)
if r == '{' {
requireBrace = true
r = next(f)
}
// optimized parse tree
switch r {
case 'b':
kinds = append(kinds, reflect.Bool)
case 's':
kinds = append(kinds, reflect.String)
case 'i':
if len(*f) == 0 {
kinds = append(kinds, reflect.Int)
break
}
r := peek(f)
switch r {
case '8':
next(f)
kinds = append(kinds, reflect.Int8)
case '1':
next(f)
if next(f) != '6' {
logpanic("Was expecting i16.", gold)
}
kinds = append(kinds, reflect.Int16)
case '3':
next(f)
if next(f) != '2' {
logpanic("Was expecting i32.", gold)
}
kinds = append(kinds, reflect.Int32)
case '6':
next(f)
if next(f) != '4' {
logpanic("Was expecting i64.", gold)
}
kinds = append(kinds, reflect.Int64)
default:
kinds = append(kinds, reflect.Int)
}
case 'u':
if len(*f) == 0 {
kinds = append(kinds, reflect.Uint)
break
}
r := peek(f)
switch r {
case '8':
next(f)
kinds = append(kinds, reflect.Uint8)
case '1':
next(f)
if next(f) != '6' {
logpanic("Was expecting u16.", gold)
}
kinds = append(kinds, reflect.Uint16)
case '3':
next(f)
if next(f) != '2' {
logpanic("Was expecting u32.", gold)
}
kinds = append(kinds, reflect.Uint32)
case '6':
next(f)
if next(f) != '4' {
logpanic("Was expecting u64.", gold)
}
kinds = append(kinds, reflect.Uint64)
default:
kinds = append(kinds, reflect.Uint)
}
case 'f':
r := peek(f)
switch r {
case '3':
next(f)
if next(f) != '2' {
logpanic("Was expecting f32.", gold)
}
kinds = append(kinds, reflect.Float32)
case '6':
next(f)
if next(f) != '4' {
logpanic("Was expecting f64.", gold)
}
kinds = append(kinds, reflect.Float64)
default:
logpanic("Expecting either f32 or f64", gold)
}
case 'c':
r := peek(f)
switch r {
case '6':
next(f)
if next(f) != '4' {
logpanic("Was expecting c64.", gold)
}
kinds = append(kinds, reflect.Complex64)
case '1':
next(f)
if next(f) != '2' {
logpanic("Was expecting c128.", gold)
}
if next(f) != '8' {
logpanic("Was expecting c128.", gold)
}
kinds = append(kinds, reflect.Complex128)
default:
logpanic("Expecting either c64 or c128", gold)
}
default:
logpanic("Invalid replace sequence", gold)
}
if requireBrace {
if len(*f) == 0 {
logpanic("Missing '}' character at end of line", gold)
}
if next(f) != '}' {
logpanic("Missing '}' character", gold)
}
}
}
segs = append(segs, string(curseg))
return Logger{
Kinds: kinds,
Segs: segs,
}
}
func peek(s *string) rune {
r, _ := utf8.DecodeRuneInString(*s)
if r == utf8.RuneError {
panic("Malformed log string")
}
return r
}
func next(s *string) rune {
r, n := utf8.DecodeRuneInString(*s)
*s = (*s)[n:]
if r == utf8.RuneError {
panic("Malformed log string")
}
return r
}
func (lw *logWriter) writeLogLineHeader(idx uint32, kinds []reflect.Kind, segs []string) {
buf := &bytes.Buffer{}
b := make([]byte, 4)
// write log line record identifier
buf.WriteByte(byte(ETLogLine))
// write log identifier
binary.LittleEndian.PutUint32(b, idx)
buf.Write(b)
// write number of string segments between variable parts
// we don't need to write the number of kinds here because it is always
// equal to the number of segments minus 1
if len(segs) > math.MaxUint32 {
// what the hell are you logging?!
panic("Too many log line segments")
}
binary.LittleEndian.PutUint32(b, uint32(len(segs)))
buf.Write(b)
// write out all the kinds. These are cast to a byte because their values all
// fit into a byte and it saves a little space
for _, k := range kinds {
buf.WriteByte(byte(k))
}
// write all the segments, lengths first then string bytes for each
for _, s := range segs {
binary.LittleEndian.PutUint32(b, uint32(len(s)))
buf.Write(b)
buf.WriteString(s)
}
// finally write all of it together to the output
lw.w.Write(buf.Bytes())
}
// helper function to have consistently formatted panics and shorter code above
func logpanic(msg, gold string) {
panic(fmt.Sprintf("Malformed log format string. %s.\n%s", msg, gold))
}
var (
bufpool = &sync.Pool{
New: func() interface{} {
temp := make([]byte, 1024) // 1k default size
return &temp
},
}
)
// Log calls LogWriter.Log on the default log writer.
func Log(handle Handle, args ...interface{}) error {
return defaultLogWriter.Log(handle, args...)
}
func (lw *logWriter) Log(handle Handle, args ...interface{}) error {
l := lw.loggers[handle]
if len(l.Kinds) != len(args) {
panic("Number of args does not match log line")
}
buf := bufpool.Get().(*[]byte)
*buf = (*buf)[:0]
b := make([]byte, 8)
*buf = append(*buf, byte(ETLogEntry))
binary.LittleEndian.PutUint32(b, uint32(handle))
*buf = append(*buf, b[:4]...)
for idx := range l.Kinds {
if l.Kinds[idx] != reflect.TypeOf(args[idx]).Kind() {
panic("Argument type does not match log line")
}
// write serialized version to writer
switch l.Kinds[idx] {
case reflect.Bool:
if args[idx].(bool) {
*buf = append(*buf, 1)
} else {
*buf = append(*buf, 0)
}
case reflect.String:
s := args[idx].(string)
binary.LittleEndian.PutUint32(b, uint32(len(s)))
*buf = append(*buf, b[:4]...)
*buf = append(*buf, s...)
// ints
case reflect.Int:
// Assume generic int is 64 bit
i := args[idx].(int)
binary.LittleEndian.PutUint64(b, uint64(i))
*buf = append(*buf, b...)
case reflect.Int8:
i := args[idx].(int8)
*buf = append(*buf, byte(i))
case reflect.Int16:
i := args[idx].(int16)
binary.LittleEndian.PutUint16(b, uint16(i))
*buf = append(*buf, b[:2]...)
case reflect.Int32:
i := args[idx].(int32)
binary.LittleEndian.PutUint32(b, uint32(i))
*buf = append(*buf, b[:4]...)
case reflect.Int64:
i := args[idx].(int64)
binary.LittleEndian.PutUint64(b, uint64(i))
*buf = append(*buf, b...)
// uints
case reflect.Uint:
// Assume generic uint is 64 bit
i := args[idx].(uint)
binary.LittleEndian.PutUint64(b, uint64(i))
*buf = append(*buf, b...)
case reflect.Uint8:
i := args[idx].(uint8)
*buf = append(*buf, byte(i))
case reflect.Uint16:
i := args[idx].(uint16)
binary.LittleEndian.PutUint16(b, i)
*buf = append(*buf, b[:2]...)
case reflect.Uint32:
i := args[idx].(uint32)
binary.LittleEndian.PutUint32(b, i)
*buf = append(*buf, b[:4]...)
case reflect.Uint64:
i := args[idx].(uint64)
binary.LittleEndian.PutUint64(b, i)
*buf = append(*buf, b...)
// floats
case reflect.Float32:
f := args[idx].(float32)
i := math.Float32bits(f)
binary.LittleEndian.PutUint32(b, i)
*buf = append(*buf, b[:4]...)
case reflect.Float64:
f := args[idx].(float64)
i := math.Float64bits(f)
binary.LittleEndian.PutUint64(b, i)
*buf = append(*buf, b...)
// complex
case reflect.Complex64:
c := args[idx].(complex64)
f := real(c)
i := math.Float32bits(f)
binary.LittleEndian.PutUint32(b, i)
*buf = append(*buf, b[:4]...)
f = imag(c)
i = math.Float32bits(f)
binary.LittleEndian.PutUint32(b, i)
*buf = append(*buf, b[:4]...)
case reflect.Complex128:
c := args[idx].(complex128)
f := real(c)
i := math.Float64bits(f)
binary.LittleEndian.PutUint64(b, i)
*buf = append(*buf, b...)
f = imag(c)
i = math.Float64bits(f)
binary.LittleEndian.PutUint64(b, i)
*buf = append(*buf, b...)
default:
panic(fmt.Sprintf("Invalid Kind in logger: %v", l.Kinds[idx]))
}
}
lw.writeLock.Lock()
_, err := lw.w.Write(*buf)
lw.writeLock.Unlock()
bufpool.Put(buf)
return err
}
// DebugDump calls LogWriter.DebugDump on the default log writer.
func DebugDump(handle Handle) string {
return defaultLogWriter.DebugDump(handle)
}
func (lw *logWriter) DebugDump(handle Handle) string {
sb := &strings.Builder{}
l := lw.loggers[handle]
for i := 0; i < len(l.Kinds); i++ {
sb.WriteString("+\"")
sb.WriteString(l.Segs[i])
sb.WriteString("\"+")
sb.WriteString("<")
sb.WriteString(l.Kinds[i].String())
sb.WriteString(">")
}
// write the last segment
sb.WriteString("+\"")
sb.WriteString(l.Segs[len(l.Segs)-1])
sb.WriteString("\"+")
return sb.String()
} |
func (lw *logWriter) AddLogger(fmt string) Handle {
// save some kind of string format to the file
idx := atomic.AddUint32(lw.curLoggersIdx, 1) - 1 | random_line_split |
nanolog.go | // Copyright 2017 Scott Mansfield
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package nanolog is a package to speed up your logging.
//
// The format string is inspired by the full fledged fmt.Fprintf function. The
// codes are unique to this package, so normal fmt documentation is not be applicable.
//
// The format string is similar to fmt in that it uses the percent sign (a.k.a.
// the modulo operator) to signify the start of a format code. The reader is
// greedy, meaning that the parser will attempt to read as much as it can for a
// code before it stops. E.g. if you have a generic int in the middle of your
// format string immediately followed by the number 1 and a space ("%i1 "), the
// parser may complain saying that it encountered an invalid code. To fix this,
// use curly braces after the percent sign to surround the code: "%{i}1 ".
//
// Kinds and their corresponding format codes:
//
// Kind | Code
// --------------|-------------
// Bool | b
// Int | i
// Int8 | i8
// Int16 | i16
// Int32 | i32
// Int64 | i64
// Uint | u
// Uint8 | u8
// Uint16 | u16
// Uint32 | u32
// Uint64 | u64
// Uintptr |
// Float32 | f32
// Float64 | f64
// Complex64 | c64
// Complex128 | c128
// Array |
// Chan |
// Func |
// Interface |
// Map |
// Ptr |
// Slice |
// String | s
// Struct |
// UnsafePointer |
//
// The file format has two categories of data:
//
// 1. Log line information to reconstruct logs later
// 2. Actual log entries
//
// The differentiation is done with the entryType, which is prefixed on to the record.
//
// The log line records are formatted as follows:
//
// - type: 1 byte - ETLogLine (1)
// - id: 4 bytes - little endian uint32
// - # of string segs: 4 bytes - little endian uint32
// - kinds: (#segs - 1) bytes, each being a reflect.Kind
// - segments:
// - string length: 4 bytes - little endian uint32
// - string data: ^length bytes
//
// The log entry records are formatted as follows:
//
// - type: 1 byte - ETLogEntry (2)
// - line id: 4 bytes - little endian uint32
// - data+: var bytes - all the corresponding data for the kinds in the log line entry
//
// The data is serialized as follows:
//
// - Bool: 1 byte
// - False: 0 or True: 1
//
// - String: 4 + len(string) bytes
// - Length: 4 bytes - little endian uint32
// - String bytes: Length bytes
//
// - int family:
// - int: 8 bytes - int64 as little endian uint64
// - int8: 1 byte
// - int16: 2 bytes - int16 as little endian uint16
// - int32: 4 bytes - int32 as little endian uint32
// - int64: 8 bytes - int64 as little endian uint64
//
// - uint family:
// - uint: 8 bytes - little endian uint64
// - uint8: 1 byte
// - uint16: 2 bytes - little endian uint16
// - uint32: 4 bytes - little endian uint32
// - uint64: 8 bytes - little endian uint64
//
// - float32:
// - 4 bytes as little endian uint32 from float32 bits
//
// - float64:
// - 8 bytes as little endian uint64 from float64 bits
//
// - complex64:
// - Real: 4 bytes as little endian uint32 from float32 bits
// - Complex: 4 bytes as little endian uint32 from float32 bits
//
// - complex128:
// - Real: 8 bytes as little endian uint64 from float64 bits
// - Complex: 8 bytes as little endian uint64 from float64 bits
package nanolog
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
"reflect"
"strings"
"sync"
"sync/atomic"
"unicode/utf8"
)
// MaxLoggers is the maximum number of different loggers that are allowed
const MaxLoggers = 10240
// Handle is a simple handle to an internal logging data structure
// LogHandles are returned by the AddLogger method and used by the Log method to
// actually log data.
type Handle uint32
// EntryType is an enum that represents the record headers in the output files to
// differentiate between log lines and log entries
type EntryType byte
const (
// ETInvalid is an invalid EntryType
ETInvalid EntryType = iota
// ETLogLine means the log line data for a single call to AddLogger is ahead
ETLogLine
// ETLogEntry means the log data for a single call to Log is ahead
ETLogEntry
)
// Logger is the internal struct representing the runtime state of the loggers.
// The Segs field is not used during logging; it is only used in the inflate
// utility but is kept during execution in case it is needed for debugging
type Logger struct {
Kinds []reflect.Kind
Segs []string
}
var defaultLogWriter = New()
type LogWriter interface {
// SetWriter will set up efficient writing for the log to the output stream given.
// A raw IO stream is best. The first time SetWriter is called any logs that were
// created or posted before the call will be sent to the writer all in one go.
SetWriter(new io.Writer) error
// Flush ensures all log entries written up to this point are written to the underlying io.Writer
Flush() error
// AddLogger initializes a logger and returns a handle for future logging
AddLogger(fmt string) Handle
// Log logs to the output stream
Log(handle Handle, args ...interface{}) error
// Debug dump of information about a handle
DebugDump(handle Handle) string
}
type logWriter struct {
initBuf *bytes.Buffer
w *bufio.Writer
firstSet bool
writeLock sync.Locker
loggers []Logger
curLoggersIdx *uint32
}
// New creates a new LogWriter
func New() LogWriter {
initBuf := &bytes.Buffer{}
return &logWriter{
initBuf: initBuf,
w: bufio.NewWriter(initBuf),
firstSet: true,
writeLock: new(sync.Mutex),
loggers: make([]Logger, MaxLoggers),
curLoggersIdx: new(uint32),
}
}
// SetWriter calls LogWriter.SetWriter on the default log writer.
func SetWriter(new io.Writer) error {
return defaultLogWriter.SetWriter(new)
}
func (lw *logWriter) SetWriter(new io.Writer) error {
// grab write lock to ensure no problems
lw.writeLock.Lock()
defer lw.writeLock.Unlock()
if err := lw.w.Flush(); err != nil {
return err
}
lw.w = bufio.NewWriter(new)
if lw.firstSet {
lw.firstSet = false
if _, err := lw.initBuf.WriteTo(lw.w); err != nil {
return err
}
}
return nil
}
// Flush calls LogWriter.Flush on the default log writer.
func Flush() error {
return defaultLogWriter.Flush()
}
func (lw *logWriter) Flush() error {
// grab write lock to ensure no prblems
lw.writeLock.Lock()
defer lw.writeLock.Unlock()
return lw.w.Flush()
}
// AddLogger calls LogWriter.AddLogger on the default log writer.
func AddLogger(fmt string) Handle {
return defaultLogWriter.AddLogger(fmt)
}
func (lw *logWriter) AddLogger(fmt string) Handle {
// save some kind of string format to the file
idx := atomic.AddUint32(lw.curLoggersIdx, 1) - 1
if idx >= MaxLoggers {
panic("Too many loggers")
}
l := parseLogLine(fmt)
lw.loggers[idx] = l
lw.writeLogLineHeader(idx, l.Kinds, l.Segs)
return Handle(idx)
}
func parseLogLine(gold string) Logger {
// make a copy we can destroy
tmp := gold
f := &tmp
var kinds []reflect.Kind
var segs []string
var curseg []rune
for len(*f) > 0 {
if r := next(f); r != '%' {
curseg = append(curseg, r)
continue
}
// Literal % sign
if peek(f) == '%' {
next(f)
curseg = append(curseg, '%')
continue
}
segs = append(segs, string(curseg))
curseg = curseg[:0]
var requireBrace bool
// Optional curly braces around format
r := next(f)
if r == '{' {
requireBrace = true
r = next(f)
}
// optimized parse tree
switch r {
case 'b':
kinds = append(kinds, reflect.Bool)
case 's':
kinds = append(kinds, reflect.String)
case 'i':
if len(*f) == 0 {
kinds = append(kinds, reflect.Int)
break
}
r := peek(f)
switch r {
case '8':
next(f)
kinds = append(kinds, reflect.Int8)
case '1':
next(f)
if next(f) != '6' {
logpanic("Was expecting i16.", gold)
}
kinds = append(kinds, reflect.Int16)
case '3':
next(f)
if next(f) != '2' {
logpanic("Was expecting i32.", gold)
}
kinds = append(kinds, reflect.Int32)
case '6':
next(f)
if next(f) != '4' {
logpanic("Was expecting i64.", gold)
}
kinds = append(kinds, reflect.Int64)
default:
kinds = append(kinds, reflect.Int)
}
case 'u':
if len(*f) == 0 {
kinds = append(kinds, reflect.Uint)
break
}
r := peek(f)
switch r {
case '8':
next(f)
kinds = append(kinds, reflect.Uint8)
case '1':
next(f)
if next(f) != '6' {
logpanic("Was expecting u16.", gold)
}
kinds = append(kinds, reflect.Uint16)
case '3':
next(f)
if next(f) != '2' {
logpanic("Was expecting u32.", gold)
}
kinds = append(kinds, reflect.Uint32)
case '6':
next(f)
if next(f) != '4' {
logpanic("Was expecting u64.", gold)
}
kinds = append(kinds, reflect.Uint64)
default:
kinds = append(kinds, reflect.Uint)
}
case 'f':
r := peek(f)
switch r {
case '3':
next(f)
if next(f) != '2' {
logpanic("Was expecting f32.", gold)
}
kinds = append(kinds, reflect.Float32)
case '6':
next(f)
if next(f) != '4' {
logpanic("Was expecting f64.", gold)
}
kinds = append(kinds, reflect.Float64)
default:
logpanic("Expecting either f32 or f64", gold)
}
case 'c':
r := peek(f)
switch r {
case '6':
next(f)
if next(f) != '4' {
logpanic("Was expecting c64.", gold)
}
kinds = append(kinds, reflect.Complex64)
case '1':
next(f)
if next(f) != '2' {
logpanic("Was expecting c128.", gold)
}
if next(f) != '8' {
logpanic("Was expecting c128.", gold)
}
kinds = append(kinds, reflect.Complex128)
default:
logpanic("Expecting either c64 or c128", gold)
}
default:
logpanic("Invalid replace sequence", gold)
}
if requireBrace |
}
segs = append(segs, string(curseg))
return Logger{
Kinds: kinds,
Segs: segs,
}
}
func peek(s *string) rune {
r, _ := utf8.DecodeRuneInString(*s)
if r == utf8.RuneError {
panic("Malformed log string")
}
return r
}
func next(s *string) rune {
r, n := utf8.DecodeRuneInString(*s)
*s = (*s)[n:]
if r == utf8.RuneError {
panic("Malformed log string")
}
return r
}
func (lw *logWriter) writeLogLineHeader(idx uint32, kinds []reflect.Kind, segs []string) {
buf := &bytes.Buffer{}
b := make([]byte, 4)
// write log line record identifier
buf.WriteByte(byte(ETLogLine))
// write log identifier
binary.LittleEndian.PutUint32(b, idx)
buf.Write(b)
// write number of string segments between variable parts
// we don't need to write the number of kinds here because it is always
// equal to the number of segments minus 1
if len(segs) > math.MaxUint32 {
// what the hell are you logging?!
panic("Too many log line segments")
}
binary.LittleEndian.PutUint32(b, uint32(len(segs)))
buf.Write(b)
// write out all the kinds. These are cast to a byte because their values all
// fit into a byte and it saves a little space
for _, k := range kinds {
buf.WriteByte(byte(k))
}
// write all the segments, lengths first then string bytes for each
for _, s := range segs {
binary.LittleEndian.PutUint32(b, uint32(len(s)))
buf.Write(b)
buf.WriteString(s)
}
// finally write all of it together to the output
lw.w.Write(buf.Bytes())
}
// helper function to have consistently formatted panics and shorter code above
func logpanic(msg, gold string) {
panic(fmt.Sprintf("Malformed log format string. %s.\n%s", msg, gold))
}
var (
bufpool = &sync.Pool{
New: func() interface{} {
temp := make([]byte, 1024) // 1k default size
return &temp
},
}
)
// Log calls LogWriter.Log on the default log writer.
func Log(handle Handle, args ...interface{}) error {
return defaultLogWriter.Log(handle, args...)
}
func (lw *logWriter) Log(handle Handle, args ...interface{}) error {
l := lw.loggers[handle]
if len(l.Kinds) != len(args) {
panic("Number of args does not match log line")
}
buf := bufpool.Get().(*[]byte)
*buf = (*buf)[:0]
b := make([]byte, 8)
*buf = append(*buf, byte(ETLogEntry))
binary.LittleEndian.PutUint32(b, uint32(handle))
*buf = append(*buf, b[:4]...)
for idx := range l.Kinds {
if l.Kinds[idx] != reflect.TypeOf(args[idx]).Kind() {
panic("Argument type does not match log line")
}
// write serialized version to writer
switch l.Kinds[idx] {
case reflect.Bool:
if args[idx].(bool) {
*buf = append(*buf, 1)
} else {
*buf = append(*buf, 0)
}
case reflect.String:
s := args[idx].(string)
binary.LittleEndian.PutUint32(b, uint32(len(s)))
*buf = append(*buf, b[:4]...)
*buf = append(*buf, s...)
// ints
case reflect.Int:
// Assume generic int is 64 bit
i := args[idx].(int)
binary.LittleEndian.PutUint64(b, uint64(i))
*buf = append(*buf, b...)
case reflect.Int8:
i := args[idx].(int8)
*buf = append(*buf, byte(i))
case reflect.Int16:
i := args[idx].(int16)
binary.LittleEndian.PutUint16(b, uint16(i))
*buf = append(*buf, b[:2]...)
case reflect.Int32:
i := args[idx].(int32)
binary.LittleEndian.PutUint32(b, uint32(i))
*buf = append(*buf, b[:4]...)
case reflect.Int64:
i := args[idx].(int64)
binary.LittleEndian.PutUint64(b, uint64(i))
*buf = append(*buf, b...)
// uints
case reflect.Uint:
// Assume generic uint is 64 bit
i := args[idx].(uint)
binary.LittleEndian.PutUint64(b, uint64(i))
*buf = append(*buf, b...)
case reflect.Uint8:
i := args[idx].(uint8)
*buf = append(*buf, byte(i))
case reflect.Uint16:
i := args[idx].(uint16)
binary.LittleEndian.PutUint16(b, i)
*buf = append(*buf, b[:2]...)
case reflect.Uint32:
i := args[idx].(uint32)
binary.LittleEndian.PutUint32(b, i)
*buf = append(*buf, b[:4]...)
case reflect.Uint64:
i := args[idx].(uint64)
binary.LittleEndian.PutUint64(b, i)
*buf = append(*buf, b...)
// floats
case reflect.Float32:
f := args[idx].(float32)
i := math.Float32bits(f)
binary.LittleEndian.PutUint32(b, i)
*buf = append(*buf, b[:4]...)
case reflect.Float64:
f := args[idx].(float64)
i := math.Float64bits(f)
binary.LittleEndian.PutUint64(b, i)
*buf = append(*buf, b...)
// complex
case reflect.Complex64:
c := args[idx].(complex64)
f := real(c)
i := math.Float32bits(f)
binary.LittleEndian.PutUint32(b, i)
*buf = append(*buf, b[:4]...)
f = imag(c)
i = math.Float32bits(f)
binary.LittleEndian.PutUint32(b, i)
*buf = append(*buf, b[:4]...)
case reflect.Complex128:
c := args[idx].(complex128)
f := real(c)
i := math.Float64bits(f)
binary.LittleEndian.PutUint64(b, i)
*buf = append(*buf, b...)
f = imag(c)
i = math.Float64bits(f)
binary.LittleEndian.PutUint64(b, i)
*buf = append(*buf, b...)
default:
panic(fmt.Sprintf("Invalid Kind in logger: %v", l.Kinds[idx]))
}
}
lw.writeLock.Lock()
_, err := lw.w.Write(*buf)
lw.writeLock.Unlock()
bufpool.Put(buf)
return err
}
// DebugDump calls LogWriter.DebugDump on the default log writer.
func DebugDump(handle Handle) string {
return defaultLogWriter.DebugDump(handle)
}
func (lw *logWriter) DebugDump(handle Handle) string {
sb := &strings.Builder{}
l := lw.loggers[handle]
for i := 0; i < len(l.Kinds); i++ {
sb.WriteString("+\"")
sb.WriteString(l.Segs[i])
sb.WriteString("\"+")
sb.WriteString("<")
sb.WriteString(l.Kinds[i].String())
sb.WriteString(">")
}
// write the last segment
sb.WriteString("+\"")
sb.WriteString(l.Segs[len(l.Segs)-1])
sb.WriteString("\"+")
return sb.String()
}
| {
if len(*f) == 0 {
logpanic("Missing '}' character at end of line", gold)
}
if next(f) != '}' {
logpanic("Missing '}' character", gold)
}
} | conditional_block |
nanolog.go | // Copyright 2017 Scott Mansfield
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package nanolog is a package to speed up your logging.
//
// The format string is inspired by the full fledged fmt.Fprintf function. The
// codes are unique to this package, so normal fmt documentation is not be applicable.
//
// The format string is similar to fmt in that it uses the percent sign (a.k.a.
// the modulo operator) to signify the start of a format code. The reader is
// greedy, meaning that the parser will attempt to read as much as it can for a
// code before it stops. E.g. if you have a generic int in the middle of your
// format string immediately followed by the number 1 and a space ("%i1 "), the
// parser may complain saying that it encountered an invalid code. To fix this,
// use curly braces after the percent sign to surround the code: "%{i}1 ".
//
// Kinds and their corresponding format codes:
//
// Kind | Code
// --------------|-------------
// Bool | b
// Int | i
// Int8 | i8
// Int16 | i16
// Int32 | i32
// Int64 | i64
// Uint | u
// Uint8 | u8
// Uint16 | u16
// Uint32 | u32
// Uint64 | u64
// Uintptr |
// Float32 | f32
// Float64 | f64
// Complex64 | c64
// Complex128 | c128
// Array |
// Chan |
// Func |
// Interface |
// Map |
// Ptr |
// Slice |
// String | s
// Struct |
// UnsafePointer |
//
// The file format has two categories of data:
//
// 1. Log line information to reconstruct logs later
// 2. Actual log entries
//
// The differentiation is done with the entryType, which is prefixed on to the record.
//
// The log line records are formatted as follows:
//
// - type: 1 byte - ETLogLine (1)
// - id: 4 bytes - little endian uint32
// - # of string segs: 4 bytes - little endian uint32
// - kinds: (#segs - 1) bytes, each being a reflect.Kind
// - segments:
// - string length: 4 bytes - little endian uint32
// - string data: ^length bytes
//
// The log entry records are formatted as follows:
//
// - type: 1 byte - ETLogEntry (2)
// - line id: 4 bytes - little endian uint32
// - data+: var bytes - all the corresponding data for the kinds in the log line entry
//
// The data is serialized as follows:
//
// - Bool: 1 byte
// - False: 0 or True: 1
//
// - String: 4 + len(string) bytes
// - Length: 4 bytes - little endian uint32
// - String bytes: Length bytes
//
// - int family:
// - int: 8 bytes - int64 as little endian uint64
// - int8: 1 byte
// - int16: 2 bytes - int16 as little endian uint16
// - int32: 4 bytes - int32 as little endian uint32
// - int64: 8 bytes - int64 as little endian uint64
//
// - uint family:
// - uint: 8 bytes - little endian uint64
// - uint8: 1 byte
// - uint16: 2 bytes - little endian uint16
// - uint32: 4 bytes - little endian uint32
// - uint64: 8 bytes - little endian uint64
//
// - float32:
// - 4 bytes as little endian uint32 from float32 bits
//
// - float64:
// - 8 bytes as little endian uint64 from float64 bits
//
// - complex64:
// - Real: 4 bytes as little endian uint32 from float32 bits
// - Complex: 4 bytes as little endian uint32 from float32 bits
//
// - complex128:
// - Real: 8 bytes as little endian uint64 from float64 bits
// - Complex: 8 bytes as little endian uint64 from float64 bits
package nanolog
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
"reflect"
"strings"
"sync"
"sync/atomic"
"unicode/utf8"
)
// MaxLoggers is the maximum number of different loggers that are allowed
const MaxLoggers = 10240
// Handle is a simple handle to an internal logging data structure
// LogHandles are returned by the AddLogger method and used by the Log method to
// actually log data.
type Handle uint32
// EntryType is an enum that represents the record headers in the output files to
// differentiate between log lines and log entries
type EntryType byte
const (
// ETInvalid is an invalid EntryType
ETInvalid EntryType = iota
// ETLogLine means the log line data for a single call to AddLogger is ahead
ETLogLine
// ETLogEntry means the log data for a single call to Log is ahead
ETLogEntry
)
// Logger is the internal struct representing the runtime state of the loggers.
// The Segs field is not used during logging; it is only used in the inflate
// utility but is kept during execution in case it is needed for debugging
type Logger struct {
Kinds []reflect.Kind
Segs []string
}
var defaultLogWriter = New()
type LogWriter interface {
// SetWriter will set up efficient writing for the log to the output stream given.
// A raw IO stream is best. The first time SetWriter is called any logs that were
// created or posted before the call will be sent to the writer all in one go.
SetWriter(new io.Writer) error
// Flush ensures all log entries written up to this point are written to the underlying io.Writer
Flush() error
// AddLogger initializes a logger and returns a handle for future logging
AddLogger(fmt string) Handle
// Log logs to the output stream
Log(handle Handle, args ...interface{}) error
// Debug dump of information about a handle
DebugDump(handle Handle) string
}
type logWriter struct {
initBuf *bytes.Buffer
w *bufio.Writer
firstSet bool
writeLock sync.Locker
loggers []Logger
curLoggersIdx *uint32
}
// New creates a new LogWriter
func New() LogWriter {
initBuf := &bytes.Buffer{}
return &logWriter{
initBuf: initBuf,
w: bufio.NewWriter(initBuf),
firstSet: true,
writeLock: new(sync.Mutex),
loggers: make([]Logger, MaxLoggers),
curLoggersIdx: new(uint32),
}
}
// SetWriter calls LogWriter.SetWriter on the default log writer.
func SetWriter(new io.Writer) error {
return defaultLogWriter.SetWriter(new)
}
func (lw *logWriter) SetWriter(new io.Writer) error {
// grab write lock to ensure no problems
lw.writeLock.Lock()
defer lw.writeLock.Unlock()
if err := lw.w.Flush(); err != nil {
return err
}
lw.w = bufio.NewWriter(new)
if lw.firstSet {
lw.firstSet = false
if _, err := lw.initBuf.WriteTo(lw.w); err != nil {
return err
}
}
return nil
}
// Flush calls LogWriter.Flush on the default log writer.
func Flush() error {
return defaultLogWriter.Flush()
}
func (lw *logWriter) Flush() error {
// grab write lock to ensure no prblems
lw.writeLock.Lock()
defer lw.writeLock.Unlock()
return lw.w.Flush()
}
// AddLogger calls LogWriter.AddLogger on the default log writer.
func AddLogger(fmt string) Handle {
return defaultLogWriter.AddLogger(fmt)
}
func (lw *logWriter) AddLogger(fmt string) Handle {
// save some kind of string format to the file
idx := atomic.AddUint32(lw.curLoggersIdx, 1) - 1
if idx >= MaxLoggers {
panic("Too many loggers")
}
l := parseLogLine(fmt)
lw.loggers[idx] = l
lw.writeLogLineHeader(idx, l.Kinds, l.Segs)
return Handle(idx)
}
func | (gold string) Logger {
// make a copy we can destroy
tmp := gold
f := &tmp
var kinds []reflect.Kind
var segs []string
var curseg []rune
for len(*f) > 0 {
if r := next(f); r != '%' {
curseg = append(curseg, r)
continue
}
// Literal % sign
if peek(f) == '%' {
next(f)
curseg = append(curseg, '%')
continue
}
segs = append(segs, string(curseg))
curseg = curseg[:0]
var requireBrace bool
// Optional curly braces around format
r := next(f)
if r == '{' {
requireBrace = true
r = next(f)
}
// optimized parse tree
switch r {
case 'b':
kinds = append(kinds, reflect.Bool)
case 's':
kinds = append(kinds, reflect.String)
case 'i':
if len(*f) == 0 {
kinds = append(kinds, reflect.Int)
break
}
r := peek(f)
switch r {
case '8':
next(f)
kinds = append(kinds, reflect.Int8)
case '1':
next(f)
if next(f) != '6' {
logpanic("Was expecting i16.", gold)
}
kinds = append(kinds, reflect.Int16)
case '3':
next(f)
if next(f) != '2' {
logpanic("Was expecting i32.", gold)
}
kinds = append(kinds, reflect.Int32)
case '6':
next(f)
if next(f) != '4' {
logpanic("Was expecting i64.", gold)
}
kinds = append(kinds, reflect.Int64)
default:
kinds = append(kinds, reflect.Int)
}
case 'u':
if len(*f) == 0 {
kinds = append(kinds, reflect.Uint)
break
}
r := peek(f)
switch r {
case '8':
next(f)
kinds = append(kinds, reflect.Uint8)
case '1':
next(f)
if next(f) != '6' {
logpanic("Was expecting u16.", gold)
}
kinds = append(kinds, reflect.Uint16)
case '3':
next(f)
if next(f) != '2' {
logpanic("Was expecting u32.", gold)
}
kinds = append(kinds, reflect.Uint32)
case '6':
next(f)
if next(f) != '4' {
logpanic("Was expecting u64.", gold)
}
kinds = append(kinds, reflect.Uint64)
default:
kinds = append(kinds, reflect.Uint)
}
case 'f':
r := peek(f)
switch r {
case '3':
next(f)
if next(f) != '2' {
logpanic("Was expecting f32.", gold)
}
kinds = append(kinds, reflect.Float32)
case '6':
next(f)
if next(f) != '4' {
logpanic("Was expecting f64.", gold)
}
kinds = append(kinds, reflect.Float64)
default:
logpanic("Expecting either f32 or f64", gold)
}
case 'c':
r := peek(f)
switch r {
case '6':
next(f)
if next(f) != '4' {
logpanic("Was expecting c64.", gold)
}
kinds = append(kinds, reflect.Complex64)
case '1':
next(f)
if next(f) != '2' {
logpanic("Was expecting c128.", gold)
}
if next(f) != '8' {
logpanic("Was expecting c128.", gold)
}
kinds = append(kinds, reflect.Complex128)
default:
logpanic("Expecting either c64 or c128", gold)
}
default:
logpanic("Invalid replace sequence", gold)
}
if requireBrace {
if len(*f) == 0 {
logpanic("Missing '}' character at end of line", gold)
}
if next(f) != '}' {
logpanic("Missing '}' character", gold)
}
}
}
segs = append(segs, string(curseg))
return Logger{
Kinds: kinds,
Segs: segs,
}
}
func peek(s *string) rune {
r, _ := utf8.DecodeRuneInString(*s)
if r == utf8.RuneError {
panic("Malformed log string")
}
return r
}
func next(s *string) rune {
r, n := utf8.DecodeRuneInString(*s)
*s = (*s)[n:]
if r == utf8.RuneError {
panic("Malformed log string")
}
return r
}
func (lw *logWriter) writeLogLineHeader(idx uint32, kinds []reflect.Kind, segs []string) {
buf := &bytes.Buffer{}
b := make([]byte, 4)
// write log line record identifier
buf.WriteByte(byte(ETLogLine))
// write log identifier
binary.LittleEndian.PutUint32(b, idx)
buf.Write(b)
// write number of string segments between variable parts
// we don't need to write the number of kinds here because it is always
// equal to the number of segments minus 1
if len(segs) > math.MaxUint32 {
// what the hell are you logging?!
panic("Too many log line segments")
}
binary.LittleEndian.PutUint32(b, uint32(len(segs)))
buf.Write(b)
// write out all the kinds. These are cast to a byte because their values all
// fit into a byte and it saves a little space
for _, k := range kinds {
buf.WriteByte(byte(k))
}
// write all the segments, lengths first then string bytes for each
for _, s := range segs {
binary.LittleEndian.PutUint32(b, uint32(len(s)))
buf.Write(b)
buf.WriteString(s)
}
// finally write all of it together to the output
lw.w.Write(buf.Bytes())
}
// helper function to have consistently formatted panics and shorter code above
func logpanic(msg, gold string) {
panic(fmt.Sprintf("Malformed log format string. %s.\n%s", msg, gold))
}
var (
bufpool = &sync.Pool{
New: func() interface{} {
temp := make([]byte, 1024) // 1k default size
return &temp
},
}
)
// Log calls LogWriter.Log on the default log writer.
func Log(handle Handle, args ...interface{}) error {
return defaultLogWriter.Log(handle, args...)
}
func (lw *logWriter) Log(handle Handle, args ...interface{}) error {
l := lw.loggers[handle]
if len(l.Kinds) != len(args) {
panic("Number of args does not match log line")
}
buf := bufpool.Get().(*[]byte)
*buf = (*buf)[:0]
b := make([]byte, 8)
*buf = append(*buf, byte(ETLogEntry))
binary.LittleEndian.PutUint32(b, uint32(handle))
*buf = append(*buf, b[:4]...)
for idx := range l.Kinds {
if l.Kinds[idx] != reflect.TypeOf(args[idx]).Kind() {
panic("Argument type does not match log line")
}
// write serialized version to writer
switch l.Kinds[idx] {
case reflect.Bool:
if args[idx].(bool) {
*buf = append(*buf, 1)
} else {
*buf = append(*buf, 0)
}
case reflect.String:
s := args[idx].(string)
binary.LittleEndian.PutUint32(b, uint32(len(s)))
*buf = append(*buf, b[:4]...)
*buf = append(*buf, s...)
// ints
case reflect.Int:
// Assume generic int is 64 bit
i := args[idx].(int)
binary.LittleEndian.PutUint64(b, uint64(i))
*buf = append(*buf, b...)
case reflect.Int8:
i := args[idx].(int8)
*buf = append(*buf, byte(i))
case reflect.Int16:
i := args[idx].(int16)
binary.LittleEndian.PutUint16(b, uint16(i))
*buf = append(*buf, b[:2]...)
case reflect.Int32:
i := args[idx].(int32)
binary.LittleEndian.PutUint32(b, uint32(i))
*buf = append(*buf, b[:4]...)
case reflect.Int64:
i := args[idx].(int64)
binary.LittleEndian.PutUint64(b, uint64(i))
*buf = append(*buf, b...)
// uints
case reflect.Uint:
// Assume generic uint is 64 bit
i := args[idx].(uint)
binary.LittleEndian.PutUint64(b, uint64(i))
*buf = append(*buf, b...)
case reflect.Uint8:
i := args[idx].(uint8)
*buf = append(*buf, byte(i))
case reflect.Uint16:
i := args[idx].(uint16)
binary.LittleEndian.PutUint16(b, i)
*buf = append(*buf, b[:2]...)
case reflect.Uint32:
i := args[idx].(uint32)
binary.LittleEndian.PutUint32(b, i)
*buf = append(*buf, b[:4]...)
case reflect.Uint64:
i := args[idx].(uint64)
binary.LittleEndian.PutUint64(b, i)
*buf = append(*buf, b...)
// floats
case reflect.Float32:
f := args[idx].(float32)
i := math.Float32bits(f)
binary.LittleEndian.PutUint32(b, i)
*buf = append(*buf, b[:4]...)
case reflect.Float64:
f := args[idx].(float64)
i := math.Float64bits(f)
binary.LittleEndian.PutUint64(b, i)
*buf = append(*buf, b...)
// complex
case reflect.Complex64:
c := args[idx].(complex64)
f := real(c)
i := math.Float32bits(f)
binary.LittleEndian.PutUint32(b, i)
*buf = append(*buf, b[:4]...)
f = imag(c)
i = math.Float32bits(f)
binary.LittleEndian.PutUint32(b, i)
*buf = append(*buf, b[:4]...)
case reflect.Complex128:
c := args[idx].(complex128)
f := real(c)
i := math.Float64bits(f)
binary.LittleEndian.PutUint64(b, i)
*buf = append(*buf, b...)
f = imag(c)
i = math.Float64bits(f)
binary.LittleEndian.PutUint64(b, i)
*buf = append(*buf, b...)
default:
panic(fmt.Sprintf("Invalid Kind in logger: %v", l.Kinds[idx]))
}
}
lw.writeLock.Lock()
_, err := lw.w.Write(*buf)
lw.writeLock.Unlock()
bufpool.Put(buf)
return err
}
// DebugDump calls LogWriter.DebugDump on the default log writer.
func DebugDump(handle Handle) string {
return defaultLogWriter.DebugDump(handle)
}
func (lw *logWriter) DebugDump(handle Handle) string {
sb := &strings.Builder{}
l := lw.loggers[handle]
for i := 0; i < len(l.Kinds); i++ {
sb.WriteString("+\"")
sb.WriteString(l.Segs[i])
sb.WriteString("\"+")
sb.WriteString("<")
sb.WriteString(l.Kinds[i].String())
sb.WriteString(">")
}
// write the last segment
sb.WriteString("+\"")
sb.WriteString(l.Segs[len(l.Segs)-1])
sb.WriteString("\"+")
return sb.String()
}
| parseLogLine | identifier_name |
nanolog.go | // Copyright 2017 Scott Mansfield
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package nanolog is a package to speed up your logging.
//
// The format string is inspired by the full fledged fmt.Fprintf function. The
// codes are unique to this package, so normal fmt documentation is not applicable.
//
// The format string is similar to fmt in that it uses the percent sign (a.k.a.
// the modulo operator) to signify the start of a format code. The reader is
// greedy, meaning that the parser will attempt to read as much as it can for a
// code before it stops. E.g. if you have a generic int in the middle of your
// format string immediately followed by the number 1 and a space ("%i1 "), the
// parser may complain saying that it encountered an invalid code. To fix this,
// use curly braces after the percent sign to surround the code: "%{i}1 ".
//
// Kinds and their corresponding format codes:
//
// Kind | Code
// --------------|-------------
// Bool | b
// Int | i
// Int8 | i8
// Int16 | i16
// Int32 | i32
// Int64 | i64
// Uint | u
// Uint8 | u8
// Uint16 | u16
// Uint32 | u32
// Uint64 | u64
// Uintptr |
// Float32 | f32
// Float64 | f64
// Complex64 | c64
// Complex128 | c128
// Array |
// Chan |
// Func |
// Interface |
// Map |
// Ptr |
// Slice |
// String | s
// Struct |
// UnsafePointer |
//
// The file format has two categories of data:
//
// 1. Log line information to reconstruct logs later
// 2. Actual log entries
//
// The differentiation is done with the entryType, which is prefixed on to the record.
//
// The log line records are formatted as follows:
//
// - type: 1 byte - ETLogLine (1)
// - id: 4 bytes - little endian uint32
// - # of string segs: 4 bytes - little endian uint32
// - kinds: (#segs - 1) bytes, each being a reflect.Kind
// - segments:
// - string length: 4 bytes - little endian uint32
// - string data: ^length bytes
//
// The log entry records are formatted as follows:
//
// - type: 1 byte - ETLogEntry (2)
// - line id: 4 bytes - little endian uint32
// - data+: var bytes - all the corresponding data for the kinds in the log line entry
//
// The data is serialized as follows:
//
// - Bool: 1 byte
// - False: 0 or True: 1
//
// - String: 4 + len(string) bytes
// - Length: 4 bytes - little endian uint32
// - String bytes: Length bytes
//
// - int family:
// - int: 8 bytes - int64 as little endian uint64
// - int8: 1 byte
// - int16: 2 bytes - int16 as little endian uint16
// - int32: 4 bytes - int32 as little endian uint32
// - int64: 8 bytes - int64 as little endian uint64
//
// - uint family:
// - uint: 8 bytes - little endian uint64
// - uint8: 1 byte
// - uint16: 2 bytes - little endian uint16
// - uint32: 4 bytes - little endian uint32
// - uint64: 8 bytes - little endian uint64
//
// - float32:
// - 4 bytes as little endian uint32 from float32 bits
//
// - float64:
// - 8 bytes as little endian uint64 from float64 bits
//
// - complex64:
// - Real: 4 bytes as little endian uint32 from float32 bits
// - Complex: 4 bytes as little endian uint32 from float32 bits
//
// - complex128:
// - Real: 8 bytes as little endian uint64 from float64 bits
// - Complex: 8 bytes as little endian uint64 from float64 bits
package nanolog
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
"reflect"
"strings"
"sync"
"sync/atomic"
"unicode/utf8"
)
// MaxLoggers is the maximum number of different loggers that are allowed
const MaxLoggers = 10240
// Handle is a simple handle to an internal logging data structure
// LogHandles are returned by the AddLogger method and used by the Log method to
// actually log data.
type Handle uint32
// EntryType is an enum that represents the record headers in the output files to
// differentiate between log lines and log entries
type EntryType byte
const (
// ETInvalid is an invalid EntryType
ETInvalid EntryType = iota
// ETLogLine means the log line data for a single call to AddLogger is ahead
ETLogLine
// ETLogEntry means the log data for a single call to Log is ahead
ETLogEntry
)
// Logger is the internal struct representing the runtime state of the loggers.
// The Segs field is not used during logging; it is only used in the inflate
// utility but is kept during execution in case it is needed for debugging
type Logger struct {
	Kinds []reflect.Kind // expected argument kind for each format verb, in order
	Segs  []string       // literal text between verbs; parseLogLine produces len(Kinds)+1 entries
}
var defaultLogWriter = New()
type LogWriter interface {
// SetWriter will set up efficient writing for the log to the output stream given.
// A raw IO stream is best. The first time SetWriter is called any logs that were
// created or posted before the call will be sent to the writer all in one go.
SetWriter(new io.Writer) error
// Flush ensures all log entries written up to this point are written to the underlying io.Writer
Flush() error
// AddLogger initializes a logger and returns a handle for future logging
AddLogger(fmt string) Handle
// Log logs to the output stream
Log(handle Handle, args ...interface{}) error
// Debug dump of information about a handle
DebugDump(handle Handle) string
}
// logWriter is the concrete LogWriter implementation.
type logWriter struct {
	initBuf       *bytes.Buffer // collects all output produced before the first SetWriter call
	w             *bufio.Writer // current buffered output destination
	firstSet      bool          // true until SetWriter has been called once (initBuf not yet replayed)
	writeLock     sync.Locker   // serializes writes to w (see Log and SetWriter)
	loggers       []Logger      // registered loggers, indexed by Handle
	curLoggersIdx *uint32       // next free slot in loggers, advanced atomically by AddLogger
}
// New creates a LogWriter that buffers everything in memory until the first
// call to SetWriter, at which point the accumulated bytes are replayed to the
// real destination.
func New() LogWriter {
	var startup bytes.Buffer
	lw := &logWriter{
		initBuf:       &startup,
		w:             bufio.NewWriter(&startup),
		firstSet:      true,
		writeLock:     &sync.Mutex{},
		loggers:       make([]Logger, MaxLoggers),
		curLoggersIdx: new(uint32),
	}
	return lw
}
// SetWriter calls LogWriter.SetWriter on the package-level default log writer.
func SetWriter(new io.Writer) error {
	return defaultLogWriter.SetWriter(new)
}
// SetWriter flushes anything buffered for the current destination and swaps
// in the given writer. On the first call, everything collected in the
// in-memory startup buffer (log line headers and entries written before a
// real writer existed) is replayed to the new destination.
//
// The parameter is named w rather than `new` to avoid shadowing the builtin.
func (lw *logWriter) SetWriter(w io.Writer) error {
	// grab write lock so no concurrent Log call interleaves with the swap
	lw.writeLock.Lock()
	defer lw.writeLock.Unlock()

	// push buffered bytes to the old destination before replacing it
	if err := lw.w.Flush(); err != nil {
		return err
	}
	lw.w = bufio.NewWriter(w)

	// First swap only: replay the startup buffer so nothing is lost.
	if lw.firstSet {
		lw.firstSet = false
		if _, err := lw.initBuf.WriteTo(lw.w); err != nil {
			return err
		}
	}
	return nil
}
// Flush calls LogWriter.Flush on the package-level default log writer.
func Flush() error {
	return defaultLogWriter.Flush()
}
func (lw *logWriter) Flush() error |
// AddLogger calls LogWriter.AddLogger on the package-level default log writer.
func AddLogger(fmt string) Handle {
	return defaultLogWriter.AddLogger(fmt)
}
// AddLogger parses the format string, registers the resulting Logger in the
// next free slot, writes its ETLogLine header record to the output stream,
// and returns the slot index as the Handle used by later Log calls.
//
// NOTE(review): the counter is incremented before the bounds check, so a
// failed registration still consumes a slot; presumably acceptable since it
// panics anyway — confirm if this should ever be recoverable.
func (lw *logWriter) AddLogger(fmt string) Handle {
	// Reserve a slot atomically; AddLogger may be called from multiple goroutines.
	idx := atomic.AddUint32(lw.curLoggersIdx, 1) - 1
	if idx >= MaxLoggers {
		panic("Too many loggers")
	}
	l := parseLogLine(fmt)
	lw.loggers[idx] = l
	// Persist the line's kinds/segments so readers can reconstruct log text later.
	lw.writeLogLineHeader(idx, l.Kinds, l.Segs)
	return Handle(idx)
}
// parseLogLine splits a format string (see the package documentation for the
// verb grammar) into a Logger: the literal text segments and the expected
// reflect.Kind of each argument. It panics on any malformed format string.
//
// gold is kept unmodified so error messages can show the original input; the
// parser consumes a working copy through the pointer f.
func parseLogLine(gold string) Logger {
	// make a copy we can destroy
	tmp := gold
	f := &tmp
	var kinds []reflect.Kind
	var segs []string
	var curseg []rune
	for len(*f) > 0 {
		// Accumulate literal characters until a '%' verb marker.
		if r := next(f); r != '%' {
			curseg = append(curseg, r)
			continue
		}
		// Literal % sign ("%%")
		if peek(f) == '%' {
			next(f)
			curseg = append(curseg, '%')
			continue
		}
		// A verb terminates the current literal segment.
		segs = append(segs, string(curseg))
		curseg = curseg[:0]
		var requireBrace bool
		// Optional curly braces around format, e.g. "%{i}1 " to disambiguate.
		r := next(f)
		if r == '{' {
			requireBrace = true
			r = next(f)
		}
		// optimized parse tree: one letter plus optional greedy width suffix
		switch r {
		case 'b':
			kinds = append(kinds, reflect.Bool)
		case 's':
			kinds = append(kinds, reflect.String)
		case 'i':
			// "i" alone is a generic int; otherwise i8/i16/i32/i64.
			if len(*f) == 0 {
				kinds = append(kinds, reflect.Int)
				break
			}
			r := peek(f)
			switch r {
			case '8':
				next(f)
				kinds = append(kinds, reflect.Int8)
			case '1':
				next(f)
				if next(f) != '6' {
					logpanic("Was expecting i16.", gold)
				}
				kinds = append(kinds, reflect.Int16)
			case '3':
				next(f)
				if next(f) != '2' {
					logpanic("Was expecting i32.", gold)
				}
				kinds = append(kinds, reflect.Int32)
			case '6':
				next(f)
				if next(f) != '4' {
					logpanic("Was expecting i64.", gold)
				}
				kinds = append(kinds, reflect.Int64)
			default:
				kinds = append(kinds, reflect.Int)
			}
		case 'u':
			// "u" alone is a generic uint; otherwise u8/u16/u32/u64.
			if len(*f) == 0 {
				kinds = append(kinds, reflect.Uint)
				break
			}
			r := peek(f)
			switch r {
			case '8':
				next(f)
				kinds = append(kinds, reflect.Uint8)
			case '1':
				next(f)
				if next(f) != '6' {
					logpanic("Was expecting u16.", gold)
				}
				kinds = append(kinds, reflect.Uint16)
			case '3':
				next(f)
				if next(f) != '2' {
					logpanic("Was expecting u32.", gold)
				}
				kinds = append(kinds, reflect.Uint32)
			case '6':
				next(f)
				if next(f) != '4' {
					logpanic("Was expecting u64.", gold)
				}
				kinds = append(kinds, reflect.Uint64)
			default:
				kinds = append(kinds, reflect.Uint)
			}
		case 'f':
			// Floats require an explicit width (f32 or f64).
			// NOTE(review): unlike 'i'/'u' there is no len(*f) == 0 guard here,
			// so a trailing "%f" panics inside peek rather than via logpanic —
			// confirm whether that message difference matters.
			r := peek(f)
			switch r {
			case '3':
				next(f)
				if next(f) != '2' {
					logpanic("Was expecting f32.", gold)
				}
				kinds = append(kinds, reflect.Float32)
			case '6':
				next(f)
				if next(f) != '4' {
					logpanic("Was expecting f64.", gold)
				}
				kinds = append(kinds, reflect.Float64)
			default:
				logpanic("Expecting either f32 or f64", gold)
			}
		case 'c':
			// Complex numbers require an explicit width (c64 or c128).
			r := peek(f)
			switch r {
			case '6':
				next(f)
				if next(f) != '4' {
					logpanic("Was expecting c64.", gold)
				}
				kinds = append(kinds, reflect.Complex64)
			case '1':
				next(f)
				if next(f) != '2' {
					logpanic("Was expecting c128.", gold)
				}
				if next(f) != '8' {
					logpanic("Was expecting c128.", gold)
				}
				kinds = append(kinds, reflect.Complex128)
			default:
				logpanic("Expecting either c64 or c128", gold)
			}
		default:
			logpanic("Invalid replace sequence", gold)
		}
		if requireBrace {
			// A brace-opened verb must be closed before anything else.
			if len(*f) == 0 {
				logpanic("Missing '}' character at end of line", gold)
			}
			if next(f) != '}' {
				logpanic("Missing '}' character", gold)
			}
		}
	}
	// Close the trailing literal segment so len(Segs) == len(Kinds)+1.
	segs = append(segs, string(curseg))
	return Logger{
		Kinds: kinds,
		Segs:  segs,
	}
}
// peek returns the next rune in s without consuming it. It panics on empty or
// invalid UTF-8 input (DecodeRuneInString yields RuneError for both cases).
func peek(s *string) rune {
	r, _ := utf8.DecodeRuneInString(*s)
	if r == utf8.RuneError {
		panic("Malformed log string")
	}
	return r
}
// next consumes and returns the next rune in s, advancing the string. It
// panics on empty or invalid UTF-8 input, like peek.
func next(s *string) rune {
	r, n := utf8.DecodeRuneInString(*s)
	*s = (*s)[n:] // advance past the decoded rune
	if r == utf8.RuneError {
		panic("Malformed log string")
	}
	return r
}
// writeLogLineHeader emits an ETLogLine record describing logger idx: record
// tag, 4-byte little-endian id, segment count, one byte per argument kind,
// then each literal segment as a length-prefixed string. Readers use this
// record to reconstruct log text from ETLogEntry records.
func (lw *logWriter) writeLogLineHeader(idx uint32, kinds []reflect.Kind, segs []string) {
	buf := &bytes.Buffer{}
	b := make([]byte, 4) // scratch area for the little-endian uint32 fields
	// write log line record identifier
	buf.WriteByte(byte(ETLogLine))
	// write log identifier
	binary.LittleEndian.PutUint32(b, idx)
	buf.Write(b)
	// write number of string segments between variable parts
	// we don't need to write the number of kinds here because it is always
	// equal to the number of segments minus 1
	if len(segs) > math.MaxUint32 {
		// what the hell are you logging?!
		panic("Too many log line segments")
	}
	binary.LittleEndian.PutUint32(b, uint32(len(segs)))
	buf.Write(b)
	// write out all the kinds. These are cast to a byte because their values all
	// fit into a byte and it saves a little space
	for _, k := range kinds {
		buf.WriteByte(byte(k))
	}
	// write all the segments, lengths first then string bytes for each
	for _, s := range segs {
		binary.LittleEndian.PutUint32(b, uint32(len(s)))
		buf.Write(b)
		buf.WriteString(s)
	}
	// finally write all of it together to the output
	// NOTE(review): the Write error is discarded, and this write does not take
	// writeLock the way Log does — confirm header writes cannot race with Log.
	lw.w.Write(buf.Bytes())
}
// logpanic panics with a consistently formatted message for malformed format
// strings, including the original ("gold") string for context. Helper to keep
// the parser code above shorter.
func logpanic(msg, gold string) {
	text := fmt.Sprintf("Malformed log format string. %s.\n%s", msg, gold)
	panic(text)
}
var (
bufpool = &sync.Pool{
New: func() interface{} {
temp := make([]byte, 1024) // 1k default size
return &temp
},
}
)
// Log calls LogWriter.Log on the package-level default log writer.
func Log(handle Handle, args ...interface{}) error {
	return defaultLogWriter.Log(handle, args...)
}
// Log serializes one log entry for handle to the output stream: the
// ETLogEntry tag, the 4-byte little-endian handle id, then each argument
// encoded per the scheme in the package documentation. It panics when the
// number or runtime kinds of args do not match the registered format.
func (lw *logWriter) Log(handle Handle, args ...interface{}) error {
	l := lw.loggers[handle]
	if len(l.Kinds) != len(args) {
		panic("Number of args does not match log line")
	}
	// Entries are staged in a pooled scratch buffer and written with a single
	// locked Write so concurrent Log calls cannot interleave their records.
	buf := bufpool.Get().(*[]byte)
	*buf = (*buf)[:0]
	// b is an 8-byte scratch area; narrower values use a prefix slice of it.
	b := make([]byte, 8)
	*buf = append(*buf, byte(ETLogEntry))
	binary.LittleEndian.PutUint32(b, uint32(handle))
	*buf = append(*buf, b[:4]...)
	for idx := range l.Kinds {
		if l.Kinds[idx] != reflect.TypeOf(args[idx]).Kind() {
			panic("Argument type does not match log line")
		}
		// write serialized version to writer
		switch l.Kinds[idx] {
		case reflect.Bool:
			// booleans are a single 0/1 byte
			if args[idx].(bool) {
				*buf = append(*buf, 1)
			} else {
				*buf = append(*buf, 0)
			}
		case reflect.String:
			// strings are length-prefixed (4-byte little-endian) raw bytes
			s := args[idx].(string)
			binary.LittleEndian.PutUint32(b, uint32(len(s)))
			*buf = append(*buf, b[:4]...)
			*buf = append(*buf, s...)
		// ints
		case reflect.Int:
			// Assume generic int is 64 bit
			i := args[idx].(int)
			binary.LittleEndian.PutUint64(b, uint64(i))
			*buf = append(*buf, b...)
		case reflect.Int8:
			i := args[idx].(int8)
			*buf = append(*buf, byte(i))
		case reflect.Int16:
			i := args[idx].(int16)
			binary.LittleEndian.PutUint16(b, uint16(i))
			*buf = append(*buf, b[:2]...)
		case reflect.Int32:
			i := args[idx].(int32)
			binary.LittleEndian.PutUint32(b, uint32(i))
			*buf = append(*buf, b[:4]...)
		case reflect.Int64:
			i := args[idx].(int64)
			binary.LittleEndian.PutUint64(b, uint64(i))
			*buf = append(*buf, b...)
		// uints
		case reflect.Uint:
			// Assume generic uint is 64 bit
			i := args[idx].(uint)
			binary.LittleEndian.PutUint64(b, uint64(i))
			*buf = append(*buf, b...)
		case reflect.Uint8:
			i := args[idx].(uint8)
			*buf = append(*buf, byte(i))
		case reflect.Uint16:
			i := args[idx].(uint16)
			binary.LittleEndian.PutUint16(b, i)
			*buf = append(*buf, b[:2]...)
		case reflect.Uint32:
			i := args[idx].(uint32)
			binary.LittleEndian.PutUint32(b, i)
			*buf = append(*buf, b[:4]...)
		case reflect.Uint64:
			i := args[idx].(uint64)
			binary.LittleEndian.PutUint64(b, i)
			*buf = append(*buf, b...)
		// floats are stored as the little-endian encoding of their IEEE bits
		case reflect.Float32:
			f := args[idx].(float32)
			i := math.Float32bits(f)
			binary.LittleEndian.PutUint32(b, i)
			*buf = append(*buf, b[:4]...)
		case reflect.Float64:
			f := args[idx].(float64)
			i := math.Float64bits(f)
			binary.LittleEndian.PutUint64(b, i)
			*buf = append(*buf, b...)
		// complex numbers are stored as real part then imaginary part
		case reflect.Complex64:
			c := args[idx].(complex64)
			f := real(c)
			i := math.Float32bits(f)
			binary.LittleEndian.PutUint32(b, i)
			*buf = append(*buf, b[:4]...)
			f = imag(c)
			i = math.Float32bits(f)
			binary.LittleEndian.PutUint32(b, i)
			*buf = append(*buf, b[:4]...)
		case reflect.Complex128:
			c := args[idx].(complex128)
			f := real(c)
			i := math.Float64bits(f)
			binary.LittleEndian.PutUint64(b, i)
			*buf = append(*buf, b...)
			f = imag(c)
			i = math.Float64bits(f)
			binary.LittleEndian.PutUint64(b, i)
			*buf = append(*buf, b...)
		default:
			// parseLogLine only produces the kinds above, so this is unreachable
			// unless the loggers table is corrupted.
			panic(fmt.Sprintf("Invalid Kind in logger: %v", l.Kinds[idx]))
		}
	}
	lw.writeLock.Lock()
	_, err := lw.w.Write(*buf)
	lw.writeLock.Unlock()
	bufpool.Put(buf) // return scratch buffer to the pool after the write
	return err
}
// DebugDump calls LogWriter.DebugDump on the package-level default log writer.
func DebugDump(handle Handle) string {
	return defaultLogWriter.DebugDump(handle)
}
// DebugDump renders a human-readable description of the format registered for
// handle: each literal segment quoted between '+' signs, followed by the
// expected argument kind in angle brackets.
//
// Fix: a handle that was never registered left the zero-value Logger in the
// table, and indexing l.Segs[len(l.Segs)-1] on its empty Segs slice panicked
// with an index-out-of-range; such handles now yield an empty string.
func (lw *logWriter) DebugDump(handle Handle) string {
	l := lw.loggers[handle]
	if len(l.Segs) == 0 {
		// Unregistered handle: nothing to describe.
		return ""
	}
	sb := &strings.Builder{}
	for i := 0; i < len(l.Kinds); i++ {
		sb.WriteString("+\"")
		sb.WriteString(l.Segs[i])
		sb.WriteString("\"+")
		sb.WriteString("<")
		sb.WriteString(l.Kinds[i].String())
		sb.WriteString(">")
	}
	// write the last segment
	sb.WriteString("+\"")
	sb.WriteString(l.Segs[len(l.Segs)-1])
	sb.WriteString("\"+")
	return sb.String()
}
| {
// grab write lock to ensure no problems
lw.writeLock.Lock()
defer lw.writeLock.Unlock()
return lw.w.Flush()
} | identifier_body |
storageos.go | package main
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/dnephin/cobra"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/docker/docker/pkg/term"
"github.com/storageos/go-api/serror"
"github.com/storageos/go-api/types/versions"
"github.com/storageos/go-cli/cli"
"github.com/storageos/go-cli/cli/command"
"github.com/storageos/go-cli/cli/command/commands"
cliconfig "github.com/storageos/go-cli/cli/config"
"github.com/storageos/go-cli/cli/debug"
cliflags "github.com/storageos/go-cli/cli/flags"
"github.com/storageos/go-cli/version"
)
var shortDesc = `Converged storage for containers.
By using this product, you are agreeing to the terms of the the StorageOS Ltd. End
User Subscription Agreement (EUSA) found at: https://storageos.com/legal/#eusa
To be notified about stable releases and latest features, sign up at https://my.storageos.com.
`
// Disable debugging (logging to stdout) until explicitly enabled. In normal
// use we don't want logrus messages going to stdout/stderr.
func init() {
	debug.Disable()
}
// isCoreOS reports whether the host looks like Container Linux (CoreOS),
// based on the DISTRIB_ID entry in /etc/lsb-release.
func isCoreOS() (bool, error) {
	data, err := ioutil.ReadFile("/etc/lsb-release")
	if err != nil {
		return false, err
	}
	coreos := strings.Contains(string(data), "DISTRIB_ID=CoreOS")
	return coreos, nil
}
// isInContainer reports whether this process appears to be running inside a
// Docker container, judged from PID 1's cgroup membership.
// TODO: How reliable is this method of detection. Is there a better way?
func isInContainer() (bool, error) {
	data, err := ioutil.ReadFile("/proc/1/cgroup")
	if err != nil {
		return false, err
	}
	inDocker := strings.Contains(string(data), "docker")
	return inDocker, nil
}
// verfyHostPlatform rejects environments where the CLI is known not to work.
// NOTE(review): the name is misspelled ("verfy"); renaming requires touching
// all call sites, so it is left as-is here.
func verfyHostPlatform() error {
	// Detect native execution on coreOS
	// coreOS should not run user-land programs, and we will not work there (outside a container)
	// Detection errors are deliberately ignored: this is a best-effort check.
	if coreos, err := isCoreOS(); err == nil && coreos {
		// If we dont think we are in a container, fail and warn the user
		if inContainer, err := isInContainer(); err == nil && !inContainer {
			return errors.New("to use the StorageOS CLI on Container Linux, you need to run the storageos/cli image")
		}
	}
	return nil
}
// newStorageOSCommand builds the root `storageos` command: cobra settings,
// the version/config flags, subcommand registration, and the pre-run hooks
// that verify the host platform and feature support before any command runs.
func newStorageOSCommand(storageosCli *command.StorageOSCli) *cobra.Command {
	opts := cliflags.NewClientOptions()
	// flags is captured by the closures below; it is assigned after cmd is built.
	var flags *pflag.FlagSet
	cmd := &cobra.Command{
		Use:              "storageos [OPTIONS] COMMAND [ARG...]",
		Short:            shortDesc,
		SilenceUsage:     true,
		SilenceErrors:    true,
		TraverseChildren: true,
		Args:             noArgs,
		// Bare `storageos` either prints the version (-v) or shows help.
		RunE: func(cmd *cobra.Command, args []string) error {
			if opts.Version {
				showVersion()
				return nil
			}
			return storageosCli.ShowHelp(cmd, args)
		},
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			// verify the host platform, exit immediately if known to be incompatible
			if err := verfyHostPlatform(); err != nil {
				return err
			}
			// flags must be the top-level command flags, not cmd.Flags()
			opts.Common.SetDefaultOptions(flags)
			preRun(opts)
			if err := storageosCli.Initialize(opts); err != nil {
				return err
			}
			return isSupported(cmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental())
		},
	}
	cli.SetupRootCommand(cmd)
	flags = cmd.Flags()
	flags.BoolVarP(&opts.Version, "version", "v", false, "Print version information and quit")
	flags.StringVar(&opts.ConfigDir, "config", cliconfig.Dir(), "Location of client config files")
	opts.Common.InstallFlags(flags)
	setFlagErrorFunc(storageosCli, cmd, flags, opts)
	// setHelpFunc(storageosCli, cmd, flags, opts)
	cmd.SetOutput(storageosCli.Out())
	commands.AddCommands(cmd, storageosCli)
	setValidateArgs(storageosCli, cmd, flags, opts)
	return cmd
}
// setFlagErrorFunc wraps cobra's FlagErrorFunc so that flag errors on
// unsupported or experimental commands surface the feature-support error
// instead of a confusing flag-parse message.
//
// When invoking `storageos volume --nonsense`, we need to make sure FlagErrorFunc return appropriate
// output if the feature is not supported.
// As above cli.SetupRootCommand(cmd) have already setup the FlagErrorFunc, we will add a pre-check before the FlagErrorFunc
// is called.
func setFlagErrorFunc(storageosCli *command.StorageOSCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
	flagErrorFunc := cmd.FlagErrorFunc()
	cmd.SetFlagErrorFunc(func(cmd *cobra.Command, err error) error {
		// Client may not be initialized yet on the --help/error path.
		initializeStorageOSCli(storageosCli, flags, opts)
		if err := isSupported(cmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental()); err != nil {
			return err
		}
		return flagErrorFunc(cmd, err)
	})
}
// setHelpFunc replaces the default help renderer so help for unsupported or
// experimental features reports an error, and hides anything the connected
// server cannot do.
//
// NOTE(review): currently unused — its call site in newStorageOSCommand is
// commented out — and it still contains leftover debug fmt.Printf lines
// ("VC:", "HE:", "ERRROR:") that should be removed before re-enabling.
func setHelpFunc(storageosCli *command.StorageOSCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
	cmd.SetHelpFunc(func(ccmd *cobra.Command, args []string) {
		initializeStorageOSCli(storageosCli, flags, opts)
		fmt.Printf("VC: %s\n", storageosCli.Client().ClientVersion())
		fmt.Printf("HE: %t\n", storageosCli.HasExperimental())
		if err := isSupported(ccmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental()); err != nil {
			fmt.Printf("ERRROR: %v\n", err)
			ccmd.Println(err)
			return
		}
		hideUnsupportedFeatures(ccmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental())
		if err := ccmd.Help(); err != nil {
			ccmd.Println(err)
		}
	})
}
func setValidateArgs(storageosCli *command.StorageOSCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
// The Args is handled by ValidateArgs in cobra, which does not allows a pre-hook.
// As a result, here we replace the existing Args validation func to a wrapper,
// where the wrapper will check to see if the feature is supported or not.
// The Args validation error will only be returned if the feature is supported.
visitAll(cmd, func(ccmd *cobra.Command) {
// if there is no tags for a command or any of its parent,
// there is no need to wrap the Args validation.
if !hasTags(ccmd) {
return
}
if ccmd.Args == nil |
cmdArgs := ccmd.Args
ccmd.Args = func(cmd *cobra.Command, args []string) error {
initializeStorageOSCli(storageosCli, flags, opts)
if err := isSupported(cmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental()); err != nil {
return err
}
return cmdArgs(cmd, args)
}
})
}
// initializeStorageOSCli lazily initializes the CLI client. When cobra runs
// --help, PersistentPreRunE is skipped, so helpers that need a client call
// this first; it is a no-op once a client exists.
// NOTE(review): the error from Initialize is discarded here — presumably
// tolerable on help paths, but confirm failures surface elsewhere.
func initializeStorageOSCli(storageosCli *command.StorageOSCli, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
	if storageosCli.Client() == nil { // when using --help, PersistentPreRun is not called, so initialization is needed.
		// flags must be the top-level command flags, not cmd.Flags()
		opts.Common.SetDefaultOptions(flags)
		preRun(opts)
		storageosCli.Initialize(opts)
	}
}
// visitAll applies fn to root and every command beneath it, depth-first with
// children visited before their parent. This differs from cobra's VisitAll,
// which only walks the parent chain.
func visitAll(root *cobra.Command, fn func(*cobra.Command)) {
	children := root.Commands()
	for i := range children {
		visitAll(children[i], fn)
	}
	fn(root)
}
// noArgs rejects any positional arguments to the root command with a helpful
// error message; an empty argument list is accepted.
func noArgs(cmd *cobra.Command, args []string) error {
	if len(args) > 0 {
		return fmt.Errorf("storageos: '%s' is not a valid command.\nSee 'storageos --help'", args[0])
	}
	return nil
}
// main wires up terminal streams, builds the root command, executes it, and
// maps the error types the CLI can produce onto stderr output and exit codes.
func main() {
	// Set terminal emulation based on platform as required.
	stdin, stdout, stderr := term.StdStreams()
	logrus.SetOutput(stderr)
	storageosCli := command.NewStorageOSCli(stdin, stdout, stderr)
	cmd := newStorageOSCommand(storageosCli)
	if err := cmd.Execute(); err != nil {
		// StorageOS API errors carry a message, an underlying cause, and
		// optional help text; print whichever parts are present.
		if customError, ok := err.(serror.StorageOSError); ok {
			if msg := customError.String(); msg != "" {
				fmt.Fprintf(stderr, "error: %s\n", msg)
			}
			if cause := customError.Err(); cause != nil {
				fmt.Fprintf(stderr, "\ncaused by: %s\n", cause)
			}
			if help := customError.Help(); help != "" {
				fmt.Fprintf(stderr, "\n%s\n", help)
			}
			os.Exit(1)
		}
		if sterr, ok := err.(cli.StatusError); ok {
			if sterr.Status != "" {
				fmt.Fprintln(stderr, sterr.Status)
			}
			// StatusError should only be used for errors, and all errors should
			// have a non-zero exit status, so never exit with 0
			if sterr.StatusCode == 0 {
				os.Exit(1)
			}
			os.Exit(sterr.StatusCode)
		}
		// Any other error: print it and fail with a generic exit code.
		fmt.Fprintln(stderr, err)
		os.Exit(1)
	}
}
// showVersion prints the CLI version and build revision to stdout.
func showVersion() {
	fmt.Printf("StorageOS version %s, build %s\n", version.Version, version.Revision)
	// TODO: better version
	// fmt.Printf("StorageOS API version %s\n", storageosCli.Client().ClientVersion())
}
// preRun applies global client options before a command executes: log level,
// optional config-directory override, and debug output.
func preRun(opts *cliflags.ClientOptions) {
	cliflags.SetLogLevel(opts.Common.LogLevel)
	if dir := opts.ConfigDir; dir != "" {
		cliconfig.SetDir(dir)
	}
	if opts.Common.Debug {
		debug.Enable()
	}
}
// hideUnsupportedFeatures hides from help output any flags or subcommands of
// cmd that are experimental (when experimental features are disabled) or that
// require a newer server API version than clientVersion.
func hideUnsupportedFeatures(cmd *cobra.Command, clientVersion string, hasExperimental bool) {
	cmd.Flags().VisitAll(func(f *pflag.Flag) {
		// hide experimental flags
		if !hasExperimental {
			if _, ok := f.Annotations["experimental"]; ok {
				f.Hidden = true
			}
		}
		// hide flags not supported by the server
		if !isFlagSupported(f, clientVersion) {
			f.Hidden = true
		}
	})
	for _, subcmd := range cmd.Commands() {
		// hide experimental subcommands
		if !hasExperimental {
			if _, ok := subcmd.Tags["experimental"]; ok {
				subcmd.Hidden = true
			}
		}
		// hide subcommands not supported by the server
		if subcmdVersion, ok := subcmd.Tags["version"]; ok && versions.LessThan(clientVersion, subcmdVersion) {
			subcmd.Hidden = true
		}
	}
}
// isSupported reports whether cmd (or any of its parents) can run against the
// connected server: experimental commands require experimental features to be
// enabled, and version-tagged commands require a minimum server API version.
//
// Fix: removed two leftover debug statements — fmt.Print("e") and
// fmt.Print("ERR: api version\n") — that wrote stray output to stdout on the
// live PersistentPreRunE path.
func isSupported(cmd *cobra.Command, clientVersion string, hasExperimental bool) error {
	// We check recursively so that, e.g., `storageos volume ls` will return the same output as `storageos volume`
	if !hasExperimental {
		for curr := cmd; curr != nil; curr = curr.Parent() {
			if _, ok := curr.Tags["experimental"]; ok {
				return errors.New("only supported on a StorageOS with experimental features enabled")
			}
		}
	}
	if cmdVersion, ok := cmd.Tags["version"]; ok && versions.LessThan(clientVersion, cmdVersion) {
		return fmt.Errorf("requires API version %s, but the StorageOS API version is %s", cmdVersion, clientVersion)
	}
	// Flag-level support checks are currently disabled; kept for reference.
	// errs := []string{}
	// cmd.Flags().VisitAll(func(f *pflag.Flag) {
	// 	if f.Changed {
	// 		if !isFlagSupported(f, clientVersion) {
	// 			errs = append(errs, fmt.Sprintf("\"--%s\" requires API version %s, but the StorageOS API version is %s", f.Name, getFlagVersion(f), clientVersion))
	// 			return
	// 		}
	// 		if _, ok := f.Annotations["experimental"]; ok && !hasExperimental {
	// 			errs = append(errs, fmt.Sprintf("\"--%s\" is only supported on StorageOS with experimental features enabled", f.Name))
	// 		}
	// 	}
	// })
	// if len(errs) > 0 {
	// 	return errors.New(strings.Join(errs, "\n"))
	// }
	return nil
}
// getFlagVersion returns the minimum API version annotated on f, or "" when
// the flag carries no version requirement.
func getFlagVersion(f *pflag.Flag) string {
	ann, ok := f.Annotations["version"]
	if ok && len(ann) == 1 {
		return ann[0]
	}
	return ""
}
// isFlagSupported reports whether the connected server's API version is high
// enough for flag f; flags without a version annotation are always supported.
func isFlagSupported(f *pflag.Flag, clientVersion string) bool {
	v := getFlagVersion(f)
	if v == "" {
		return true
	}
	return versions.GreaterThanOrEqualTo(clientVersion, v)
}
// hasTags reports whether cmd or any of its ancestors carries at least one tag.
func hasTags(cmd *cobra.Command) bool {
	for c := cmd; c != nil; c = c.Parent() {
		if len(c.Tags) != 0 {
			return true
		}
	}
	return false
}
| {
return
} | conditional_block |
storageos.go | package main
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/dnephin/cobra"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/docker/docker/pkg/term"
"github.com/storageos/go-api/serror"
"github.com/storageos/go-api/types/versions"
"github.com/storageos/go-cli/cli"
"github.com/storageos/go-cli/cli/command"
"github.com/storageos/go-cli/cli/command/commands"
cliconfig "github.com/storageos/go-cli/cli/config"
"github.com/storageos/go-cli/cli/debug"
cliflags "github.com/storageos/go-cli/cli/flags"
"github.com/storageos/go-cli/version"
)
var shortDesc = `Converged storage for containers.
By using this product, you are agreeing to the terms of the the StorageOS Ltd. End
User Subscription Agreement (EUSA) found at: https://storageos.com/legal/#eusa
To be notified about stable releases and latest features, sign up at https://my.storageos.com.
`
// Disable debugging (logging to stdout) until enabled. In normal use we don't
// want logrus messages going to stdout/stderr.
func init() |
// isCoreOS reports whether the host looks like Container Linux (CoreOS),
// based on the DISTRIB_ID entry in /etc/lsb-release.
func isCoreOS() (bool, error) {
	data, err := ioutil.ReadFile("/etc/lsb-release")
	if err != nil {
		return false, err
	}
	coreos := strings.Contains(string(data), "DISTRIB_ID=CoreOS")
	return coreos, nil
}
// isInContainer reports whether this process appears to be running inside a
// Docker container, judged from PID 1's cgroup membership.
// TODO: How reliable is this method of detection. Is there a better way?
func isInContainer() (bool, error) {
	data, err := ioutil.ReadFile("/proc/1/cgroup")
	if err != nil {
		return false, err
	}
	inDocker := strings.Contains(string(data), "docker")
	return inDocker, nil
}
// verfyHostPlatform rejects environments where the CLI is known not to work.
// NOTE(review): the name is misspelled ("verfy"); renaming requires touching
// all call sites, so it is left as-is here.
func verfyHostPlatform() error {
	// Detect native execution on coreOS
	// coreOS should not run user-land programs, and we will not work there (outside a container)
	// Detection errors are deliberately ignored: this is a best-effort check.
	if coreos, err := isCoreOS(); err == nil && coreos {
		// If we dont think we are in a container, fail and warn the user
		if inContainer, err := isInContainer(); err == nil && !inContainer {
			return errors.New("to use the StorageOS CLI on Container Linux, you need to run the storageos/cli image")
		}
	}
	return nil
}
// newStorageOSCommand builds the root `storageos` command: cobra settings,
// the version/config flags, subcommand registration, and the pre-run hooks
// that verify the host platform and feature support before any command runs.
func newStorageOSCommand(storageosCli *command.StorageOSCli) *cobra.Command {
	opts := cliflags.NewClientOptions()
	// flags is captured by the closures below; it is assigned after cmd is built.
	var flags *pflag.FlagSet
	cmd := &cobra.Command{
		Use:              "storageos [OPTIONS] COMMAND [ARG...]",
		Short:            shortDesc,
		SilenceUsage:     true,
		SilenceErrors:    true,
		TraverseChildren: true,
		Args:             noArgs,
		// Bare `storageos` either prints the version (-v) or shows help.
		RunE: func(cmd *cobra.Command, args []string) error {
			if opts.Version {
				showVersion()
				return nil
			}
			return storageosCli.ShowHelp(cmd, args)
		},
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			// verify the host platform, exit immediately if known to be incompatible
			if err := verfyHostPlatform(); err != nil {
				return err
			}
			// flags must be the top-level command flags, not cmd.Flags()
			opts.Common.SetDefaultOptions(flags)
			preRun(opts)
			if err := storageosCli.Initialize(opts); err != nil {
				return err
			}
			return isSupported(cmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental())
		},
	}
	cli.SetupRootCommand(cmd)
	flags = cmd.Flags()
	flags.BoolVarP(&opts.Version, "version", "v", false, "Print version information and quit")
	flags.StringVar(&opts.ConfigDir, "config", cliconfig.Dir(), "Location of client config files")
	opts.Common.InstallFlags(flags)
	setFlagErrorFunc(storageosCli, cmd, flags, opts)
	// setHelpFunc(storageosCli, cmd, flags, opts)
	cmd.SetOutput(storageosCli.Out())
	commands.AddCommands(cmd, storageosCli)
	setValidateArgs(storageosCli, cmd, flags, opts)
	return cmd
}
// setFlagErrorFunc wraps cobra's FlagErrorFunc so that flag errors on
// unsupported or experimental commands surface the feature-support error
// instead of a confusing flag-parse message.
//
// When invoking `storageos volume --nonsense`, we need to make sure FlagErrorFunc return appropriate
// output if the feature is not supported.
// As above cli.SetupRootCommand(cmd) have already setup the FlagErrorFunc, we will add a pre-check before the FlagErrorFunc
// is called.
func setFlagErrorFunc(storageosCli *command.StorageOSCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
	flagErrorFunc := cmd.FlagErrorFunc()
	cmd.SetFlagErrorFunc(func(cmd *cobra.Command, err error) error {
		// Client may not be initialized yet on the --help/error path.
		initializeStorageOSCli(storageosCli, flags, opts)
		if err := isSupported(cmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental()); err != nil {
			return err
		}
		return flagErrorFunc(cmd, err)
	})
}
// setHelpFunc replaces the default help renderer so help for unsupported or
// experimental features reports an error, and hides anything the connected
// server cannot do.
//
// NOTE(review): currently unused — its call site in newStorageOSCommand is
// commented out — and it still contains leftover debug fmt.Printf lines
// ("VC:", "HE:", "ERRROR:") that should be removed before re-enabling.
func setHelpFunc(storageosCli *command.StorageOSCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
	cmd.SetHelpFunc(func(ccmd *cobra.Command, args []string) {
		initializeStorageOSCli(storageosCli, flags, opts)
		fmt.Printf("VC: %s\n", storageosCli.Client().ClientVersion())
		fmt.Printf("HE: %t\n", storageosCli.HasExperimental())
		if err := isSupported(ccmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental()); err != nil {
			fmt.Printf("ERRROR: %v\n", err)
			ccmd.Println(err)
			return
		}
		hideUnsupportedFeatures(ccmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental())
		if err := ccmd.Help(); err != nil {
			ccmd.Println(err)
		}
	})
}
// setValidateArgs wraps each tagged command's Args validator so that a
// feature-support error is reported before any argument-count error.
func setValidateArgs(storageosCli *command.StorageOSCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
	// The Args is handled by ValidateArgs in cobra, which does not allows a pre-hook.
	// As a result, here we replace the existing Args validation func to a wrapper,
	// where the wrapper will check to see if the feature is supported or not.
	// The Args validation error will only be returned if the feature is supported.
	visitAll(cmd, func(ccmd *cobra.Command) {
		// if there is no tags for a command or any of its parent,
		// there is no need to wrap the Args validation.
		if !hasTags(ccmd) {
			return
		}
		// Nothing to wrap when the command declares no validator of its own.
		if ccmd.Args == nil {
			return
		}
		cmdArgs := ccmd.Args
		ccmd.Args = func(cmd *cobra.Command, args []string) error {
			initializeStorageOSCli(storageosCli, flags, opts)
			if err := isSupported(cmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental()); err != nil {
				return err
			}
			return cmdArgs(cmd, args)
		}
	})
}
// initializeStorageOSCli lazily initializes the CLI client. When cobra runs
// --help, PersistentPreRunE is skipped, so helpers that need a client call
// this first; it is a no-op once a client exists.
// NOTE(review): the error from Initialize is discarded here — presumably
// tolerable on help paths, but confirm failures surface elsewhere.
func initializeStorageOSCli(storageosCli *command.StorageOSCli, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
	if storageosCli.Client() == nil { // when using --help, PersistentPreRun is not called, so initialization is needed.
		// flags must be the top-level command flags, not cmd.Flags()
		opts.Common.SetDefaultOptions(flags)
		preRun(opts)
		storageosCli.Initialize(opts)
	}
}
// visitAll applies fn to root and every command beneath it, depth-first with
// children visited before their parent. This differs from cobra's VisitAll,
// which only walks the parent chain.
func visitAll(root *cobra.Command, fn func(*cobra.Command)) {
	children := root.Commands()
	for i := range children {
		visitAll(children[i], fn)
	}
	fn(root)
}
// noArgs rejects any positional arguments to the root command with a helpful
// error message; an empty argument list is accepted.
func noArgs(cmd *cobra.Command, args []string) error {
	if len(args) > 0 {
		return fmt.Errorf("storageos: '%s' is not a valid command.\nSee 'storageos --help'", args[0])
	}
	return nil
}
// main wires up terminal streams, builds the root command, executes it, and
// maps the error types the CLI can produce onto stderr output and exit codes.
func main() {
	// Set terminal emulation based on platform as required.
	stdin, stdout, stderr := term.StdStreams()
	logrus.SetOutput(stderr)
	storageosCli := command.NewStorageOSCli(stdin, stdout, stderr)
	cmd := newStorageOSCommand(storageosCli)
	if err := cmd.Execute(); err != nil {
		// StorageOS API errors carry a message, an underlying cause, and
		// optional help text; print whichever parts are present.
		if customError, ok := err.(serror.StorageOSError); ok {
			if msg := customError.String(); msg != "" {
				fmt.Fprintf(stderr, "error: %s\n", msg)
			}
			if cause := customError.Err(); cause != nil {
				fmt.Fprintf(stderr, "\ncaused by: %s\n", cause)
			}
			if help := customError.Help(); help != "" {
				fmt.Fprintf(stderr, "\n%s\n", help)
			}
			os.Exit(1)
		}
		if sterr, ok := err.(cli.StatusError); ok {
			if sterr.Status != "" {
				fmt.Fprintln(stderr, sterr.Status)
			}
			// StatusError should only be used for errors, and all errors should
			// have a non-zero exit status, so never exit with 0
			if sterr.StatusCode == 0 {
				os.Exit(1)
			}
			os.Exit(sterr.StatusCode)
		}
		// Any other error: print it and fail with a generic exit code.
		fmt.Fprintln(stderr, err)
		os.Exit(1)
	}
}
func showVersion() {
fmt.Printf("StorageOS version %s, build %s\n", version.Version, version.Revision)
// TODO: better version
// fmt.Printf("StorageOS API version %s\n", storageosCli.Client().ClientVersion())
}
func preRun(opts *cliflags.ClientOptions) {
cliflags.SetLogLevel(opts.Common.LogLevel)
if opts.ConfigDir != "" {
cliconfig.SetDir(opts.ConfigDir)
}
if opts.Common.Debug {
debug.Enable()
}
}
func hideUnsupportedFeatures(cmd *cobra.Command, clientVersion string, hasExperimental bool) {
cmd.Flags().VisitAll(func(f *pflag.Flag) {
// hide experimental flags
if !hasExperimental {
if _, ok := f.Annotations["experimental"]; ok {
f.Hidden = true
}
}
// hide flags not supported by the server
if !isFlagSupported(f, clientVersion) {
f.Hidden = true
}
})
for _, subcmd := range cmd.Commands() {
// hide experimental subcommands
if !hasExperimental {
if _, ok := subcmd.Tags["experimental"]; ok {
subcmd.Hidden = true
}
}
// hide subcommands not supported by the server
if subcmdVersion, ok := subcmd.Tags["version"]; ok && versions.LessThan(clientVersion, subcmdVersion) {
subcmd.Hidden = true
}
}
}
func isSupported(cmd *cobra.Command, clientVersion string, hasExperimental bool) error {
// We check recursively so that, e.g., `storageos volume ls` will return the same output as `storageos volume`
if !hasExperimental {
for curr := cmd; curr != nil; curr = curr.Parent() {
if _, ok := curr.Tags["experimental"]; ok {
fmt.Print("e")
return errors.New("only supported on a StorageOS with experimental features enabled")
}
}
}
if cmdVersion, ok := cmd.Tags["version"]; ok && versions.LessThan(clientVersion, cmdVersion) {
fmt.Print("ERR: api version\n")
return fmt.Errorf("requires API version %s, but the StorageOS API version is %s", cmdVersion, clientVersion)
}
// errs := []string{}
// cmd.Flags().VisitAll(func(f *pflag.Flag) {
// if f.Changed {
// if !isFlagSupported(f, clientVersion) {
// errs = append(errs, fmt.Sprintf("\"--%s\" requires API version %s, but the StorageOS API version is %s", f.Name, getFlagVersion(f), clientVersion))
// return
// }
// if _, ok := f.Annotations["experimental"]; ok && !hasExperimental {
// errs = append(errs, fmt.Sprintf("\"--%s\" is only supported on StorageOS with experimental features enabled", f.Name))
// }
// }
// })
// if len(errs) > 0 {
// return errors.New(strings.Join(errs, "\n"))
// }
return nil
}
func getFlagVersion(f *pflag.Flag) string {
if flagVersion, ok := f.Annotations["version"]; ok && len(flagVersion) == 1 {
return flagVersion[0]
}
return ""
}
func isFlagSupported(f *pflag.Flag, clientVersion string) bool {
if v := getFlagVersion(f); v != "" {
return versions.GreaterThanOrEqualTo(clientVersion, v)
}
return true
}
// hasTags return true if any of the command's parents has tags
func hasTags(cmd *cobra.Command) bool {
for curr := cmd; curr != nil; curr = curr.Parent() {
if len(curr.Tags) > 0 {
return true
}
}
return false
}
| {
debug.Disable()
} | identifier_body |
storageos.go | package main
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/dnephin/cobra"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/docker/docker/pkg/term"
"github.com/storageos/go-api/serror"
"github.com/storageos/go-api/types/versions"
"github.com/storageos/go-cli/cli"
"github.com/storageos/go-cli/cli/command"
"github.com/storageos/go-cli/cli/command/commands"
cliconfig "github.com/storageos/go-cli/cli/config"
"github.com/storageos/go-cli/cli/debug"
cliflags "github.com/storageos/go-cli/cli/flags"
"github.com/storageos/go-cli/version"
)
var shortDesc = `Converged storage for containers.
By using this product, you are agreeing to the terms of the the StorageOS Ltd. End
User Subscription Agreement (EUSA) found at: https://storageos.com/legal/#eusa
To be notified about stable releases and latest features, sign up at https://my.storageos.com.
`
// Disable degugging (logging to stdout) until enabled. In normal use we don't
// want logrus messages going to stdout/stderr.
func init() {
debug.Disable()
}
func isCoreOS() (bool, error) {
f, err := ioutil.ReadFile("/etc/lsb-release")
if err != nil {
return false, err
}
return strings.Contains(string(f), "DISTRIB_ID=CoreOS"), nil
}
func isInContainer() (bool, error) {
f, err := ioutil.ReadFile("/proc/1/cgroup")
if err != nil {
return false, err
}
// TODO: How reliable is this method of detection. Is there a better way?
return strings.Contains(string(f), "docker"), nil
}
func verfyHostPlatform() error {
// Detect native execution on coreOS
// coreOS should not run user-land programs, and we will not work there (outside a container)
if coreos, err := isCoreOS(); err == nil && coreos {
// If we dont think we are in a container, fail and warn the user
if inContainer, err := isInContainer(); err == nil && !inContainer {
return errors.New("to use the StorageOS CLI on Container Linux, you need to run the storageos/cli image")
}
}
return nil
}
func | (storageosCli *command.StorageOSCli) *cobra.Command {
opts := cliflags.NewClientOptions()
var flags *pflag.FlagSet
cmd := &cobra.Command{
Use: "storageos [OPTIONS] COMMAND [ARG...]",
Short: shortDesc,
SilenceUsage: true,
SilenceErrors: true,
TraverseChildren: true,
Args: noArgs,
RunE: func(cmd *cobra.Command, args []string) error {
if opts.Version {
showVersion()
return nil
}
return storageosCli.ShowHelp(cmd, args)
},
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
// verify the host platform, exit immediately if known to be incompatible
if err := verfyHostPlatform(); err != nil {
return err
}
// flags must be the top-level command flags, not cmd.Flags()
opts.Common.SetDefaultOptions(flags)
preRun(opts)
if err := storageosCli.Initialize(opts); err != nil {
return err
}
return isSupported(cmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental())
},
}
cli.SetupRootCommand(cmd)
flags = cmd.Flags()
flags.BoolVarP(&opts.Version, "version", "v", false, "Print version information and quit")
flags.StringVar(&opts.ConfigDir, "config", cliconfig.Dir(), "Location of client config files")
opts.Common.InstallFlags(flags)
setFlagErrorFunc(storageosCli, cmd, flags, opts)
// setHelpFunc(storageosCli, cmd, flags, opts)
cmd.SetOutput(storageosCli.Out())
commands.AddCommands(cmd, storageosCli)
setValidateArgs(storageosCli, cmd, flags, opts)
return cmd
}
func setFlagErrorFunc(storageosCli *command.StorageOSCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
// When invoking `storageos volume --nonsense`, we need to make sure FlagErrorFunc return appropriate
// output if the feature is not supported.
// As above cli.SetupRootCommand(cmd) have already setup the FlagErrorFunc, we will add a pre-check before the FlagErrorFunc
// is called.
flagErrorFunc := cmd.FlagErrorFunc()
cmd.SetFlagErrorFunc(func(cmd *cobra.Command, err error) error {
initializeStorageOSCli(storageosCli, flags, opts)
if err := isSupported(cmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental()); err != nil {
return err
}
return flagErrorFunc(cmd, err)
})
}
func setHelpFunc(storageosCli *command.StorageOSCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
cmd.SetHelpFunc(func(ccmd *cobra.Command, args []string) {
initializeStorageOSCli(storageosCli, flags, opts)
fmt.Printf("VC: %s\n", storageosCli.Client().ClientVersion())
fmt.Printf("HE: %t\n", storageosCli.HasExperimental())
if err := isSupported(ccmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental()); err != nil {
fmt.Printf("ERRROR: %v\n", err)
ccmd.Println(err)
return
}
hideUnsupportedFeatures(ccmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental())
if err := ccmd.Help(); err != nil {
ccmd.Println(err)
}
})
}
func setValidateArgs(storageosCli *command.StorageOSCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
// The Args is handled by ValidateArgs in cobra, which does not allows a pre-hook.
// As a result, here we replace the existing Args validation func to a wrapper,
// where the wrapper will check to see if the feature is supported or not.
// The Args validation error will only be returned if the feature is supported.
visitAll(cmd, func(ccmd *cobra.Command) {
// if there is no tags for a command or any of its parent,
// there is no need to wrap the Args validation.
if !hasTags(ccmd) {
return
}
if ccmd.Args == nil {
return
}
cmdArgs := ccmd.Args
ccmd.Args = func(cmd *cobra.Command, args []string) error {
initializeStorageOSCli(storageosCli, flags, opts)
if err := isSupported(cmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental()); err != nil {
return err
}
return cmdArgs(cmd, args)
}
})
}
func initializeStorageOSCli(storageosCli *command.StorageOSCli, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
if storageosCli.Client() == nil { // when using --help, PersistentPreRun is not called, so initialization is needed.
// flags must be the top-level command flags, not cmd.Flags()
opts.Common.SetDefaultOptions(flags)
preRun(opts)
storageosCli.Initialize(opts)
}
}
// visitAll will traverse all commands from the root.
// This is different from the VisitAll of cobra.Command where only parents
// are checked.
func visitAll(root *cobra.Command, fn func(*cobra.Command)) {
for _, cmd := range root.Commands() {
visitAll(cmd, fn)
}
fn(root)
}
func noArgs(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return nil
}
return fmt.Errorf("storageos: '%s' is not a valid command.\nSee 'storageos --help'", args[0])
}
func main() {
// Set terminal emulation based on platform as required.
stdin, stdout, stderr := term.StdStreams()
logrus.SetOutput(stderr)
storageosCli := command.NewStorageOSCli(stdin, stdout, stderr)
cmd := newStorageOSCommand(storageosCli)
if err := cmd.Execute(); err != nil {
if customError, ok := err.(serror.StorageOSError); ok {
if msg := customError.String(); msg != "" {
fmt.Fprintf(stderr, "error: %s\n", msg)
}
if cause := customError.Err(); cause != nil {
fmt.Fprintf(stderr, "\ncaused by: %s\n", cause)
}
if help := customError.Help(); help != "" {
fmt.Fprintf(stderr, "\n%s\n", help)
}
os.Exit(1)
}
if sterr, ok := err.(cli.StatusError); ok {
if sterr.Status != "" {
fmt.Fprintln(stderr, sterr.Status)
}
// StatusError should only be used for errors, and all errors should
// have a non-zero exit status, so never exit with 0
if sterr.StatusCode == 0 {
os.Exit(1)
}
os.Exit(sterr.StatusCode)
}
fmt.Fprintln(stderr, err)
os.Exit(1)
}
}
func showVersion() {
fmt.Printf("StorageOS version %s, build %s\n", version.Version, version.Revision)
// TODO: better version
// fmt.Printf("StorageOS API version %s\n", storageosCli.Client().ClientVersion())
}
func preRun(opts *cliflags.ClientOptions) {
cliflags.SetLogLevel(opts.Common.LogLevel)
if opts.ConfigDir != "" {
cliconfig.SetDir(opts.ConfigDir)
}
if opts.Common.Debug {
debug.Enable()
}
}
func hideUnsupportedFeatures(cmd *cobra.Command, clientVersion string, hasExperimental bool) {
cmd.Flags().VisitAll(func(f *pflag.Flag) {
// hide experimental flags
if !hasExperimental {
if _, ok := f.Annotations["experimental"]; ok {
f.Hidden = true
}
}
// hide flags not supported by the server
if !isFlagSupported(f, clientVersion) {
f.Hidden = true
}
})
for _, subcmd := range cmd.Commands() {
// hide experimental subcommands
if !hasExperimental {
if _, ok := subcmd.Tags["experimental"]; ok {
subcmd.Hidden = true
}
}
// hide subcommands not supported by the server
if subcmdVersion, ok := subcmd.Tags["version"]; ok && versions.LessThan(clientVersion, subcmdVersion) {
subcmd.Hidden = true
}
}
}
func isSupported(cmd *cobra.Command, clientVersion string, hasExperimental bool) error {
// We check recursively so that, e.g., `storageos volume ls` will return the same output as `storageos volume`
if !hasExperimental {
for curr := cmd; curr != nil; curr = curr.Parent() {
if _, ok := curr.Tags["experimental"]; ok {
fmt.Print("e")
return errors.New("only supported on a StorageOS with experimental features enabled")
}
}
}
if cmdVersion, ok := cmd.Tags["version"]; ok && versions.LessThan(clientVersion, cmdVersion) {
fmt.Print("ERR: api version\n")
return fmt.Errorf("requires API version %s, but the StorageOS API version is %s", cmdVersion, clientVersion)
}
// errs := []string{}
// cmd.Flags().VisitAll(func(f *pflag.Flag) {
// if f.Changed {
// if !isFlagSupported(f, clientVersion) {
// errs = append(errs, fmt.Sprintf("\"--%s\" requires API version %s, but the StorageOS API version is %s", f.Name, getFlagVersion(f), clientVersion))
// return
// }
// if _, ok := f.Annotations["experimental"]; ok && !hasExperimental {
// errs = append(errs, fmt.Sprintf("\"--%s\" is only supported on StorageOS with experimental features enabled", f.Name))
// }
// }
// })
// if len(errs) > 0 {
// return errors.New(strings.Join(errs, "\n"))
// }
return nil
}
func getFlagVersion(f *pflag.Flag) string {
if flagVersion, ok := f.Annotations["version"]; ok && len(flagVersion) == 1 {
return flagVersion[0]
}
return ""
}
func isFlagSupported(f *pflag.Flag, clientVersion string) bool {
if v := getFlagVersion(f); v != "" {
return versions.GreaterThanOrEqualTo(clientVersion, v)
}
return true
}
// hasTags return true if any of the command's parents has tags
func hasTags(cmd *cobra.Command) bool {
for curr := cmd; curr != nil; curr = curr.Parent() {
if len(curr.Tags) > 0 {
return true
}
}
return false
}
| newStorageOSCommand | identifier_name |
storageos.go | package main
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/dnephin/cobra"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/docker/docker/pkg/term"
"github.com/storageos/go-api/serror"
"github.com/storageos/go-api/types/versions"
"github.com/storageos/go-cli/cli"
"github.com/storageos/go-cli/cli/command"
"github.com/storageos/go-cli/cli/command/commands"
cliconfig "github.com/storageos/go-cli/cli/config"
"github.com/storageos/go-cli/cli/debug"
cliflags "github.com/storageos/go-cli/cli/flags"
"github.com/storageos/go-cli/version"
)
var shortDesc = `Converged storage for containers.
By using this product, you are agreeing to the terms of the the StorageOS Ltd. End
User Subscription Agreement (EUSA) found at: https://storageos.com/legal/#eusa
To be notified about stable releases and latest features, sign up at https://my.storageos.com.
`
// Disable degugging (logging to stdout) until enabled. In normal use we don't
// want logrus messages going to stdout/stderr.
func init() {
debug.Disable()
}
func isCoreOS() (bool, error) {
f, err := ioutil.ReadFile("/etc/lsb-release")
if err != nil {
return false, err
}
return strings.Contains(string(f), "DISTRIB_ID=CoreOS"), nil
}
func isInContainer() (bool, error) {
f, err := ioutil.ReadFile("/proc/1/cgroup")
if err != nil {
return false, err
}
// TODO: How reliable is this method of detection. Is there a better way?
return strings.Contains(string(f), "docker"), nil
}
func verfyHostPlatform() error {
// Detect native execution on coreOS
// coreOS should not run user-land programs, and we will not work there (outside a container)
if coreos, err := isCoreOS(); err == nil && coreos {
// If we dont think we are in a container, fail and warn the user
if inContainer, err := isInContainer(); err == nil && !inContainer {
return errors.New("to use the StorageOS CLI on Container Linux, you need to run the storageos/cli image")
}
}
return nil
}
func newStorageOSCommand(storageosCli *command.StorageOSCli) *cobra.Command {
opts := cliflags.NewClientOptions()
var flags *pflag.FlagSet
cmd := &cobra.Command{
Use: "storageos [OPTIONS] COMMAND [ARG...]",
Short: shortDesc,
SilenceUsage: true,
SilenceErrors: true,
TraverseChildren: true,
Args: noArgs,
RunE: func(cmd *cobra.Command, args []string) error {
if opts.Version {
showVersion()
return nil
}
return storageosCli.ShowHelp(cmd, args)
},
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
// verify the host platform, exit immediately if known to be incompatible
if err := verfyHostPlatform(); err != nil {
return err
}
// flags must be the top-level command flags, not cmd.Flags()
opts.Common.SetDefaultOptions(flags)
preRun(opts)
if err := storageosCli.Initialize(opts); err != nil {
return err
}
return isSupported(cmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental())
},
}
cli.SetupRootCommand(cmd)
flags = cmd.Flags()
flags.BoolVarP(&opts.Version, "version", "v", false, "Print version information and quit")
flags.StringVar(&opts.ConfigDir, "config", cliconfig.Dir(), "Location of client config files")
opts.Common.InstallFlags(flags)
setFlagErrorFunc(storageosCli, cmd, flags, opts)
// setHelpFunc(storageosCli, cmd, flags, opts)
cmd.SetOutput(storageosCli.Out())
commands.AddCommands(cmd, storageosCli)
setValidateArgs(storageosCli, cmd, flags, opts)
return cmd
}
func setFlagErrorFunc(storageosCli *command.StorageOSCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
// When invoking `storageos volume --nonsense`, we need to make sure FlagErrorFunc return appropriate
// output if the feature is not supported.
// As above cli.SetupRootCommand(cmd) have already setup the FlagErrorFunc, we will add a pre-check before the FlagErrorFunc
// is called.
flagErrorFunc := cmd.FlagErrorFunc()
cmd.SetFlagErrorFunc(func(cmd *cobra.Command, err error) error {
initializeStorageOSCli(storageosCli, flags, opts)
if err := isSupported(cmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental()); err != nil {
return err
}
return flagErrorFunc(cmd, err)
})
}
func setHelpFunc(storageosCli *command.StorageOSCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
cmd.SetHelpFunc(func(ccmd *cobra.Command, args []string) {
initializeStorageOSCli(storageosCli, flags, opts)
fmt.Printf("VC: %s\n", storageosCli.Client().ClientVersion())
fmt.Printf("HE: %t\n", storageosCli.HasExperimental())
if err := isSupported(ccmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental()); err != nil {
fmt.Printf("ERRROR: %v\n", err)
ccmd.Println(err)
return
}
hideUnsupportedFeatures(ccmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental())
if err := ccmd.Help(); err != nil {
ccmd.Println(err)
}
})
}
func setValidateArgs(storageosCli *command.StorageOSCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
// The Args is handled by ValidateArgs in cobra, which does not allows a pre-hook.
// As a result, here we replace the existing Args validation func to a wrapper,
// where the wrapper will check to see if the feature is supported or not.
// The Args validation error will only be returned if the feature is supported.
visitAll(cmd, func(ccmd *cobra.Command) {
// if there is no tags for a command or any of its parent,
// there is no need to wrap the Args validation.
if !hasTags(ccmd) {
return
}
if ccmd.Args == nil {
return
}
cmdArgs := ccmd.Args
ccmd.Args = func(cmd *cobra.Command, args []string) error {
initializeStorageOSCli(storageosCli, flags, opts)
if err := isSupported(cmd, storageosCli.Client().ClientVersion(), storageosCli.HasExperimental()); err != nil {
return err
}
return cmdArgs(cmd, args)
}
})
}
func initializeStorageOSCli(storageosCli *command.StorageOSCli, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
if storageosCli.Client() == nil { // when using --help, PersistentPreRun is not called, so initialization is needed.
// flags must be the top-level command flags, not cmd.Flags()
opts.Common.SetDefaultOptions(flags)
preRun(opts)
storageosCli.Initialize(opts)
}
}
// visitAll will traverse all commands from the root.
// This is different from the VisitAll of cobra.Command where only parents
// are checked.
func visitAll(root *cobra.Command, fn func(*cobra.Command)) {
for _, cmd := range root.Commands() {
visitAll(cmd, fn)
}
fn(root)
}
func noArgs(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return nil
}
return fmt.Errorf("storageos: '%s' is not a valid command.\nSee 'storageos --help'", args[0])
}
func main() {
// Set terminal emulation based on platform as required.
stdin, stdout, stderr := term.StdStreams()
logrus.SetOutput(stderr)
storageosCli := command.NewStorageOSCli(stdin, stdout, stderr)
cmd := newStorageOSCommand(storageosCli)
if err := cmd.Execute(); err != nil {
if customError, ok := err.(serror.StorageOSError); ok {
if msg := customError.String(); msg != "" {
fmt.Fprintf(stderr, "error: %s\n", msg)
}
if cause := customError.Err(); cause != nil {
fmt.Fprintf(stderr, "\ncaused by: %s\n", cause)
}
if help := customError.Help(); help != "" {
fmt.Fprintf(stderr, "\n%s\n", help)
}
os.Exit(1)
}
if sterr, ok := err.(cli.StatusError); ok {
if sterr.Status != "" {
fmt.Fprintln(stderr, sterr.Status)
}
// StatusError should only be used for errors, and all errors should
// have a non-zero exit status, so never exit with 0
if sterr.StatusCode == 0 {
os.Exit(1)
}
os.Exit(sterr.StatusCode)
}
fmt.Fprintln(stderr, err)
os.Exit(1)
}
}
func showVersion() {
fmt.Printf("StorageOS version %s, build %s\n", version.Version, version.Revision)
// TODO: better version
// fmt.Printf("StorageOS API version %s\n", storageosCli.Client().ClientVersion())
}
func preRun(opts *cliflags.ClientOptions) {
cliflags.SetLogLevel(opts.Common.LogLevel)
if opts.ConfigDir != "" {
cliconfig.SetDir(opts.ConfigDir)
}
if opts.Common.Debug {
debug.Enable()
}
}
func hideUnsupportedFeatures(cmd *cobra.Command, clientVersion string, hasExperimental bool) {
cmd.Flags().VisitAll(func(f *pflag.Flag) {
// hide experimental flags
if !hasExperimental {
if _, ok := f.Annotations["experimental"]; ok {
f.Hidden = true
}
}
// hide flags not supported by the server
if !isFlagSupported(f, clientVersion) {
f.Hidden = true
}
})
for _, subcmd := range cmd.Commands() {
// hide experimental subcommands
if !hasExperimental {
if _, ok := subcmd.Tags["experimental"]; ok {
subcmd.Hidden = true
}
}
// hide subcommands not supported by the server
if subcmdVersion, ok := subcmd.Tags["version"]; ok && versions.LessThan(clientVersion, subcmdVersion) {
subcmd.Hidden = true
}
}
}
func isSupported(cmd *cobra.Command, clientVersion string, hasExperimental bool) error {
// We check recursively so that, e.g., `storageos volume ls` will return the same output as `storageos volume`
if !hasExperimental {
for curr := cmd; curr != nil; curr = curr.Parent() {
if _, ok := curr.Tags["experimental"]; ok {
fmt.Print("e")
return errors.New("only supported on a StorageOS with experimental features enabled")
}
}
}
if cmdVersion, ok := cmd.Tags["version"]; ok && versions.LessThan(clientVersion, cmdVersion) {
fmt.Print("ERR: api version\n")
return fmt.Errorf("requires API version %s, but the StorageOS API version is %s", cmdVersion, clientVersion)
}
// errs := []string{}
// cmd.Flags().VisitAll(func(f *pflag.Flag) {
// if f.Changed {
// if !isFlagSupported(f, clientVersion) {
// errs = append(errs, fmt.Sprintf("\"--%s\" requires API version %s, but the StorageOS API version is %s", f.Name, getFlagVersion(f), clientVersion))
// return
// }
// if _, ok := f.Annotations["experimental"]; ok && !hasExperimental {
// errs = append(errs, fmt.Sprintf("\"--%s\" is only supported on StorageOS with experimental features enabled", f.Name))
// }
// }
// }) | // }
return nil
}
func getFlagVersion(f *pflag.Flag) string {
if flagVersion, ok := f.Annotations["version"]; ok && len(flagVersion) == 1 {
return flagVersion[0]
}
return ""
}
func isFlagSupported(f *pflag.Flag, clientVersion string) bool {
if v := getFlagVersion(f); v != "" {
return versions.GreaterThanOrEqualTo(clientVersion, v)
}
return true
}
// hasTags return true if any of the command's parents has tags
func hasTags(cmd *cobra.Command) bool {
for curr := cmd; curr != nil; curr = curr.Parent() {
if len(curr.Tags) > 0 {
return true
}
}
return false
} | // if len(errs) > 0 {
// return errors.New(strings.Join(errs, "\n")) | random_line_split |
pool.rs | use std::collections::BinaryHeap;
use std::iter::IntoIterator;
use std::{marker, mem};
use std::sync::{mpsc, atomic, Mutex, Arc};
use std::thread;
use fnbox::FnBox;
use crossbeam::{self, Scope};
type JobInner<'b> = Box<for<'a> FnBox<&'a [mpsc::Sender<Work>]> + Send + 'b>;
struct Job {
func: JobInner<'static>,
}
/// A thread pool.
///
/// This pool allows one to spawn several threads in one go, and then
/// execute any number of "short-lifetime" jobs on those threads,
/// without having to pay the thread spawning cost, or risk exhausting
/// system resources.
///
/// The pool currently consists of some number of worker threads
/// (dynamic, chosen at creation time) along with a single supervisor
/// thread. The synchronisation overhead is currently very large.
///
/// # "Short-lifetime"?
///
/// Jobs submitted to this pool can have any lifetime at all, that is,
/// the closures passed in (and elements of iterators used, etc.) can
/// have borrows pointing into arbitrary stack frames, even stack
/// frames that don't outlive the pool itself. This differs to
/// something like
/// [`scoped_threadpool`](https://crates.io/crates/scoped_threadpool),
/// where the jobs must outlive the pool.
///
/// This extra flexibility is achieved with careful unsafe code, by
/// exposing an API that is a generalised version of
/// [`crossbeam`](https://github.com/aturon/crossbeam) `Scope::spawn`
/// and the old `std::thread::scoped`: at the lowest-level a submitted
/// job returns a `JobHandle` token that ensures that job is finished
/// before any data the job might reference is invalidated
/// (i.e. manages the lifetimes). Higher-level functions will usually
/// wrap or otherwise hide the handle.
///
/// However, this comes at a cost: for easy of implementation `Pool`
/// currently only exposes "batch" jobs like `for_` and `map` and
/// these jobs take control of the whole pool. That is, one cannot
/// easily incrementally submit arbitrary closures to execute on this
/// thread pool, which is functionality that `threadpool::ScopedPool`
/// offers.
///
/// # Example
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// use simple_parallel::Pool;
///
/// // a function that takes some arbitrary pool and uses the pool to
/// // manipulate data in its own stack frame.
/// fn do_work(pool: &mut Pool) {
/// let mut v = [0; 8];
/// // set each element, in parallel
/// pool.for_(&mut v, |element| *element = 3);
///
/// let w = [2, 0, 1, 5, 0, 3, 0, 3];
///
/// // add the two arrays, in parallel
/// let z: Vec<_> = crossbeam::scope(|scope| {
/// pool.map(scope, v.iter().zip(w.iter()), |(x, y)| *x + *y).collect()
/// });
///
/// assert_eq!(z, &[5, 3, 4, 8, 3, 6, 3, 6]);
/// }
///
/// # fn main() {
/// let mut pool = Pool::new(4);
/// do_work(&mut pool);
/// # }
/// ```
pub struct Pool {
job_queue: mpsc::Sender<(Option<Job>, mpsc::Sender<Result<(), ()>>)>,
job_status: Option<Arc<Mutex<JobStatus>>>,
n_threads: usize,
}
#[derive(Copy, Clone)]
struct WorkerId { n: usize }
type WorkInner<'a> = &'a mut (FnMut(WorkerId) + Send + 'a);
struct Work {
func: WorkInner<'static>
}
struct JobStatus {
wait: bool,
job_finished: mpsc::Receiver<Result<(), ()>>,
}
/// A token representing a job submitted to the thread pool.
///
/// This helps ensure that a job is finished before borrowed resources
/// in the job (and the pool itself) are invalidated.
///
/// If the job panics, this handle will ensure the main thread also
/// panics (either via `wait` or in the destructor).
pub struct JobHandle<'pool, 'f> {
pool: &'pool mut Pool,
status: Arc<Mutex<JobStatus>>,
_funcs: marker::PhantomData<&'f ()>,
}
impl JobStatus {
fn wait(&mut self) {
if self.wait {
self.wait = false;
self.job_finished.recv().unwrap().unwrap();
}
}
}
impl<'pool, 'f> JobHandle<'pool, 'f> {
/// Block until the job is finished.
///
/// # Panics
///
/// This will panic if the job panicked.
pub fn wait(&self) {
self.status.lock().unwrap().wait();
}
}
impl<'pool, 'f> Drop for JobHandle<'pool, 'f> {
fn drop(&mut self) {
self.wait();
self.pool.job_status = None;
}
}
impl Drop for Pool {
fn drop(&mut self) {
let (tx, rx) = mpsc::channel();
self.job_queue.send((None, tx)).unwrap();
rx.recv().unwrap().unwrap();
}
}
struct PanicCanary<'a> {
flag: &'a atomic::AtomicBool
}
impl<'a> Drop for PanicCanary<'a> {
fn drop(&mut self) {
if thread::panicking() {
self.flag.store(true, atomic::Ordering::SeqCst)
}
}
}
impl Pool {
/// Create a new thread pool with `n_threads` worker threads.
pub fn new(n_threads: usize) -> Pool {
let (tx, rx) = mpsc::channel::<(Option<Job>, mpsc::Sender<Result<(), ()>>)>();
thread::spawn(move || {
let panicked = Arc::new(atomic::AtomicBool::new(false));
let mut _guards = Vec::with_capacity(n_threads);
let mut txs = Vec::with_capacity(n_threads);
for i in 0..n_threads {
let id = WorkerId { n: i };
let (subtx, subrx) = mpsc::channel::<Work>();
txs.push(subtx);
let panicked = panicked.clone();
_guards.push(thread::spawn(move || {
let _canary = PanicCanary {
flag: &panicked
};
loop {
match subrx.recv() {
Ok(mut work) => {
(work.func)(id)
}
Err(_) => break,
}
}
}))
}
loop {
match rx.recv() {
Ok((Some(job), finished_tx)) => {
(job.func).call_box(&txs);
let job_panicked = panicked.load(atomic::Ordering::SeqCst);
let msg = if job_panicked { Err(()) } else { Ok(()) };
finished_tx.send(msg).unwrap();
if job_panicked { break }
}
Ok((None, finished_tx)) => {
finished_tx.send(Ok(())).unwrap();
break
}
Err(_) => break,
}
}
});
Pool {
job_queue: tx,
job_status: None,
n_threads: n_threads,
}
}
/// Execute `f` on each element of `iter`.
///
/// This panics if `f` panics, although the precise time and
/// number of elements consumed after the element that panics is
/// not specified.
///
/// # Examples
///
/// ```rust
/// use simple_parallel::Pool;
///
/// let mut pool = Pool::new(4);
///
/// let mut v = [0; 8];
///
/// // set each element, in parallel
/// pool.for_(&mut v, |element| *element = 3);
///
/// assert_eq!(v, [3; 8]);
/// ```
pub fn for_<Iter: IntoIterator, F>(&mut self, iter: Iter, ref f: F)
where Iter::Item: Send,
Iter: Send,
F: Fn(Iter::Item) + Sync
{
let (needwork_tx, needwork_rx) = mpsc::channel();
let mut work_txs = Vec::with_capacity(self.n_threads);
let mut work_rxs = Vec::with_capacity(self.n_threads);
for _ in 0..self.n_threads {
let (t, r) = mpsc::channel();
work_txs.push(t);
work_rxs.push(r);
}
let mut work_rxs = work_rxs.into_iter();
crossbeam::scope(|scope| unsafe {
let handle = self.execute(
scope,
needwork_tx,
|needwork_tx| {
let mut needwork_tx = Some(needwork_tx.clone());
let mut work_rx = Some(work_rxs.next().unwrap());
move |id| {
let work_rx = work_rx.take().unwrap();
let needwork = needwork_tx.take().unwrap();
loop {
needwork.send(id).unwrap();
match work_rx.recv() {
Ok(Some(elem)) => {
f(elem);
}
Ok(None) | Err(_) => break
}
}
}
},
move |needwork_tx| {
let mut iter = iter.into_iter().fuse();
drop(needwork_tx);
loop {
match needwork_rx.recv() {
// closed, done!
Err(_) => break,
Ok(id) => {
work_txs[id.n].send(iter.next()).unwrap();
}
}
}
});
handle.wait();
})
}
/// Execute `f` on each element in `iter` in parallel across the
/// pool's threads, with unspecified yield order.
///
/// This behaves like `map`, but does not make efforts to ensure
/// that the elements are returned in the order of `iter`, hence
/// this is cheaper.
///
/// The iterator yields `(uint, T)` tuples, where the `uint` is
/// the index of the element in the original iterator.
///
/// # Examples
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// # fn main() {
/// use simple_parallel::Pool;
///
/// let mut pool = Pool::new(4);
///
/// // adjust each element in parallel, and iterate over them as
/// // they are generated (or as close to that as possible)
/// crossbeam::scope(|scope| {
/// for (index, output) in pool.unordered_map(scope, 0..8, |i| i + 10) {
/// // each element is exactly 10 more than its original index
/// assert_eq!(output, index as i32 + 10);
/// }
/// })
/// # }
/// ```
pub fn unordered_map<'pool, 'a, I: IntoIterator, F, T>(&'pool mut self, scope: &Scope<'a>, iter: I, f: F)
-> UnorderedParMap<'pool, 'a, T>
where I: 'a + Send,
I::Item: Send + 'a,
F: 'a + Sync + Send + Fn(I::Item) -> T,
T: Send + 'a
{
let nthreads = self.n_threads;
let (needwork_tx, needwork_rx) = mpsc::channel();
let (work_tx, work_rx) = mpsc::channel();
struct Shared<Chan, Atom, F> {
work: Chan,
sent: Atom,
finished: Atom,
func: F,
}
let shared = Arc::new(Shared {
work: Mutex::new(work_rx),
sent: atomic::AtomicUsize::new(0),
finished: atomic::AtomicUsize::new(0),
func: f,
});
let (tx, rx) = mpsc::channel();
const INITIAL_FACTOR: usize = 4;
const BUFFER_FACTOR: usize = INITIAL_FACTOR / 2;
let handle = unsafe {
self.execute(scope, (needwork_tx, shared),
move |&mut (ref needwork_tx, ref shared)| {
let mut needwork_tx = Some(needwork_tx.clone());
let tx = tx.clone();
let shared = shared.clone();
move |_id| {
let needwork = needwork_tx.take().unwrap();
loop {
let data = {
let guard = shared.work.lock().unwrap();
guard.recv()
};
match data {
Ok(Some((idx, elem))) => {
let data = (shared.func)(elem);
let status = tx.send(Packet {
idx: idx, data: data
});
// the user disconnected,
// so there's no point
// computing more.
if status.is_err() {
let _ = needwork.send(true);
break
}
}
Ok(None) | Err(_) => {
break
}
};
let old =
shared.finished.fetch_add(1, atomic::Ordering::SeqCst);
let sent = shared.sent.load(atomic::Ordering::SeqCst);
if old + BUFFER_FACTOR * nthreads == sent {
if needwork.send(false).is_err() {
break
}
}
}
}
},
move |(needwork_tx, shared)| {
let mut iter = iter.into_iter().fuse().enumerate();
drop(needwork_tx);
let mut send_data = |n: usize| {
shared.sent.fetch_add(n, atomic::Ordering::SeqCst);
for _ in 0..n {
// TODO: maybe this could instead send
// several elements at a time, to
// reduce the number of
// allocations/atomic operations
// performed.
//
// Downside: work will be
// distributed chunkier.
let _ = work_tx.send(iter.next());
}
};
send_data(INITIAL_FACTOR * nthreads);
loop {
match needwork_rx.recv() { | Ok(false) => {
// ignore return, because we
// need to wait until the
// workers have exited (i.e,
// the Err arm above)
let _ = send_data(BUFFER_FACTOR * nthreads);
}
}
}
})
};
UnorderedParMap {
rx: rx,
_guard: handle,
}
}
/// Execute `f` on `iter` in parallel across the pool's threads,
/// returning an iterator that yields the results in the order of
/// the elements of `iter` to which they correspond.
///
/// This is a drop-in replacement for `iter.map(f)`, that runs in
/// parallel, and consumes `iter` as the pool's threads complete
/// their previous tasks.
///
/// See `unordered_map` if the output order is unimportant.
///
/// # Examples
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// use simple_parallel::Pool;
///
/// # fn main() {
/// let mut pool = Pool::new(4);
///
/// // create a vector by adjusting 0..8, in parallel
/// let elements: Vec<_> = crossbeam::scope(|scope| {
/// pool.map(scope, 0..8, |i| i + 10).collect()
/// });
///
/// assert_eq!(elements, &[10, 11, 12, 13, 14, 15, 16, 17]);
/// # }
/// ```
pub fn map<'pool, 'a, I: IntoIterator, F, T>(&'pool mut self, scope: &Scope<'a>, iter: I, f: F)
-> ParMap<'pool, 'a, T>
where I: 'a + Send,
I::Item: Send + 'a,
F: 'a + Send + Sync + Fn(I::Item) -> T,
T: Send + 'a
{
ParMap {
unordered: self.unordered_map(scope, iter, f),
looking_for: 0,
queue: BinaryHeap::new(),
}
}
}
/// Low-level/internal functionality.
impl Pool {
/// Run a job on the thread pool.
///
/// `gen_fn` is called `self.n_threads` times to create the
/// functions to execute on the worker threads. Each of these is
/// immediately called exactly once on a worker thread (that is,
/// they are semantically `FnOnce`), and `main_fn` is also called,
/// on the supervisor thread. It is expected that the workers and
/// `main_fn` will manage any internal coordination required to
/// distribute chunks of work.
///
/// The job must take pains to ensure `main_fn` doesn't quit
/// before the workers do.
pub unsafe fn execute<'pool, 'f, A, GenFn, WorkerFn, MainFn>(
&'pool mut self, scope: &Scope<'f>, data: A, gen_fn: GenFn, main_fn: MainFn) -> JobHandle<'pool, 'f>
where A: 'f + Send,
GenFn: 'f + FnMut(&mut A) -> WorkerFn + Send,
WorkerFn: 'f + FnMut(WorkerId) + Send,
MainFn: 'f + FnOnce(A) + Send,
{
self.execute_nonunsafe(scope, data, gen_fn, main_fn)
}
// separate function to ensure we get `unsafe` checking inside this one
fn execute_nonunsafe<'pool, 'f, A, GenFn, WorkerFn, MainFn>(
&'pool mut self, scope: &Scope<'f>, mut data: A,
mut gen_fn: GenFn, main_fn: MainFn) -> JobHandle<'pool, 'f>
where A: 'f + Send,
GenFn: 'f + FnMut(&mut A) -> WorkerFn + Send,
WorkerFn: 'f + FnMut(WorkerId) + Send,
MainFn: 'f + FnOnce(A) + Send,
{
let n_threads = self.n_threads;
// transmutes scary? only a little: the returned `JobHandle`
// ensures safety by connecting this job to the outside stack
// frame.
let func: JobInner<'f> = Box::new(move |workers: &[mpsc::Sender<Work>]| {
assert_eq!(workers.len(), n_threads);
let mut worker_fns: Vec<_> = (0..n_threads).map(|_| gen_fn(&mut data)).collect();
for (func, worker) in worker_fns.iter_mut().zip(workers.iter()) {
let func: WorkInner = func;
let func: WorkInner<'static> = unsafe {
mem::transmute(func)
};
worker.send(Work { func: func }).unwrap();
}
main_fn(data)
});
let func: JobInner<'static> = unsafe {
mem::transmute(func)
};
let (tx, rx) = mpsc::channel();
self.job_queue.send((Some(Job { func: func }), tx)).unwrap();
let status = Arc::new(Mutex::new(JobStatus {
wait: true,
job_finished: rx,
}));
// this probably isn't quite right? what happens to older jobs
// (e.g. if a previous one was mem::forget'd)
self.job_status = Some(status.clone());
let status_ = status.clone();
scope.defer(move || {
status_.lock().unwrap().wait();
});
JobHandle {
pool: self,
status: status,
_funcs: marker::PhantomData,
}
}
}
use std::cmp::Ordering;
struct Packet<T> {
// this should be unique for a given instance of `*ParMap`
idx: usize,
data: T,
}
impl<T> PartialOrd for Packet<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
}
impl<T> Ord for Packet<T> {
// reverse the ordering, to work with the max-heap
fn cmp(&self, other: &Self) -> Ordering { other.idx.cmp(&self.idx) }
}
impl<T> PartialEq for Packet<T> {
fn eq(&self, other: &Self) -> bool { self.idx == other.idx }
}
impl<T> Eq for Packet<T> {}
/// A parallel-mapping iterator, that yields elements in the order
/// they are computed, not the order from which they are yielded by
/// the underlying iterator. Constructed by calling
/// `Pool::unordered_map`.
pub struct UnorderedParMap<'pool, 'a, T: 'a + Send> {
rx: mpsc::Receiver<Packet<T>>,
_guard: JobHandle<'pool, 'a>,
}
impl<'pool, 'a,T: 'a + Send> Iterator for UnorderedParMap<'pool , 'a, T> {
type Item = (usize, T);
fn next(&mut self) -> Option<(usize, T)> {
match self.rx.recv() {
Ok(Packet { data, idx }) => Some((idx, data)),
Err(mpsc::RecvError) => None,
}
}
}
/// A parallel-mapping iterator, that yields elements in the order
/// they are yielded by the underlying iterator. Constructed by
/// calling `Pool::map`.
pub struct ParMap<'pool, 'a, T: 'a + Send> {
unordered: UnorderedParMap<'pool, 'a, T>,
looking_for: usize,
queue: BinaryHeap<Packet<T>>
}
impl<'pool, 'a, T: Send + 'a> Iterator for ParMap<'pool, 'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
loop {
if self.queue.peek().map_or(false, |x| x.idx == self.looking_for) {
// we've found what we want, so lets return it
let packet = self.queue.pop().unwrap();
self.looking_for += 1;
return Some(packet.data)
}
match self.unordered.rx.recv() {
// this could be optimised to check for `packet.idx ==
// self.looking_for` to avoid the BinaryHeap
// interaction if its what we want.
Ok(packet) => self.queue.push(packet),
// all done
Err(mpsc::RecvError) => return None,
}
}
}
} | // closed, done!
Ok(true) | Err(_) => break, | random_line_split |
pool.rs | use std::collections::BinaryHeap;
use std::iter::IntoIterator;
use std::{marker, mem};
use std::sync::{mpsc, atomic, Mutex, Arc};
use std::thread;
use fnbox::FnBox;
use crossbeam::{self, Scope};
type JobInner<'b> = Box<for<'a> FnBox<&'a [mpsc::Sender<Work>]> + Send + 'b>;
struct Job {
func: JobInner<'static>,
}
/// A thread pool.
///
/// This pool allows one to spawn several threads in one go, and then
/// execute any number of "short-lifetime" jobs on those threads,
/// without having to pay the thread spawning cost, or risk exhausting
/// system resources.
///
/// The pool currently consists of some number of worker threads
/// (dynamic, chosen at creation time) along with a single supervisor
/// thread. The synchronisation overhead is currently very large.
///
/// # "Short-lifetime"?
///
/// Jobs submitted to this pool can have any lifetime at all, that is,
/// the closures passed in (and elements of iterators used, etc.) can
/// have borrows pointing into arbitrary stack frames, even stack
/// frames that don't outlive the pool itself. This differs to
/// something like
/// [`scoped_threadpool`](https://crates.io/crates/scoped_threadpool),
/// where the jobs must outlive the pool.
///
/// This extra flexibility is achieved with careful unsafe code, by
/// exposing an API that is a generalised version of
/// [`crossbeam`](https://github.com/aturon/crossbeam) `Scope::spawn`
/// and the old `std::thread::scoped`: at the lowest-level a submitted
/// job returns a `JobHandle` token that ensures that job is finished
/// before any data the job might reference is invalidated
/// (i.e. manages the lifetimes). Higher-level functions will usually
/// wrap or otherwise hide the handle.
///
/// However, this comes at a cost: for easy of implementation `Pool`
/// currently only exposes "batch" jobs like `for_` and `map` and
/// these jobs take control of the whole pool. That is, one cannot
/// easily incrementally submit arbitrary closures to execute on this
/// thread pool, which is functionality that `threadpool::ScopedPool`
/// offers.
///
/// # Example
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// use simple_parallel::Pool;
///
/// // a function that takes some arbitrary pool and uses the pool to
/// // manipulate data in its own stack frame.
/// fn do_work(pool: &mut Pool) {
/// let mut v = [0; 8];
/// // set each element, in parallel
/// pool.for_(&mut v, |element| *element = 3);
///
/// let w = [2, 0, 1, 5, 0, 3, 0, 3];
///
/// // add the two arrays, in parallel
/// let z: Vec<_> = crossbeam::scope(|scope| {
/// pool.map(scope, v.iter().zip(w.iter()), |(x, y)| *x + *y).collect()
/// });
///
/// assert_eq!(z, &[5, 3, 4, 8, 3, 6, 3, 6]);
/// }
///
/// # fn main() {
/// let mut pool = Pool::new(4);
/// do_work(&mut pool);
/// # }
/// ```
pub struct Pool {
job_queue: mpsc::Sender<(Option<Job>, mpsc::Sender<Result<(), ()>>)>,
job_status: Option<Arc<Mutex<JobStatus>>>,
n_threads: usize,
}
#[derive(Copy, Clone)]
struct WorkerId { n: usize }
type WorkInner<'a> = &'a mut (FnMut(WorkerId) + Send + 'a);
struct | {
func: WorkInner<'static>
}
struct JobStatus {
wait: bool,
job_finished: mpsc::Receiver<Result<(), ()>>,
}
/// A token representing a job submitted to the thread pool.
///
/// This helps ensure that a job is finished before borrowed resources
/// in the job (and the pool itself) are invalidated.
///
/// If the job panics, this handle will ensure the main thread also
/// panics (either via `wait` or in the destructor).
pub struct JobHandle<'pool, 'f> {
pool: &'pool mut Pool,
status: Arc<Mutex<JobStatus>>,
_funcs: marker::PhantomData<&'f ()>,
}
impl JobStatus {
fn wait(&mut self) {
if self.wait {
self.wait = false;
self.job_finished.recv().unwrap().unwrap();
}
}
}
impl<'pool, 'f> JobHandle<'pool, 'f> {
/// Block until the job is finished.
///
/// # Panics
///
/// This will panic if the job panicked.
pub fn wait(&self) {
self.status.lock().unwrap().wait();
}
}
impl<'pool, 'f> Drop for JobHandle<'pool, 'f> {
fn drop(&mut self) {
self.wait();
self.pool.job_status = None;
}
}
impl Drop for Pool {
fn drop(&mut self) {
let (tx, rx) = mpsc::channel();
self.job_queue.send((None, tx)).unwrap();
rx.recv().unwrap().unwrap();
}
}
struct PanicCanary<'a> {
flag: &'a atomic::AtomicBool
}
impl<'a> Drop for PanicCanary<'a> {
fn drop(&mut self) {
if thread::panicking() {
self.flag.store(true, atomic::Ordering::SeqCst)
}
}
}
impl Pool {
/// Create a new thread pool with `n_threads` worker threads.
pub fn new(n_threads: usize) -> Pool {
let (tx, rx) = mpsc::channel::<(Option<Job>, mpsc::Sender<Result<(), ()>>)>();
thread::spawn(move || {
let panicked = Arc::new(atomic::AtomicBool::new(false));
let mut _guards = Vec::with_capacity(n_threads);
let mut txs = Vec::with_capacity(n_threads);
for i in 0..n_threads {
let id = WorkerId { n: i };
let (subtx, subrx) = mpsc::channel::<Work>();
txs.push(subtx);
let panicked = panicked.clone();
_guards.push(thread::spawn(move || {
let _canary = PanicCanary {
flag: &panicked
};
loop {
match subrx.recv() {
Ok(mut work) => {
(work.func)(id)
}
Err(_) => break,
}
}
}))
}
loop {
match rx.recv() {
Ok((Some(job), finished_tx)) => {
(job.func).call_box(&txs);
let job_panicked = panicked.load(atomic::Ordering::SeqCst);
let msg = if job_panicked { Err(()) } else { Ok(()) };
finished_tx.send(msg).unwrap();
if job_panicked { break }
}
Ok((None, finished_tx)) => {
finished_tx.send(Ok(())).unwrap();
break
}
Err(_) => break,
}
}
});
Pool {
job_queue: tx,
job_status: None,
n_threads: n_threads,
}
}
/// Execute `f` on each element of `iter`.
///
/// This panics if `f` panics, although the precise time and
/// number of elements consumed after the element that panics is
/// not specified.
///
/// # Examples
///
/// ```rust
/// use simple_parallel::Pool;
///
/// let mut pool = Pool::new(4);
///
/// let mut v = [0; 8];
///
/// // set each element, in parallel
/// pool.for_(&mut v, |element| *element = 3);
///
/// assert_eq!(v, [3; 8]);
/// ```
pub fn for_<Iter: IntoIterator, F>(&mut self, iter: Iter, ref f: F)
where Iter::Item: Send,
Iter: Send,
F: Fn(Iter::Item) + Sync
{
let (needwork_tx, needwork_rx) = mpsc::channel();
let mut work_txs = Vec::with_capacity(self.n_threads);
let mut work_rxs = Vec::with_capacity(self.n_threads);
for _ in 0..self.n_threads {
let (t, r) = mpsc::channel();
work_txs.push(t);
work_rxs.push(r);
}
let mut work_rxs = work_rxs.into_iter();
crossbeam::scope(|scope| unsafe {
let handle = self.execute(
scope,
needwork_tx,
|needwork_tx| {
let mut needwork_tx = Some(needwork_tx.clone());
let mut work_rx = Some(work_rxs.next().unwrap());
move |id| {
let work_rx = work_rx.take().unwrap();
let needwork = needwork_tx.take().unwrap();
loop {
needwork.send(id).unwrap();
match work_rx.recv() {
Ok(Some(elem)) => {
f(elem);
}
Ok(None) | Err(_) => break
}
}
}
},
move |needwork_tx| {
let mut iter = iter.into_iter().fuse();
drop(needwork_tx);
loop {
match needwork_rx.recv() {
// closed, done!
Err(_) => break,
Ok(id) => {
work_txs[id.n].send(iter.next()).unwrap();
}
}
}
});
handle.wait();
})
}
/// Execute `f` on each element in `iter` in parallel across the
/// pool's threads, with unspecified yield order.
///
/// This behaves like `map`, but does not make efforts to ensure
/// that the elements are returned in the order of `iter`, hence
/// this is cheaper.
///
/// The iterator yields `(uint, T)` tuples, where the `uint` is
/// the index of the element in the original iterator.
///
/// # Examples
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// # fn main() {
/// use simple_parallel::Pool;
///
/// let mut pool = Pool::new(4);
///
/// // adjust each element in parallel, and iterate over them as
/// // they are generated (or as close to that as possible)
/// crossbeam::scope(|scope| {
/// for (index, output) in pool.unordered_map(scope, 0..8, |i| i + 10) {
/// // each element is exactly 10 more than its original index
/// assert_eq!(output, index as i32 + 10);
/// }
/// })
/// # }
/// ```
pub fn unordered_map<'pool, 'a, I: IntoIterator, F, T>(&'pool mut self, scope: &Scope<'a>, iter: I, f: F)
-> UnorderedParMap<'pool, 'a, T>
where I: 'a + Send,
I::Item: Send + 'a,
F: 'a + Sync + Send + Fn(I::Item) -> T,
T: Send + 'a
{
let nthreads = self.n_threads;
let (needwork_tx, needwork_rx) = mpsc::channel();
let (work_tx, work_rx) = mpsc::channel();
struct Shared<Chan, Atom, F> {
work: Chan,
sent: Atom,
finished: Atom,
func: F,
}
let shared = Arc::new(Shared {
work: Mutex::new(work_rx),
sent: atomic::AtomicUsize::new(0),
finished: atomic::AtomicUsize::new(0),
func: f,
});
let (tx, rx) = mpsc::channel();
const INITIAL_FACTOR: usize = 4;
const BUFFER_FACTOR: usize = INITIAL_FACTOR / 2;
let handle = unsafe {
self.execute(scope, (needwork_tx, shared),
move |&mut (ref needwork_tx, ref shared)| {
let mut needwork_tx = Some(needwork_tx.clone());
let tx = tx.clone();
let shared = shared.clone();
move |_id| {
let needwork = needwork_tx.take().unwrap();
loop {
let data = {
let guard = shared.work.lock().unwrap();
guard.recv()
};
match data {
Ok(Some((idx, elem))) => {
let data = (shared.func)(elem);
let status = tx.send(Packet {
idx: idx, data: data
});
// the user disconnected,
// so there's no point
// computing more.
if status.is_err() {
let _ = needwork.send(true);
break
}
}
Ok(None) | Err(_) => {
break
}
};
let old =
shared.finished.fetch_add(1, atomic::Ordering::SeqCst);
let sent = shared.sent.load(atomic::Ordering::SeqCst);
if old + BUFFER_FACTOR * nthreads == sent {
if needwork.send(false).is_err() {
break
}
}
}
}
},
move |(needwork_tx, shared)| {
let mut iter = iter.into_iter().fuse().enumerate();
drop(needwork_tx);
let mut send_data = |n: usize| {
shared.sent.fetch_add(n, atomic::Ordering::SeqCst);
for _ in 0..n {
// TODO: maybe this could instead send
// several elements at a time, to
// reduce the number of
// allocations/atomic operations
// performed.
//
// Downside: work will be
// distributed chunkier.
let _ = work_tx.send(iter.next());
}
};
send_data(INITIAL_FACTOR * nthreads);
loop {
match needwork_rx.recv() {
// closed, done!
Ok(true) | Err(_) => break,
Ok(false) => {
// ignore return, because we
// need to wait until the
// workers have exited (i.e,
// the Err arm above)
let _ = send_data(BUFFER_FACTOR * nthreads);
}
}
}
})
};
UnorderedParMap {
rx: rx,
_guard: handle,
}
}
/// Execute `f` on `iter` in parallel across the pool's threads,
/// returning an iterator that yields the results in the order of
/// the elements of `iter` to which they correspond.
///
/// This is a drop-in replacement for `iter.map(f)`, that runs in
/// parallel, and consumes `iter` as the pool's threads complete
/// their previous tasks.
///
/// See `unordered_map` if the output order is unimportant.
///
/// # Examples
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// use simple_parallel::Pool;
///
/// # fn main() {
/// let mut pool = Pool::new(4);
///
/// // create a vector by adjusting 0..8, in parallel
/// let elements: Vec<_> = crossbeam::scope(|scope| {
/// pool.map(scope, 0..8, |i| i + 10).collect()
/// });
///
/// assert_eq!(elements, &[10, 11, 12, 13, 14, 15, 16, 17]);
/// # }
/// ```
pub fn map<'pool, 'a, I: IntoIterator, F, T>(&'pool mut self, scope: &Scope<'a>, iter: I, f: F)
-> ParMap<'pool, 'a, T>
where I: 'a + Send,
I::Item: Send + 'a,
F: 'a + Send + Sync + Fn(I::Item) -> T,
T: Send + 'a
{
ParMap {
unordered: self.unordered_map(scope, iter, f),
looking_for: 0,
queue: BinaryHeap::new(),
}
}
}
/// Low-level/internal functionality.
impl Pool {
/// Run a job on the thread pool.
///
/// `gen_fn` is called `self.n_threads` times to create the
/// functions to execute on the worker threads. Each of these is
/// immediately called exactly once on a worker thread (that is,
/// they are semantically `FnOnce`), and `main_fn` is also called,
/// on the supervisor thread. It is expected that the workers and
/// `main_fn` will manage any internal coordination required to
/// distribute chunks of work.
///
/// The job must take pains to ensure `main_fn` doesn't quit
/// before the workers do.
pub unsafe fn execute<'pool, 'f, A, GenFn, WorkerFn, MainFn>(
&'pool mut self, scope: &Scope<'f>, data: A, gen_fn: GenFn, main_fn: MainFn) -> JobHandle<'pool, 'f>
where A: 'f + Send,
GenFn: 'f + FnMut(&mut A) -> WorkerFn + Send,
WorkerFn: 'f + FnMut(WorkerId) + Send,
MainFn: 'f + FnOnce(A) + Send,
{
self.execute_nonunsafe(scope, data, gen_fn, main_fn)
}
// separate function to ensure we get `unsafe` checking inside this one
fn execute_nonunsafe<'pool, 'f, A, GenFn, WorkerFn, MainFn>(
&'pool mut self, scope: &Scope<'f>, mut data: A,
mut gen_fn: GenFn, main_fn: MainFn) -> JobHandle<'pool, 'f>
where A: 'f + Send,
GenFn: 'f + FnMut(&mut A) -> WorkerFn + Send,
WorkerFn: 'f + FnMut(WorkerId) + Send,
MainFn: 'f + FnOnce(A) + Send,
{
let n_threads = self.n_threads;
// transmutes scary? only a little: the returned `JobHandle`
// ensures safety by connecting this job to the outside stack
// frame.
let func: JobInner<'f> = Box::new(move |workers: &[mpsc::Sender<Work>]| {
assert_eq!(workers.len(), n_threads);
let mut worker_fns: Vec<_> = (0..n_threads).map(|_| gen_fn(&mut data)).collect();
for (func, worker) in worker_fns.iter_mut().zip(workers.iter()) {
let func: WorkInner = func;
let func: WorkInner<'static> = unsafe {
mem::transmute(func)
};
worker.send(Work { func: func }).unwrap();
}
main_fn(data)
});
let func: JobInner<'static> = unsafe {
mem::transmute(func)
};
let (tx, rx) = mpsc::channel();
self.job_queue.send((Some(Job { func: func }), tx)).unwrap();
let status = Arc::new(Mutex::new(JobStatus {
wait: true,
job_finished: rx,
}));
// this probably isn't quite right? what happens to older jobs
// (e.g. if a previous one was mem::forget'd)
self.job_status = Some(status.clone());
let status_ = status.clone();
scope.defer(move || {
status_.lock().unwrap().wait();
});
JobHandle {
pool: self,
status: status,
_funcs: marker::PhantomData,
}
}
}
use std::cmp::Ordering;
struct Packet<T> {
// this should be unique for a given instance of `*ParMap`
idx: usize,
data: T,
}
impl<T> PartialOrd for Packet<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
}
impl<T> Ord for Packet<T> {
// reverse the ordering, to work with the max-heap
fn cmp(&self, other: &Self) -> Ordering { other.idx.cmp(&self.idx) }
}
impl<T> PartialEq for Packet<T> {
fn eq(&self, other: &Self) -> bool { self.idx == other.idx }
}
impl<T> Eq for Packet<T> {}
/// A parallel-mapping iterator, that yields elements in the order
/// they are computed, not the order from which they are yielded by
/// the underlying iterator. Constructed by calling
/// `Pool::unordered_map`.
pub struct UnorderedParMap<'pool, 'a, T: 'a + Send> {
rx: mpsc::Receiver<Packet<T>>,
_guard: JobHandle<'pool, 'a>,
}
impl<'pool, 'a,T: 'a + Send> Iterator for UnorderedParMap<'pool , 'a, T> {
type Item = (usize, T);
fn next(&mut self) -> Option<(usize, T)> {
match self.rx.recv() {
Ok(Packet { data, idx }) => Some((idx, data)),
Err(mpsc::RecvError) => None,
}
}
}
/// A parallel-mapping iterator, that yields elements in the order
/// they are yielded by the underlying iterator. Constructed by
/// calling `Pool::map`.
pub struct ParMap<'pool, 'a, T: 'a + Send> {
unordered: UnorderedParMap<'pool, 'a, T>,
looking_for: usize,
queue: BinaryHeap<Packet<T>>
}
impl<'pool, 'a, T: Send + 'a> Iterator for ParMap<'pool, 'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
loop {
if self.queue.peek().map_or(false, |x| x.idx == self.looking_for) {
// we've found what we want, so lets return it
let packet = self.queue.pop().unwrap();
self.looking_for += 1;
return Some(packet.data)
}
match self.unordered.rx.recv() {
// this could be optimised to check for `packet.idx ==
// self.looking_for` to avoid the BinaryHeap
// interaction if its what we want.
Ok(packet) => self.queue.push(packet),
// all done
Err(mpsc::RecvError) => return None,
}
}
}
}
| Work | identifier_name |
pool.rs | use std::collections::BinaryHeap;
use std::iter::IntoIterator;
use std::{marker, mem};
use std::sync::{mpsc, atomic, Mutex, Arc};
use std::thread;
use fnbox::FnBox;
use crossbeam::{self, Scope};
type JobInner<'b> = Box<for<'a> FnBox<&'a [mpsc::Sender<Work>]> + Send + 'b>;
struct Job {
func: JobInner<'static>,
}
/// A thread pool.
///
/// This pool allows one to spawn several threads in one go, and then
/// execute any number of "short-lifetime" jobs on those threads,
/// without having to pay the thread spawning cost, or risk exhausting
/// system resources.
///
/// The pool currently consists of some number of worker threads
/// (dynamic, chosen at creation time) along with a single supervisor
/// thread. The synchronisation overhead is currently very large.
///
/// # "Short-lifetime"?
///
/// Jobs submitted to this pool can have any lifetime at all, that is,
/// the closures passed in (and elements of iterators used, etc.) can
/// have borrows pointing into arbitrary stack frames, even stack
/// frames that don't outlive the pool itself. This differs to
/// something like
/// [`scoped_threadpool`](https://crates.io/crates/scoped_threadpool),
/// where the jobs must outlive the pool.
///
/// This extra flexibility is achieved with careful unsafe code, by
/// exposing an API that is a generalised version of
/// [`crossbeam`](https://github.com/aturon/crossbeam) `Scope::spawn`
/// and the old `std::thread::scoped`: at the lowest-level a submitted
/// job returns a `JobHandle` token that ensures that job is finished
/// before any data the job might reference is invalidated
/// (i.e. manages the lifetimes). Higher-level functions will usually
/// wrap or otherwise hide the handle.
///
/// However, this comes at a cost: for easy of implementation `Pool`
/// currently only exposes "batch" jobs like `for_` and `map` and
/// these jobs take control of the whole pool. That is, one cannot
/// easily incrementally submit arbitrary closures to execute on this
/// thread pool, which is functionality that `threadpool::ScopedPool`
/// offers.
///
/// # Example
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// use simple_parallel::Pool;
///
/// // a function that takes some arbitrary pool and uses the pool to
/// // manipulate data in its own stack frame.
/// fn do_work(pool: &mut Pool) {
/// let mut v = [0; 8];
/// // set each element, in parallel
/// pool.for_(&mut v, |element| *element = 3);
///
/// let w = [2, 0, 1, 5, 0, 3, 0, 3];
///
/// // add the two arrays, in parallel
/// let z: Vec<_> = crossbeam::scope(|scope| {
/// pool.map(scope, v.iter().zip(w.iter()), |(x, y)| *x + *y).collect()
/// });
///
/// assert_eq!(z, &[5, 3, 4, 8, 3, 6, 3, 6]);
/// }
///
/// # fn main() {
/// let mut pool = Pool::new(4);
/// do_work(&mut pool);
/// # }
/// ```
pub struct Pool {
job_queue: mpsc::Sender<(Option<Job>, mpsc::Sender<Result<(), ()>>)>,
job_status: Option<Arc<Mutex<JobStatus>>>,
n_threads: usize,
}
#[derive(Copy, Clone)]
struct WorkerId { n: usize }
type WorkInner<'a> = &'a mut (FnMut(WorkerId) + Send + 'a);
struct Work {
func: WorkInner<'static>
}
struct JobStatus {
wait: bool,
job_finished: mpsc::Receiver<Result<(), ()>>,
}
/// A token representing a job submitted to the thread pool.
///
/// This helps ensure that a job is finished before borrowed resources
/// in the job (and the pool itself) are invalidated.
///
/// If the job panics, this handle will ensure the main thread also
/// panics (either via `wait` or in the destructor).
pub struct JobHandle<'pool, 'f> {
pool: &'pool mut Pool,
status: Arc<Mutex<JobStatus>>,
_funcs: marker::PhantomData<&'f ()>,
}
impl JobStatus {
fn wait(&mut self) {
if self.wait {
self.wait = false;
self.job_finished.recv().unwrap().unwrap();
}
}
}
impl<'pool, 'f> JobHandle<'pool, 'f> {
/// Block until the job is finished.
///
/// # Panics
///
/// This will panic if the job panicked.
pub fn wait(&self) {
self.status.lock().unwrap().wait();
}
}
impl<'pool, 'f> Drop for JobHandle<'pool, 'f> {
fn drop(&mut self) {
self.wait();
self.pool.job_status = None;
}
}
impl Drop for Pool {
fn drop(&mut self) {
let (tx, rx) = mpsc::channel();
self.job_queue.send((None, tx)).unwrap();
rx.recv().unwrap().unwrap();
}
}
struct PanicCanary<'a> {
flag: &'a atomic::AtomicBool
}
impl<'a> Drop for PanicCanary<'a> {
fn drop(&mut self) {
if thread::panicking() {
self.flag.store(true, atomic::Ordering::SeqCst)
}
}
}
impl Pool {
/// Create a new thread pool with `n_threads` worker threads.
pub fn new(n_threads: usize) -> Pool {
let (tx, rx) = mpsc::channel::<(Option<Job>, mpsc::Sender<Result<(), ()>>)>();
thread::spawn(move || {
let panicked = Arc::new(atomic::AtomicBool::new(false));
let mut _guards = Vec::with_capacity(n_threads);
let mut txs = Vec::with_capacity(n_threads);
for i in 0..n_threads {
let id = WorkerId { n: i };
let (subtx, subrx) = mpsc::channel::<Work>();
txs.push(subtx);
let panicked = panicked.clone();
_guards.push(thread::spawn(move || {
let _canary = PanicCanary {
flag: &panicked
};
loop {
match subrx.recv() {
Ok(mut work) => {
(work.func)(id)
}
Err(_) => break,
}
}
}))
}
loop {
match rx.recv() {
Ok((Some(job), finished_tx)) => {
(job.func).call_box(&txs);
let job_panicked = panicked.load(atomic::Ordering::SeqCst);
let msg = if job_panicked { Err(()) } else { Ok(()) };
finished_tx.send(msg).unwrap();
if job_panicked { break }
}
Ok((None, finished_tx)) => {
finished_tx.send(Ok(())).unwrap();
break
}
Err(_) => break,
}
}
});
Pool {
job_queue: tx,
job_status: None,
n_threads: n_threads,
}
}
/// Execute `f` on each element of `iter`.
///
/// This panics if `f` panics, although the precise time and
/// number of elements consumed after the element that panics is
/// not specified.
///
/// # Examples
///
/// ```rust
/// use simple_parallel::Pool;
///
/// let mut pool = Pool::new(4);
///
/// let mut v = [0; 8];
///
/// // set each element, in parallel
/// pool.for_(&mut v, |element| *element = 3);
///
/// assert_eq!(v, [3; 8]);
/// ```
pub fn for_<Iter: IntoIterator, F>(&mut self, iter: Iter, ref f: F)
where Iter::Item: Send,
Iter: Send,
F: Fn(Iter::Item) + Sync
{
let (needwork_tx, needwork_rx) = mpsc::channel();
let mut work_txs = Vec::with_capacity(self.n_threads);
let mut work_rxs = Vec::with_capacity(self.n_threads);
for _ in 0..self.n_threads {
let (t, r) = mpsc::channel();
work_txs.push(t);
work_rxs.push(r);
}
let mut work_rxs = work_rxs.into_iter();
crossbeam::scope(|scope| unsafe {
let handle = self.execute(
scope,
needwork_tx,
|needwork_tx| {
let mut needwork_tx = Some(needwork_tx.clone());
let mut work_rx = Some(work_rxs.next().unwrap());
move |id| {
let work_rx = work_rx.take().unwrap();
let needwork = needwork_tx.take().unwrap();
loop {
needwork.send(id).unwrap();
match work_rx.recv() {
Ok(Some(elem)) => {
f(elem);
}
Ok(None) | Err(_) => break
}
}
}
},
move |needwork_tx| {
let mut iter = iter.into_iter().fuse();
drop(needwork_tx);
loop {
match needwork_rx.recv() {
// closed, done!
Err(_) => break,
Ok(id) => {
work_txs[id.n].send(iter.next()).unwrap();
}
}
}
});
handle.wait();
})
}
/// Execute `f` on each element in `iter` in parallel across the
/// pool's threads, with unspecified yield order.
///
/// This behaves like `map`, but does not make efforts to ensure
/// that the elements are returned in the order of `iter`, hence
/// this is cheaper.
///
/// The iterator yields `(uint, T)` tuples, where the `uint` is
/// the index of the element in the original iterator.
///
/// # Examples
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// # fn main() {
/// use simple_parallel::Pool;
///
/// let mut pool = Pool::new(4);
///
/// // adjust each element in parallel, and iterate over them as
/// // they are generated (or as close to that as possible)
/// crossbeam::scope(|scope| {
/// for (index, output) in pool.unordered_map(scope, 0..8, |i| i + 10) {
/// // each element is exactly 10 more than its original index
/// assert_eq!(output, index as i32 + 10);
/// }
/// })
/// # }
/// ```
pub fn unordered_map<'pool, 'a, I: IntoIterator, F, T>(&'pool mut self, scope: &Scope<'a>, iter: I, f: F)
-> UnorderedParMap<'pool, 'a, T>
where I: 'a + Send,
I::Item: Send + 'a,
F: 'a + Sync + Send + Fn(I::Item) -> T,
T: Send + 'a
{
let nthreads = self.n_threads;
let (needwork_tx, needwork_rx) = mpsc::channel();
let (work_tx, work_rx) = mpsc::channel();
struct Shared<Chan, Atom, F> {
work: Chan,
sent: Atom,
finished: Atom,
func: F,
}
let shared = Arc::new(Shared {
work: Mutex::new(work_rx),
sent: atomic::AtomicUsize::new(0),
finished: atomic::AtomicUsize::new(0),
func: f,
});
let (tx, rx) = mpsc::channel();
const INITIAL_FACTOR: usize = 4;
const BUFFER_FACTOR: usize = INITIAL_FACTOR / 2;
let handle = unsafe {
self.execute(scope, (needwork_tx, shared),
move |&mut (ref needwork_tx, ref shared)| {
let mut needwork_tx = Some(needwork_tx.clone());
let tx = tx.clone();
let shared = shared.clone();
move |_id| {
let needwork = needwork_tx.take().unwrap();
loop {
let data = {
let guard = shared.work.lock().unwrap();
guard.recv()
};
match data {
Ok(Some((idx, elem))) => {
let data = (shared.func)(elem);
let status = tx.send(Packet {
idx: idx, data: data
});
// the user disconnected,
// so there's no point
// computing more.
if status.is_err() {
let _ = needwork.send(true);
break
}
}
Ok(None) | Err(_) => {
break
}
};
let old =
shared.finished.fetch_add(1, atomic::Ordering::SeqCst);
let sent = shared.sent.load(atomic::Ordering::SeqCst);
if old + BUFFER_FACTOR * nthreads == sent {
if needwork.send(false).is_err() |
}
}
}
},
move |(needwork_tx, shared)| {
let mut iter = iter.into_iter().fuse().enumerate();
drop(needwork_tx);
let mut send_data = |n: usize| {
shared.sent.fetch_add(n, atomic::Ordering::SeqCst);
for _ in 0..n {
// TODO: maybe this could instead send
// several elements at a time, to
// reduce the number of
// allocations/atomic operations
// performed.
//
// Downside: work will be
// distributed chunkier.
let _ = work_tx.send(iter.next());
}
};
send_data(INITIAL_FACTOR * nthreads);
loop {
match needwork_rx.recv() {
// closed, done!
Ok(true) | Err(_) => break,
Ok(false) => {
// ignore return, because we
// need to wait until the
// workers have exited (i.e,
// the Err arm above)
let _ = send_data(BUFFER_FACTOR * nthreads);
}
}
}
})
};
UnorderedParMap {
rx: rx,
_guard: handle,
}
}
/// Execute `f` on `iter` in parallel across the pool's threads,
/// returning an iterator that yields the results in the order of
/// the elements of `iter` to which they correspond.
///
/// This is a drop-in replacement for `iter.map(f)`, that runs in
/// parallel, and consumes `iter` as the pool's threads complete
/// their previous tasks.
///
/// See `unordered_map` if the output order is unimportant.
///
/// # Examples
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// use simple_parallel::Pool;
///
/// # fn main() {
/// let mut pool = Pool::new(4);
///
/// // create a vector by adjusting 0..8, in parallel
/// let elements: Vec<_> = crossbeam::scope(|scope| {
/// pool.map(scope, 0..8, |i| i + 10).collect()
/// });
///
/// assert_eq!(elements, &[10, 11, 12, 13, 14, 15, 16, 17]);
/// # }
/// ```
pub fn map<'pool, 'a, I: IntoIterator, F, T>(&'pool mut self, scope: &Scope<'a>, iter: I, f: F)
-> ParMap<'pool, 'a, T>
where I: 'a + Send,
I::Item: Send + 'a,
F: 'a + Send + Sync + Fn(I::Item) -> T,
T: Send + 'a
{
ParMap {
unordered: self.unordered_map(scope, iter, f),
looking_for: 0,
queue: BinaryHeap::new(),
}
}
}
/// Low-level/internal functionality.
impl Pool {
/// Run a job on the thread pool.
///
/// `gen_fn` is called `self.n_threads` times to create the
/// functions to execute on the worker threads. Each of these is
/// immediately called exactly once on a worker thread (that is,
/// they are semantically `FnOnce`), and `main_fn` is also called,
/// on the supervisor thread. It is expected that the workers and
/// `main_fn` will manage any internal coordination required to
/// distribute chunks of work.
///
/// The job must take pains to ensure `main_fn` doesn't quit
/// before the workers do.
pub unsafe fn execute<'pool, 'f, A, GenFn, WorkerFn, MainFn>(
&'pool mut self, scope: &Scope<'f>, data: A, gen_fn: GenFn, main_fn: MainFn) -> JobHandle<'pool, 'f>
where A: 'f + Send,
GenFn: 'f + FnMut(&mut A) -> WorkerFn + Send,
WorkerFn: 'f + FnMut(WorkerId) + Send,
MainFn: 'f + FnOnce(A) + Send,
{
self.execute_nonunsafe(scope, data, gen_fn, main_fn)
}
// separate function to ensure we get `unsafe` checking inside this one
fn execute_nonunsafe<'pool, 'f, A, GenFn, WorkerFn, MainFn>(
&'pool mut self, scope: &Scope<'f>, mut data: A,
mut gen_fn: GenFn, main_fn: MainFn) -> JobHandle<'pool, 'f>
where A: 'f + Send,
GenFn: 'f + FnMut(&mut A) -> WorkerFn + Send,
WorkerFn: 'f + FnMut(WorkerId) + Send,
MainFn: 'f + FnOnce(A) + Send,
{
let n_threads = self.n_threads;
// transmutes scary? only a little: the returned `JobHandle`
// ensures safety by connecting this job to the outside stack
// frame.
let func: JobInner<'f> = Box::new(move |workers: &[mpsc::Sender<Work>]| {
assert_eq!(workers.len(), n_threads);
let mut worker_fns: Vec<_> = (0..n_threads).map(|_| gen_fn(&mut data)).collect();
for (func, worker) in worker_fns.iter_mut().zip(workers.iter()) {
let func: WorkInner = func;
let func: WorkInner<'static> = unsafe {
mem::transmute(func)
};
worker.send(Work { func: func }).unwrap();
}
main_fn(data)
});
let func: JobInner<'static> = unsafe {
mem::transmute(func)
};
let (tx, rx) = mpsc::channel();
self.job_queue.send((Some(Job { func: func }), tx)).unwrap();
let status = Arc::new(Mutex::new(JobStatus {
wait: true,
job_finished: rx,
}));
// this probably isn't quite right? what happens to older jobs
// (e.g. if a previous one was mem::forget'd)
self.job_status = Some(status.clone());
let status_ = status.clone();
scope.defer(move || {
status_.lock().unwrap().wait();
});
JobHandle {
pool: self,
status: status,
_funcs: marker::PhantomData,
}
}
}
use std::cmp::Ordering;
struct Packet<T> {
// this should be unique for a given instance of `*ParMap`
idx: usize,
data: T,
}
impl<T> PartialOrd for Packet<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
}
impl<T> Ord for Packet<T> {
// reverse the ordering, to work with the max-heap
fn cmp(&self, other: &Self) -> Ordering { other.idx.cmp(&self.idx) }
}
impl<T> PartialEq for Packet<T> {
fn eq(&self, other: &Self) -> bool { self.idx == other.idx }
}
impl<T> Eq for Packet<T> {}
/// A parallel-mapping iterator, that yields elements in the order
/// they are computed, not the order from which they are yielded by
/// the underlying iterator. Constructed by calling
/// `Pool::unordered_map`.
pub struct UnorderedParMap<'pool, 'a, T: 'a + Send> {
rx: mpsc::Receiver<Packet<T>>,
_guard: JobHandle<'pool, 'a>,
}
impl<'pool, 'a,T: 'a + Send> Iterator for UnorderedParMap<'pool , 'a, T> {
type Item = (usize, T);
fn next(&mut self) -> Option<(usize, T)> {
match self.rx.recv() {
Ok(Packet { data, idx }) => Some((idx, data)),
Err(mpsc::RecvError) => None,
}
}
}
/// A parallel-mapping iterator, that yields elements in the order
/// they are yielded by the underlying iterator. Constructed by
/// calling `Pool::map`.
pub struct ParMap<'pool, 'a, T: 'a + Send> {
unordered: UnorderedParMap<'pool, 'a, T>,
looking_for: usize,
queue: BinaryHeap<Packet<T>>
}
impl<'pool, 'a, T: Send + 'a> Iterator for ParMap<'pool, 'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
loop {
if self.queue.peek().map_or(false, |x| x.idx == self.looking_for) {
// we've found what we want, so lets return it
let packet = self.queue.pop().unwrap();
self.looking_for += 1;
return Some(packet.data)
}
match self.unordered.rx.recv() {
// this could be optimised to check for `packet.idx ==
// self.looking_for` to avoid the BinaryHeap
// interaction if its what we want.
Ok(packet) => self.queue.push(packet),
// all done
Err(mpsc::RecvError) => return None,
}
}
}
}
| {
break
} | conditional_block |
needless_pass_by_ref_mut.rs | use super::needless_pass_by_value::requires_exact_signature;
use clippy_utils::diagnostics::span_lint_hir_and_then;
use clippy_utils::source::snippet;
use clippy_utils::{get_parent_node, is_from_proc_macro, is_self};
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::Applicability;
use rustc_hir::intravisit::{walk_qpath, FnKind, Visitor};
use rustc_hir::{Body, ExprKind, FnDecl, HirId, HirIdMap, HirIdSet, Impl, ItemKind, Mutability, Node, PatKind, QPath};
use rustc_hir_typeck::expr_use_visitor as euv;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::associated_body;
use rustc_middle::hir::nested_filter::OnlyBodies;
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::{self, Ty, UpvarId, UpvarPath};
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::kw;
use rustc_span::Span;
use rustc_target::spec::abi::Abi;
declare_clippy_lint! {
/// ### What it does
/// Check if a `&mut` function argument is actually used mutably.
///
/// Be careful if the function is publicly reexported as it would break compatibility with
/// users of this function.
///
/// ### Why is this bad?
/// Less `mut` means less fights with the borrow checker. It can also lead to more
/// opportunities for parallelization.
///
/// ### Example
/// ```rust
/// fn foo(y: &mut i32) -> i32 {
/// 12 + *y
/// }
/// ```
/// Use instead:
/// ```rust
/// fn foo(y: &i32) -> i32 {
/// 12 + *y
/// }
/// ```
#[clippy::version = "1.72.0"]
pub NEEDLESS_PASS_BY_REF_MUT,
suspicious,
"using a `&mut` argument when it's not mutated"
}
#[derive(Clone)]
pub struct NeedlessPassByRefMut<'tcx> {
avoid_breaking_exported_api: bool,
used_fn_def_ids: FxHashSet<LocalDefId>,
fn_def_ids_to_maybe_unused_mut: FxIndexMap<LocalDefId, Vec<rustc_hir::Ty<'tcx>>>,
}
impl NeedlessPassByRefMut<'_> {
pub fn new(avoid_breaking_exported_api: bool) -> Self {
Self {
avoid_breaking_exported_api,
used_fn_def_ids: FxHashSet::default(),
fn_def_ids_to_maybe_unused_mut: FxIndexMap::default(),
}
}
}
impl_lint_pass!(NeedlessPassByRefMut<'_> => [NEEDLESS_PASS_BY_REF_MUT]);
fn should_skip<'tcx>(
cx: &LateContext<'tcx>,
input: rustc_hir::Ty<'tcx>,
ty: Ty<'_>,
arg: &rustc_hir::Param<'_>,
) -> bool {
// We check if this a `&mut`. `ref_mutability` returns `None` if it's not a reference.
if !matches!(ty.ref_mutability(), Some(Mutability::Mut)) {
return true;
}
if is_self(arg) {
return true;
}
if let PatKind::Binding(.., name, _) = arg.pat.kind {
// If it's a potentially unused variable, we don't check it.
if name.name == kw::Underscore || name.as_str().starts_with('_') {
return true;
}
}
// All spans generated from a proc-macro invocation are the same...
is_from_proc_macro(cx, &input)
}
impl<'tcx> LateLintPass<'tcx> for NeedlessPassByRefMut<'tcx> {
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
kind: FnKind<'tcx>,
decl: &'tcx FnDecl<'tcx>,
body: &'tcx Body<'_>,
span: Span,
fn_def_id: LocalDefId,
) {
if span.from_expansion() {
return;
}
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(fn_def_id);
let is_async = match kind {
FnKind::ItemFn(.., header) => {
let attrs = cx.tcx.hir().attrs(hir_id);
if header.abi != Abi::Rust || requires_exact_signature(attrs) {
return;
}
header.is_async()
},
FnKind::Method(.., sig) => sig.header.is_async(),
FnKind::Closure => return,
};
// Exclude non-inherent impls
if let Some(Node::Item(item)) = cx.tcx.hir().find_parent(hir_id) {
if matches!(
item.kind,
ItemKind::Impl(Impl { of_trait: Some(_), .. }) | ItemKind::Trait(..)
) {
return;
}
}
let fn_sig = cx.tcx.fn_sig(fn_def_id).subst_identity();
let fn_sig = cx.tcx.liberate_late_bound_regions(fn_def_id.to_def_id(), fn_sig);
// If there are no `&mut` argument, no need to go any further.
let mut it = decl
.inputs
.iter()
.zip(fn_sig.inputs())
.zip(body.params)
.filter(|((&input, &ty), arg)| !should_skip(cx, input, ty, arg))
.peekable();
if it.peek().is_none() {
return;
}
// Collect variables mutably used and spans which will need dereferencings from the
// function body.
let MutablyUsedVariablesCtxt { mutably_used_vars, .. } = {
let mut ctx = MutablyUsedVariablesCtxt::default();
let infcx = cx.tcx.infer_ctxt().build();
euv::ExprUseVisitor::new(&mut ctx, &infcx, fn_def_id, cx.param_env, cx.typeck_results()).consume_body(body);
if is_async {
let closures = ctx.async_closures.clone();
let hir = cx.tcx.hir();
for closure in closures {
ctx.prev_bind = None;
ctx.prev_move_to_closure.clear();
if let Some(body) = hir
.find_by_def_id(closure)
.and_then(associated_body)
.map(|(_, body_id)| hir.body(body_id))
{
euv::ExprUseVisitor::new(&mut ctx, &infcx, closure, cx.param_env, cx.typeck_results())
.consume_body(body);
}
}
}
ctx
};
for ((&input, &_), arg) in it {
// Only take `&mut` arguments.
if let PatKind::Binding(_, canonical_id, ..) = arg.pat.kind
&& !mutably_used_vars.contains(&canonical_id)
{
self.fn_def_ids_to_maybe_unused_mut.entry(fn_def_id).or_default().push(input);
}
}
}
fn check_crate_post(&mut self, cx: &LateContext<'tcx>) {
cx.tcx.hir().visit_all_item_likes_in_crate(&mut FnNeedsMutVisitor {
cx,
used_fn_def_ids: &mut self.used_fn_def_ids,
});
for (fn_def_id, unused) in self
.fn_def_ids_to_maybe_unused_mut
.iter()
.filter(|(def_id, _)| !self.used_fn_def_ids.contains(def_id))
{
let show_semver_warning =
self.avoid_breaking_exported_api && cx.effective_visibilities.is_exported(*fn_def_id);
for input in unused {
// If the argument is never used mutably, we emit the warning.
let sp = input.span;
if let rustc_hir::TyKind::Ref(_, inner_ty) = input.kind {
span_lint_hir_and_then(
cx,
NEEDLESS_PASS_BY_REF_MUT,
cx.tcx.hir().local_def_id_to_hir_id(*fn_def_id),
sp,
"this argument is a mutable reference, but not used mutably",
|diag| {
diag.span_suggestion(
sp,
"consider changing to".to_string(),
format!("&{}", snippet(cx, cx.tcx.hir().span(inner_ty.ty.hir_id), "_"),),
Applicability::Unspecified,
);
if show_semver_warning {
diag.warn("changing this function will impact semver compatibility");
}
},
);
}
}
}
}
}
#[derive(Default)]
struct MutablyUsedVariablesCtxt {
mutably_used_vars: HirIdSet,
prev_bind: Option<HirId>,
prev_move_to_closure: HirIdSet,
aliases: HirIdMap<HirId>,
async_closures: FxHashSet<LocalDefId>,
}
impl MutablyUsedVariablesCtxt {
fn add_mutably_used_var(&mut self, mut used_id: HirId) {
while let Some(id) = self.aliases.get(&used_id) {
self.mutably_used_vars.insert(used_id);
used_id = *id;
}
self.mutably_used_vars.insert(used_id);
}
}
impl<'tcx> euv::Delegate<'tcx> for MutablyUsedVariablesCtxt {
fn consume(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
if let euv::Place {
base:
euv::PlaceBase::Local(vid)
| euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
base_ty,
..
} = &cmt.place
{
if let Some(bind_id) = self.prev_bind.take() {
if bind_id != *vid {
self.aliases.insert(bind_id, *vid);
}
} else if !self.prev_move_to_closure.contains(vid)
&& matches!(base_ty.ref_mutability(), Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
self.prev_bind = None;
self.prev_move_to_closure.remove(vid);
}
}
fn borrow(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId, borrow: ty::BorrowKind) {
self.prev_bind = None;
if let euv::Place {
base: euv::PlaceBase::Local(vid),
base_ty,
..
} = &cmt.place
{
// If this is a mutable borrow, it was obviously used mutably so we add it. However
// for `UniqueImmBorrow`, it's interesting because if you do: `array[0] = value` inside
// a closure, it'll return this variant whereas if you have just an index access, it'll
// return `ImmBorrow`. So if there is "Unique" and it's a mutable reference, we add it
// to the mutably used variables set.
if borrow == ty::BorrowKind::MutBorrow
|| (borrow == ty::BorrowKind::UniqueImmBorrow && base_ty.ref_mutability() == Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
}
}
fn mutate(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) |
fn copy(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
}
fn fake_read(
&mut self,
cmt: &rustc_hir_typeck::expr_use_visitor::PlaceWithHirId<'tcx>,
cause: FakeReadCause,
_id: HirId,
) {
if let euv::Place {
base:
euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
..
} = &cmt.place
{
if let FakeReadCause::ForLet(Some(inner)) = cause {
// Seems like we are inside an async function. We need to store the closure `DefId`
// to go through it afterwards.
self.async_closures.insert(inner);
self.aliases.insert(cmt.hir_id, *vid);
self.prev_move_to_closure.insert(*vid);
}
}
}
fn bind(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, id: HirId) {
self.prev_bind = Some(id);
}
}
/// A final pass to check for paths referencing this function that require the argument to be
/// `&mut`, basically if the function is ever used as a `fn`-like argument.
struct FnNeedsMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
used_fn_def_ids: &'a mut FxHashSet<LocalDefId>,
}
impl<'tcx> Visitor<'tcx> for FnNeedsMutVisitor<'_, 'tcx> {
type NestedFilter = OnlyBodies;
fn nested_visit_map(&mut self) -> Self::Map {
self.cx.tcx.hir()
}
fn visit_qpath(&mut self, qpath: &'tcx QPath<'tcx>, hir_id: HirId, _: Span) {
walk_qpath(self, qpath, hir_id);
let Self { cx, used_fn_def_ids } = self;
// #11182; do not lint if mutability is required elsewhere
if let Node::Expr(expr) = cx.tcx.hir().get(hir_id)
&& let Some(parent) = get_parent_node(cx.tcx, expr.hir_id)
&& let ty::FnDef(def_id, _) = cx.tcx.typeck(cx.tcx.hir().enclosing_body_owner(hir_id)).expr_ty(expr).kind()
&& let Some(def_id) = def_id.as_local()
{
if let Node::Expr(e) = parent
&& let ExprKind::Call(call, _) = e.kind
&& call.hir_id == expr.hir_id
{
return;
}
// We don't need to check each argument individually as you cannot coerce a function
// taking `&mut` -> `&`, for some reason, so if we've gotten this far we know it's
// passed as a `fn`-like argument (or is unified) and should ignore every "unused"
// argument entirely
used_fn_def_ids.insert(def_id);
}
}
}
| {
self.prev_bind = None;
if let euv::Place {
projections,
base: euv::PlaceBase::Local(vid),
..
} = &cmt.place
{
if !projections.is_empty() {
self.add_mutably_used_var(*vid);
}
}
} | identifier_body |
needless_pass_by_ref_mut.rs | use super::needless_pass_by_value::requires_exact_signature;
use clippy_utils::diagnostics::span_lint_hir_and_then;
use clippy_utils::source::snippet;
use clippy_utils::{get_parent_node, is_from_proc_macro, is_self};
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::Applicability;
use rustc_hir::intravisit::{walk_qpath, FnKind, Visitor};
use rustc_hir::{Body, ExprKind, FnDecl, HirId, HirIdMap, HirIdSet, Impl, ItemKind, Mutability, Node, PatKind, QPath};
use rustc_hir_typeck::expr_use_visitor as euv;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::associated_body;
use rustc_middle::hir::nested_filter::OnlyBodies;
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::{self, Ty, UpvarId, UpvarPath};
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::kw;
use rustc_span::Span;
use rustc_target::spec::abi::Abi;
declare_clippy_lint! {
/// ### What it does
/// Check if a `&mut` function argument is actually used mutably.
///
/// Be careful if the function is publicly reexported as it would break compatibility with
/// users of this function.
///
/// ### Why is this bad?
/// Less `mut` means less fights with the borrow checker. It can also lead to more
/// opportunities for parallelization.
///
/// ### Example
/// ```rust
/// fn foo(y: &mut i32) -> i32 {
/// 12 + *y
/// }
/// ```
/// Use instead:
/// ```rust
/// fn foo(y: &i32) -> i32 {
/// 12 + *y
/// }
/// ```
#[clippy::version = "1.72.0"]
pub NEEDLESS_PASS_BY_REF_MUT,
suspicious,
"using a `&mut` argument when it's not mutated"
}
#[derive(Clone)]
pub struct NeedlessPassByRefMut<'tcx> {
avoid_breaking_exported_api: bool,
used_fn_def_ids: FxHashSet<LocalDefId>,
fn_def_ids_to_maybe_unused_mut: FxIndexMap<LocalDefId, Vec<rustc_hir::Ty<'tcx>>>,
}
impl NeedlessPassByRefMut<'_> {
pub fn new(avoid_breaking_exported_api: bool) -> Self {
Self {
avoid_breaking_exported_api,
used_fn_def_ids: FxHashSet::default(),
fn_def_ids_to_maybe_unused_mut: FxIndexMap::default(),
}
}
}
impl_lint_pass!(NeedlessPassByRefMut<'_> => [NEEDLESS_PASS_BY_REF_MUT]);
fn should_skip<'tcx>(
cx: &LateContext<'tcx>,
input: rustc_hir::Ty<'tcx>,
ty: Ty<'_>,
arg: &rustc_hir::Param<'_>,
) -> bool {
// We check if this a `&mut`. `ref_mutability` returns `None` if it's not a reference.
if !matches!(ty.ref_mutability(), Some(Mutability::Mut)) {
return true;
}
if is_self(arg) {
return true;
}
if let PatKind::Binding(.., name, _) = arg.pat.kind {
// If it's a potentially unused variable, we don't check it.
if name.name == kw::Underscore || name.as_str().starts_with('_') {
return true;
}
}
// All spans generated from a proc-macro invocation are the same...
is_from_proc_macro(cx, &input)
}
impl<'tcx> LateLintPass<'tcx> for NeedlessPassByRefMut<'tcx> {
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
kind: FnKind<'tcx>,
decl: &'tcx FnDecl<'tcx>,
body: &'tcx Body<'_>,
span: Span,
fn_def_id: LocalDefId,
) {
if span.from_expansion() {
return;
}
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(fn_def_id);
let is_async = match kind {
FnKind::ItemFn(.., header) => {
let attrs = cx.tcx.hir().attrs(hir_id);
if header.abi != Abi::Rust || requires_exact_signature(attrs) {
return;
}
header.is_async()
},
FnKind::Method(.., sig) => sig.header.is_async(),
FnKind::Closure => return,
};
// Exclude non-inherent impls
if let Some(Node::Item(item)) = cx.tcx.hir().find_parent(hir_id) {
if matches!(
item.kind,
ItemKind::Impl(Impl { of_trait: Some(_), .. }) | ItemKind::Trait(..)
) {
return;
}
}
let fn_sig = cx.tcx.fn_sig(fn_def_id).subst_identity();
let fn_sig = cx.tcx.liberate_late_bound_regions(fn_def_id.to_def_id(), fn_sig);
// If there are no `&mut` argument, no need to go any further.
let mut it = decl
.inputs
.iter()
.zip(fn_sig.inputs())
.zip(body.params)
.filter(|((&input, &ty), arg)| !should_skip(cx, input, ty, arg))
.peekable();
if it.peek().is_none() {
return;
}
// Collect variables mutably used and spans which will need dereferencings from the
// function body.
let MutablyUsedVariablesCtxt { mutably_used_vars, .. } = {
let mut ctx = MutablyUsedVariablesCtxt::default();
let infcx = cx.tcx.infer_ctxt().build();
euv::ExprUseVisitor::new(&mut ctx, &infcx, fn_def_id, cx.param_env, cx.typeck_results()).consume_body(body);
if is_async {
let closures = ctx.async_closures.clone();
let hir = cx.tcx.hir();
for closure in closures {
ctx.prev_bind = None;
ctx.prev_move_to_closure.clear();
if let Some(body) = hir
.find_by_def_id(closure)
.and_then(associated_body)
.map(|(_, body_id)| hir.body(body_id))
{
euv::ExprUseVisitor::new(&mut ctx, &infcx, closure, cx.param_env, cx.typeck_results())
.consume_body(body);
}
}
}
ctx
};
for ((&input, &_), arg) in it {
// Only take `&mut` arguments.
if let PatKind::Binding(_, canonical_id, ..) = arg.pat.kind
&& !mutably_used_vars.contains(&canonical_id)
{
self.fn_def_ids_to_maybe_unused_mut.entry(fn_def_id).or_default().push(input);
}
}
}
fn check_crate_post(&mut self, cx: &LateContext<'tcx>) {
cx.tcx.hir().visit_all_item_likes_in_crate(&mut FnNeedsMutVisitor {
cx,
used_fn_def_ids: &mut self.used_fn_def_ids,
});
for (fn_def_id, unused) in self
.fn_def_ids_to_maybe_unused_mut
.iter()
.filter(|(def_id, _)| !self.used_fn_def_ids.contains(def_id))
{
let show_semver_warning =
self.avoid_breaking_exported_api && cx.effective_visibilities.is_exported(*fn_def_id);
for input in unused {
// If the argument is never used mutably, we emit the warning.
let sp = input.span;
if let rustc_hir::TyKind::Ref(_, inner_ty) = input.kind {
span_lint_hir_and_then(
cx,
NEEDLESS_PASS_BY_REF_MUT,
cx.tcx.hir().local_def_id_to_hir_id(*fn_def_id),
sp,
"this argument is a mutable reference, but not used mutably",
|diag| {
diag.span_suggestion(
sp,
"consider changing to".to_string(),
format!("&{}", snippet(cx, cx.tcx.hir().span(inner_ty.ty.hir_id), "_"),),
Applicability::Unspecified,
);
if show_semver_warning {
diag.warn("changing this function will impact semver compatibility");
}
},
);
}
}
}
}
}
#[derive(Default)]
struct MutablyUsedVariablesCtxt {
mutably_used_vars: HirIdSet,
prev_bind: Option<HirId>,
prev_move_to_closure: HirIdSet,
aliases: HirIdMap<HirId>,
async_closures: FxHashSet<LocalDefId>,
}
impl MutablyUsedVariablesCtxt {
fn add_mutably_used_var(&mut self, mut used_id: HirId) {
while let Some(id) = self.aliases.get(&used_id) {
self.mutably_used_vars.insert(used_id);
used_id = *id;
}
self.mutably_used_vars.insert(used_id);
}
}
impl<'tcx> euv::Delegate<'tcx> for MutablyUsedVariablesCtxt {
fn consume(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
if let euv::Place {
base:
euv::PlaceBase::Local(vid)
| euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
base_ty,
..
} = &cmt.place
{
if let Some(bind_id) = self.prev_bind.take() {
if bind_id != *vid {
self.aliases.insert(bind_id, *vid);
}
} else if !self.prev_move_to_closure.contains(vid)
&& matches!(base_ty.ref_mutability(), Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
self.prev_bind = None;
self.prev_move_to_closure.remove(vid);
}
}
fn borrow(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId, borrow: ty::BorrowKind) {
self.prev_bind = None;
if let euv::Place {
base: euv::PlaceBase::Local(vid),
base_ty,
..
} = &cmt.place
{
// If this is a mutable borrow, it was obviously used mutably so we add it. However
// for `UniqueImmBorrow`, it's interesting because if you do: `array[0] = value` inside
// a closure, it'll return this variant whereas if you have just an index access, it'll
// return `ImmBorrow`. So if there is "Unique" and it's a mutable reference, we add it
// to the mutably used variables set.
if borrow == ty::BorrowKind::MutBorrow
|| (borrow == ty::BorrowKind::UniqueImmBorrow && base_ty.ref_mutability() == Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
}
}
fn mutate(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
if let euv::Place {
projections,
base: euv::PlaceBase::Local(vid),
..
} = &cmt.place
{
if !projections.is_empty() {
self.add_mutably_used_var(*vid);
}
}
}
fn copy(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
}
fn | (
&mut self,
cmt: &rustc_hir_typeck::expr_use_visitor::PlaceWithHirId<'tcx>,
cause: FakeReadCause,
_id: HirId,
) {
if let euv::Place {
base:
euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
..
} = &cmt.place
{
if let FakeReadCause::ForLet(Some(inner)) = cause {
// Seems like we are inside an async function. We need to store the closure `DefId`
// to go through it afterwards.
self.async_closures.insert(inner);
self.aliases.insert(cmt.hir_id, *vid);
self.prev_move_to_closure.insert(*vid);
}
}
}
fn bind(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, id: HirId) {
self.prev_bind = Some(id);
}
}
/// A final pass to check for paths referencing this function that require the argument to be
/// `&mut`, basically if the function is ever used as a `fn`-like argument.
struct FnNeedsMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
used_fn_def_ids: &'a mut FxHashSet<LocalDefId>,
}
impl<'tcx> Visitor<'tcx> for FnNeedsMutVisitor<'_, 'tcx> {
type NestedFilter = OnlyBodies;
fn nested_visit_map(&mut self) -> Self::Map {
self.cx.tcx.hir()
}
fn visit_qpath(&mut self, qpath: &'tcx QPath<'tcx>, hir_id: HirId, _: Span) {
walk_qpath(self, qpath, hir_id);
let Self { cx, used_fn_def_ids } = self;
// #11182; do not lint if mutability is required elsewhere
if let Node::Expr(expr) = cx.tcx.hir().get(hir_id)
&& let Some(parent) = get_parent_node(cx.tcx, expr.hir_id)
&& let ty::FnDef(def_id, _) = cx.tcx.typeck(cx.tcx.hir().enclosing_body_owner(hir_id)).expr_ty(expr).kind()
&& let Some(def_id) = def_id.as_local()
{
if let Node::Expr(e) = parent
&& let ExprKind::Call(call, _) = e.kind
&& call.hir_id == expr.hir_id
{
return;
}
// We don't need to check each argument individually as you cannot coerce a function
// taking `&mut` -> `&`, for some reason, so if we've gotten this far we know it's
// passed as a `fn`-like argument (or is unified) and should ignore every "unused"
// argument entirely
used_fn_def_ids.insert(def_id);
}
}
}
| fake_read | identifier_name |
needless_pass_by_ref_mut.rs | use super::needless_pass_by_value::requires_exact_signature;
use clippy_utils::diagnostics::span_lint_hir_and_then;
use clippy_utils::source::snippet;
use clippy_utils::{get_parent_node, is_from_proc_macro, is_self};
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::Applicability;
use rustc_hir::intravisit::{walk_qpath, FnKind, Visitor};
use rustc_hir::{Body, ExprKind, FnDecl, HirId, HirIdMap, HirIdSet, Impl, ItemKind, Mutability, Node, PatKind, QPath};
use rustc_hir_typeck::expr_use_visitor as euv;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::associated_body;
use rustc_middle::hir::nested_filter::OnlyBodies;
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::{self, Ty, UpvarId, UpvarPath};
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::kw;
use rustc_span::Span;
use rustc_target::spec::abi::Abi;
declare_clippy_lint! {
/// ### What it does
/// Check if a `&mut` function argument is actually used mutably.
///
/// Be careful if the function is publicly reexported as it would break compatibility with
/// users of this function.
///
/// ### Why is this bad?
/// Less `mut` means less fights with the borrow checker. It can also lead to more
/// opportunities for parallelization.
///
/// ### Example
/// ```rust
/// fn foo(y: &mut i32) -> i32 {
/// 12 + *y
/// }
/// ```
/// Use instead:
/// ```rust
/// fn foo(y: &i32) -> i32 {
/// 12 + *y
/// }
/// ```
#[clippy::version = "1.72.0"]
pub NEEDLESS_PASS_BY_REF_MUT,
suspicious,
"using a `&mut` argument when it's not mutated"
}
#[derive(Clone)]
pub struct NeedlessPassByRefMut<'tcx> {
avoid_breaking_exported_api: bool,
used_fn_def_ids: FxHashSet<LocalDefId>,
fn_def_ids_to_maybe_unused_mut: FxIndexMap<LocalDefId, Vec<rustc_hir::Ty<'tcx>>>,
}
impl NeedlessPassByRefMut<'_> {
pub fn new(avoid_breaking_exported_api: bool) -> Self {
Self {
avoid_breaking_exported_api,
used_fn_def_ids: FxHashSet::default(),
fn_def_ids_to_maybe_unused_mut: FxIndexMap::default(),
}
}
}
impl_lint_pass!(NeedlessPassByRefMut<'_> => [NEEDLESS_PASS_BY_REF_MUT]);
fn should_skip<'tcx>(
cx: &LateContext<'tcx>,
input: rustc_hir::Ty<'tcx>,
ty: Ty<'_>,
arg: &rustc_hir::Param<'_>,
) -> bool {
// We check if this a `&mut`. `ref_mutability` returns `None` if it's not a reference.
if !matches!(ty.ref_mutability(), Some(Mutability::Mut)) {
return true;
}
if is_self(arg) {
return true;
}
if let PatKind::Binding(.., name, _) = arg.pat.kind {
// If it's a potentially unused variable, we don't check it.
if name.name == kw::Underscore || name.as_str().starts_with('_') {
return true;
}
}
// All spans generated from a proc-macro invocation are the same...
is_from_proc_macro(cx, &input)
}
impl<'tcx> LateLintPass<'tcx> for NeedlessPassByRefMut<'tcx> {
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
kind: FnKind<'tcx>,
decl: &'tcx FnDecl<'tcx>,
body: &'tcx Body<'_>,
span: Span,
fn_def_id: LocalDefId,
) {
if span.from_expansion() {
return;
}
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(fn_def_id);
let is_async = match kind {
FnKind::ItemFn(.., header) => {
let attrs = cx.tcx.hir().attrs(hir_id);
if header.abi != Abi::Rust || requires_exact_signature(attrs) {
return;
}
header.is_async()
},
FnKind::Method(.., sig) => sig.header.is_async(),
FnKind::Closure => return,
};
// Exclude non-inherent impls
if let Some(Node::Item(item)) = cx.tcx.hir().find_parent(hir_id) {
if matches!(
item.kind,
ItemKind::Impl(Impl { of_trait: Some(_), .. }) | ItemKind::Trait(..)
) {
return;
}
}
let fn_sig = cx.tcx.fn_sig(fn_def_id).subst_identity();
let fn_sig = cx.tcx.liberate_late_bound_regions(fn_def_id.to_def_id(), fn_sig);
// If there are no `&mut` argument, no need to go any further.
let mut it = decl
.inputs
.iter()
.zip(fn_sig.inputs())
.zip(body.params)
.filter(|((&input, &ty), arg)| !should_skip(cx, input, ty, arg))
.peekable();
if it.peek().is_none() {
return;
}
// Collect variables mutably used and spans which will need dereferencings from the
// function body.
let MutablyUsedVariablesCtxt { mutably_used_vars, .. } = {
let mut ctx = MutablyUsedVariablesCtxt::default();
let infcx = cx.tcx.infer_ctxt().build();
euv::ExprUseVisitor::new(&mut ctx, &infcx, fn_def_id, cx.param_env, cx.typeck_results()).consume_body(body);
if is_async {
let closures = ctx.async_closures.clone();
let hir = cx.tcx.hir();
for closure in closures {
ctx.prev_bind = None;
ctx.prev_move_to_closure.clear();
if let Some(body) = hir
.find_by_def_id(closure)
.and_then(associated_body)
.map(|(_, body_id)| hir.body(body_id))
{
euv::ExprUseVisitor::new(&mut ctx, &infcx, closure, cx.param_env, cx.typeck_results())
.consume_body(body);
}
}
}
ctx
};
for ((&input, &_), arg) in it {
// Only take `&mut` arguments.
if let PatKind::Binding(_, canonical_id, ..) = arg.pat.kind
&& !mutably_used_vars.contains(&canonical_id)
{
self.fn_def_ids_to_maybe_unused_mut.entry(fn_def_id).or_default().push(input);
}
}
}
fn check_crate_post(&mut self, cx: &LateContext<'tcx>) {
cx.tcx.hir().visit_all_item_likes_in_crate(&mut FnNeedsMutVisitor {
cx,
used_fn_def_ids: &mut self.used_fn_def_ids,
});
for (fn_def_id, unused) in self
.fn_def_ids_to_maybe_unused_mut
.iter()
.filter(|(def_id, _)| !self.used_fn_def_ids.contains(def_id))
{
let show_semver_warning =
self.avoid_breaking_exported_api && cx.effective_visibilities.is_exported(*fn_def_id);
for input in unused {
// If the argument is never used mutably, we emit the warning.
let sp = input.span;
if let rustc_hir::TyKind::Ref(_, inner_ty) = input.kind |
}
}
}
}
#[derive(Default)]
struct MutablyUsedVariablesCtxt {
mutably_used_vars: HirIdSet,
prev_bind: Option<HirId>,
prev_move_to_closure: HirIdSet,
aliases: HirIdMap<HirId>,
async_closures: FxHashSet<LocalDefId>,
}
impl MutablyUsedVariablesCtxt {
fn add_mutably_used_var(&mut self, mut used_id: HirId) {
while let Some(id) = self.aliases.get(&used_id) {
self.mutably_used_vars.insert(used_id);
used_id = *id;
}
self.mutably_used_vars.insert(used_id);
}
}
impl<'tcx> euv::Delegate<'tcx> for MutablyUsedVariablesCtxt {
fn consume(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
if let euv::Place {
base:
euv::PlaceBase::Local(vid)
| euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
base_ty,
..
} = &cmt.place
{
if let Some(bind_id) = self.prev_bind.take() {
if bind_id != *vid {
self.aliases.insert(bind_id, *vid);
}
} else if !self.prev_move_to_closure.contains(vid)
&& matches!(base_ty.ref_mutability(), Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
self.prev_bind = None;
self.prev_move_to_closure.remove(vid);
}
}
fn borrow(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId, borrow: ty::BorrowKind) {
self.prev_bind = None;
if let euv::Place {
base: euv::PlaceBase::Local(vid),
base_ty,
..
} = &cmt.place
{
// If this is a mutable borrow, it was obviously used mutably so we add it. However
// for `UniqueImmBorrow`, it's interesting because if you do: `array[0] = value` inside
// a closure, it'll return this variant whereas if you have just an index access, it'll
// return `ImmBorrow`. So if there is "Unique" and it's a mutable reference, we add it
// to the mutably used variables set.
if borrow == ty::BorrowKind::MutBorrow
|| (borrow == ty::BorrowKind::UniqueImmBorrow && base_ty.ref_mutability() == Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
}
}
fn mutate(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
if let euv::Place {
projections,
base: euv::PlaceBase::Local(vid),
..
} = &cmt.place
{
if !projections.is_empty() {
self.add_mutably_used_var(*vid);
}
}
}
fn copy(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
}
fn fake_read(
&mut self,
cmt: &rustc_hir_typeck::expr_use_visitor::PlaceWithHirId<'tcx>,
cause: FakeReadCause,
_id: HirId,
) {
if let euv::Place {
base:
euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
..
} = &cmt.place
{
if let FakeReadCause::ForLet(Some(inner)) = cause {
// Seems like we are inside an async function. We need to store the closure `DefId`
// to go through it afterwards.
self.async_closures.insert(inner);
self.aliases.insert(cmt.hir_id, *vid);
self.prev_move_to_closure.insert(*vid);
}
}
}
fn bind(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, id: HirId) {
self.prev_bind = Some(id);
}
}
/// A final pass to check for paths referencing this function that require the argument to be
/// `&mut`, basically if the function is ever used as a `fn`-like argument.
struct FnNeedsMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
used_fn_def_ids: &'a mut FxHashSet<LocalDefId>,
}
impl<'tcx> Visitor<'tcx> for FnNeedsMutVisitor<'_, 'tcx> {
type NestedFilter = OnlyBodies;
fn nested_visit_map(&mut self) -> Self::Map {
self.cx.tcx.hir()
}
fn visit_qpath(&mut self, qpath: &'tcx QPath<'tcx>, hir_id: HirId, _: Span) {
walk_qpath(self, qpath, hir_id);
let Self { cx, used_fn_def_ids } = self;
// #11182; do not lint if mutability is required elsewhere
if let Node::Expr(expr) = cx.tcx.hir().get(hir_id)
&& let Some(parent) = get_parent_node(cx.tcx, expr.hir_id)
&& let ty::FnDef(def_id, _) = cx.tcx.typeck(cx.tcx.hir().enclosing_body_owner(hir_id)).expr_ty(expr).kind()
&& let Some(def_id) = def_id.as_local()
{
if let Node::Expr(e) = parent
&& let ExprKind::Call(call, _) = e.kind
&& call.hir_id == expr.hir_id
{
return;
}
// We don't need to check each argument individually as you cannot coerce a function
// taking `&mut` -> `&`, for some reason, so if we've gotten this far we know it's
// passed as a `fn`-like argument (or is unified) and should ignore every "unused"
// argument entirely
used_fn_def_ids.insert(def_id);
}
}
}
| {
span_lint_hir_and_then(
cx,
NEEDLESS_PASS_BY_REF_MUT,
cx.tcx.hir().local_def_id_to_hir_id(*fn_def_id),
sp,
"this argument is a mutable reference, but not used mutably",
|diag| {
diag.span_suggestion(
sp,
"consider changing to".to_string(),
format!("&{}", snippet(cx, cx.tcx.hir().span(inner_ty.ty.hir_id), "_"),),
Applicability::Unspecified,
);
if show_semver_warning {
diag.warn("changing this function will impact semver compatibility");
}
},
);
} | conditional_block |
needless_pass_by_ref_mut.rs | use super::needless_pass_by_value::requires_exact_signature;
use clippy_utils::diagnostics::span_lint_hir_and_then;
use clippy_utils::source::snippet;
use clippy_utils::{get_parent_node, is_from_proc_macro, is_self};
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::Applicability;
use rustc_hir::intravisit::{walk_qpath, FnKind, Visitor};
use rustc_hir::{Body, ExprKind, FnDecl, HirId, HirIdMap, HirIdSet, Impl, ItemKind, Mutability, Node, PatKind, QPath};
use rustc_hir_typeck::expr_use_visitor as euv;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::associated_body;
use rustc_middle::hir::nested_filter::OnlyBodies;
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::{self, Ty, UpvarId, UpvarPath};
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::kw;
use rustc_span::Span;
use rustc_target::spec::abi::Abi;
declare_clippy_lint! {
/// ### What it does
/// Check if a `&mut` function argument is actually used mutably.
///
/// Be careful if the function is publicly reexported as it would break compatibility with
/// users of this function.
///
/// ### Why is this bad?
/// Less `mut` means less fights with the borrow checker. It can also lead to more
/// opportunities for parallelization.
///
/// ### Example
/// ```rust
/// fn foo(y: &mut i32) -> i32 {
/// 12 + *y
/// }
/// ```
/// Use instead:
/// ```rust
/// fn foo(y: &i32) -> i32 {
/// 12 + *y
/// }
/// ```
#[clippy::version = "1.72.0"]
pub NEEDLESS_PASS_BY_REF_MUT,
suspicious,
"using a `&mut` argument when it's not mutated"
}
#[derive(Clone)]
pub struct NeedlessPassByRefMut<'tcx> {
avoid_breaking_exported_api: bool,
used_fn_def_ids: FxHashSet<LocalDefId>,
fn_def_ids_to_maybe_unused_mut: FxIndexMap<LocalDefId, Vec<rustc_hir::Ty<'tcx>>>,
}
impl NeedlessPassByRefMut<'_> {
pub fn new(avoid_breaking_exported_api: bool) -> Self {
Self {
avoid_breaking_exported_api,
used_fn_def_ids: FxHashSet::default(),
fn_def_ids_to_maybe_unused_mut: FxIndexMap::default(),
}
}
}
impl_lint_pass!(NeedlessPassByRefMut<'_> => [NEEDLESS_PASS_BY_REF_MUT]);
fn should_skip<'tcx>(
cx: &LateContext<'tcx>,
input: rustc_hir::Ty<'tcx>,
ty: Ty<'_>,
arg: &rustc_hir::Param<'_>,
) -> bool {
// We check if this a `&mut`. `ref_mutability` returns `None` if it's not a reference.
if !matches!(ty.ref_mutability(), Some(Mutability::Mut)) {
return true;
}
if is_self(arg) {
return true;
}
if let PatKind::Binding(.., name, _) = arg.pat.kind {
// If it's a potentially unused variable, we don't check it.
if name.name == kw::Underscore || name.as_str().starts_with('_') {
return true;
}
}
// All spans generated from a proc-macro invocation are the same...
is_from_proc_macro(cx, &input)
} | &mut self,
cx: &LateContext<'tcx>,
kind: FnKind<'tcx>,
decl: &'tcx FnDecl<'tcx>,
body: &'tcx Body<'_>,
span: Span,
fn_def_id: LocalDefId,
) {
if span.from_expansion() {
return;
}
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(fn_def_id);
let is_async = match kind {
FnKind::ItemFn(.., header) => {
let attrs = cx.tcx.hir().attrs(hir_id);
if header.abi != Abi::Rust || requires_exact_signature(attrs) {
return;
}
header.is_async()
},
FnKind::Method(.., sig) => sig.header.is_async(),
FnKind::Closure => return,
};
// Exclude non-inherent impls
if let Some(Node::Item(item)) = cx.tcx.hir().find_parent(hir_id) {
if matches!(
item.kind,
ItemKind::Impl(Impl { of_trait: Some(_), .. }) | ItemKind::Trait(..)
) {
return;
}
}
let fn_sig = cx.tcx.fn_sig(fn_def_id).subst_identity();
let fn_sig = cx.tcx.liberate_late_bound_regions(fn_def_id.to_def_id(), fn_sig);
// If there are no `&mut` argument, no need to go any further.
let mut it = decl
.inputs
.iter()
.zip(fn_sig.inputs())
.zip(body.params)
.filter(|((&input, &ty), arg)| !should_skip(cx, input, ty, arg))
.peekable();
if it.peek().is_none() {
return;
}
// Collect variables mutably used and spans which will need dereferencings from the
// function body.
let MutablyUsedVariablesCtxt { mutably_used_vars, .. } = {
let mut ctx = MutablyUsedVariablesCtxt::default();
let infcx = cx.tcx.infer_ctxt().build();
euv::ExprUseVisitor::new(&mut ctx, &infcx, fn_def_id, cx.param_env, cx.typeck_results()).consume_body(body);
if is_async {
let closures = ctx.async_closures.clone();
let hir = cx.tcx.hir();
for closure in closures {
ctx.prev_bind = None;
ctx.prev_move_to_closure.clear();
if let Some(body) = hir
.find_by_def_id(closure)
.and_then(associated_body)
.map(|(_, body_id)| hir.body(body_id))
{
euv::ExprUseVisitor::new(&mut ctx, &infcx, closure, cx.param_env, cx.typeck_results())
.consume_body(body);
}
}
}
ctx
};
for ((&input, &_), arg) in it {
// Only take `&mut` arguments.
if let PatKind::Binding(_, canonical_id, ..) = arg.pat.kind
&& !mutably_used_vars.contains(&canonical_id)
{
self.fn_def_ids_to_maybe_unused_mut.entry(fn_def_id).or_default().push(input);
}
}
}
fn check_crate_post(&mut self, cx: &LateContext<'tcx>) {
cx.tcx.hir().visit_all_item_likes_in_crate(&mut FnNeedsMutVisitor {
cx,
used_fn_def_ids: &mut self.used_fn_def_ids,
});
for (fn_def_id, unused) in self
.fn_def_ids_to_maybe_unused_mut
.iter()
.filter(|(def_id, _)| !self.used_fn_def_ids.contains(def_id))
{
let show_semver_warning =
self.avoid_breaking_exported_api && cx.effective_visibilities.is_exported(*fn_def_id);
for input in unused {
// If the argument is never used mutably, we emit the warning.
let sp = input.span;
if let rustc_hir::TyKind::Ref(_, inner_ty) = input.kind {
span_lint_hir_and_then(
cx,
NEEDLESS_PASS_BY_REF_MUT,
cx.tcx.hir().local_def_id_to_hir_id(*fn_def_id),
sp,
"this argument is a mutable reference, but not used mutably",
|diag| {
diag.span_suggestion(
sp,
"consider changing to".to_string(),
format!("&{}", snippet(cx, cx.tcx.hir().span(inner_ty.ty.hir_id), "_"),),
Applicability::Unspecified,
);
if show_semver_warning {
diag.warn("changing this function will impact semver compatibility");
}
},
);
}
}
}
}
}
#[derive(Default)]
struct MutablyUsedVariablesCtxt {
mutably_used_vars: HirIdSet,
prev_bind: Option<HirId>,
prev_move_to_closure: HirIdSet,
aliases: HirIdMap<HirId>,
async_closures: FxHashSet<LocalDefId>,
}
impl MutablyUsedVariablesCtxt {
fn add_mutably_used_var(&mut self, mut used_id: HirId) {
while let Some(id) = self.aliases.get(&used_id) {
self.mutably_used_vars.insert(used_id);
used_id = *id;
}
self.mutably_used_vars.insert(used_id);
}
}
impl<'tcx> euv::Delegate<'tcx> for MutablyUsedVariablesCtxt {
fn consume(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
if let euv::Place {
base:
euv::PlaceBase::Local(vid)
| euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
base_ty,
..
} = &cmt.place
{
if let Some(bind_id) = self.prev_bind.take() {
if bind_id != *vid {
self.aliases.insert(bind_id, *vid);
}
} else if !self.prev_move_to_closure.contains(vid)
&& matches!(base_ty.ref_mutability(), Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
self.prev_bind = None;
self.prev_move_to_closure.remove(vid);
}
}
fn borrow(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId, borrow: ty::BorrowKind) {
self.prev_bind = None;
if let euv::Place {
base: euv::PlaceBase::Local(vid),
base_ty,
..
} = &cmt.place
{
// If this is a mutable borrow, it was obviously used mutably so we add it. However
// for `UniqueImmBorrow`, it's interesting because if you do: `array[0] = value` inside
// a closure, it'll return this variant whereas if you have just an index access, it'll
// return `ImmBorrow`. So if there is "Unique" and it's a mutable reference, we add it
// to the mutably used variables set.
if borrow == ty::BorrowKind::MutBorrow
|| (borrow == ty::BorrowKind::UniqueImmBorrow && base_ty.ref_mutability() == Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
}
}
fn mutate(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
if let euv::Place {
projections,
base: euv::PlaceBase::Local(vid),
..
} = &cmt.place
{
if !projections.is_empty() {
self.add_mutably_used_var(*vid);
}
}
}
fn copy(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
}
fn fake_read(
&mut self,
cmt: &rustc_hir_typeck::expr_use_visitor::PlaceWithHirId<'tcx>,
cause: FakeReadCause,
_id: HirId,
) {
if let euv::Place {
base:
euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
..
} = &cmt.place
{
if let FakeReadCause::ForLet(Some(inner)) = cause {
// Seems like we are inside an async function. We need to store the closure `DefId`
// to go through it afterwards.
self.async_closures.insert(inner);
self.aliases.insert(cmt.hir_id, *vid);
self.prev_move_to_closure.insert(*vid);
}
}
}
fn bind(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, id: HirId) {
self.prev_bind = Some(id);
}
}
/// A final pass to check for paths referencing this function that require the argument to be
/// `&mut`, basically if the function is ever used as a `fn`-like argument.
struct FnNeedsMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
used_fn_def_ids: &'a mut FxHashSet<LocalDefId>,
}
impl<'tcx> Visitor<'tcx> for FnNeedsMutVisitor<'_, 'tcx> {
type NestedFilter = OnlyBodies;
fn nested_visit_map(&mut self) -> Self::Map {
self.cx.tcx.hir()
}
fn visit_qpath(&mut self, qpath: &'tcx QPath<'tcx>, hir_id: HirId, _: Span) {
walk_qpath(self, qpath, hir_id);
let Self { cx, used_fn_def_ids } = self;
// #11182; do not lint if mutability is required elsewhere
if let Node::Expr(expr) = cx.tcx.hir().get(hir_id)
&& let Some(parent) = get_parent_node(cx.tcx, expr.hir_id)
&& let ty::FnDef(def_id, _) = cx.tcx.typeck(cx.tcx.hir().enclosing_body_owner(hir_id)).expr_ty(expr).kind()
&& let Some(def_id) = def_id.as_local()
{
if let Node::Expr(e) = parent
&& let ExprKind::Call(call, _) = e.kind
&& call.hir_id == expr.hir_id
{
return;
}
// We don't need to check each argument individually as you cannot coerce a function
// taking `&mut` -> `&`, for some reason, so if we've gotten this far we know it's
// passed as a `fn`-like argument (or is unified) and should ignore every "unused"
// argument entirely
used_fn_def_ids.insert(def_id);
}
}
} |
impl<'tcx> LateLintPass<'tcx> for NeedlessPassByRefMut<'tcx> {
fn check_fn( | random_line_split |
transform.ts | /**
* @license
* Copyright 2020 Google LLC
* SPDX-License-Identifier: BSD-3-Clause
*/
import {Message, makeMessageIdMap} from '../messages.js';
import {writeLocaleCodesModule} from '../locales.js';
import type {Locale} from '../types/locale.js';
import type {Config} from '../types/config.js';
import type {TransformOutputConfig} from '../types/modes.js';
import ts from 'typescript';
import {
isLitTemplate,
isMsgCall,
extractTemplate,
extractOptions,
generateMsgIdFromAstNode,
} from '../program-analysis.js';
import {KnownError} from '../error.js';
import {
escapeStringToEmbedInTemplateLiteral,
stringifyDiagnostics,
parseStringAsTemplateLiteral,
} from '../typescript.js';
import * as pathLib from 'path';
import {LitLocalizer} from '../index.js';
type TypeScriptTransformerFactoryFactory = (
program: ts.Program
) => ts.TransformerFactory<ts.SourceFile>;
/**
* Localizes a Lit project in transform mode.
*/
export class TransformLitLocalizer extends LitLocalizer {
config: Config & {output: TransformOutputConfig};
constructor(config: Config & {output: TransformOutputConfig}) {
super();
if (config.output.mode !== 'transform') {
throw new Error(
`Error: TransformLocalizer requires a localization config with output.mode "transform"`
);
}
this.config = config;
}
/**
* Compile the project for each locale, replacing all templates with their
* localized versions, and write to the configured locale directory structure.
*/
async build() {
this.assertTranslationsAreValid();
const {translations} = this.readTranslationsSync();
await transformOutput(
translations,
this.config,
this.config.output,
this.program
);
}
/**
* Make a map from each locale code to a function that takes a TypeScript
* Program and returns a TypeScript Transformer Factory that replaces all
* `msg` calls with localized templates.
*
* This factory is suitable for inclusion in the `before` array of the
* `customTransformers` parameter of the TypeScript `program.emit` method.
*/
transformers(): Map<Locale, TypeScriptTransformerFactoryFactory> {
const {translations} = this.readTranslationsSync();
const locales = [this.config.sourceLocale, ...this.config.targetLocales];
const factories = new Map<Locale, TypeScriptTransformerFactoryFactory>();
for (const locale of locales) {
factories.set(locale, (program: ts.Program) =>
litLocalizeTransform(
makeMessageIdMap(translations.get(locale) ?? []),
locale,
program
)
);
}
return factories;
}
}
/**
* Compile and emit the given TypeScript program using the lit-localize
* transformer.
*
* TODO(aomarks) Refactor this into the build() method above.
*/
async function transformOutput(
translationsByLocale: Map<Locale, Message[]>,
config: Config,
transformConfig: TransformOutputConfig,
program: ts.Program
) {
if (transformConfig.localeCodesModule) {
await writeLocaleCodesModule(
config.sourceLocale,
config.targetLocales,
transformConfig.localeCodesModule
);
}
// TODO(aomarks) It doesn't seem that it's possible for a TypeScript
// transformer to emit a new file, so we just have to emit for each locale.
// Need to do some more investigation into the best way to integrate this
// transformation into a real project so that the user can still use --watch
// and other tsc flags. It would also be nice to support the language server,
// so that diagnostics will show up immediately in the editor.
const opts = program.getCompilerOptions();
const outRoot = opts.outDir || '.';
for (const locale of [config.sourceLocale, ...config.targetLocales]) {
let translations;
if (locale !== config.sourceLocale) {
translations = new Map<string, Message>();
for (const message of translationsByLocale.get(locale) || []) {
translations.set(message.name, message);
}
}
opts.outDir = pathLib.join(outRoot, '/', locale);
program.emit(undefined, undefined, undefined, undefined, {
before: [litLocalizeTransform(translations, locale, program)],
});
}
}
/**
* Return a TypeScript TransformerFactory for the lit-localize transformer.
*/
export function litLocalizeTransform(
translations: Map<string, Message> | undefined,
locale: string,
program: ts.Program
): ts.TransformerFactory<ts.SourceFile> {
return (context) => {
return (file) => {
const transformer = new Transformer(
context,
translations,
locale,
program,
file
);
return ts.visitNode(file, transformer.boundVisitNode);
};
};
}
/**
* Implementation of the lit-localize TypeScript transformer.
*/
class Transformer {
private context: ts.TransformationContext;
private translations: Map<string, Message> | undefined;
private locale: string;
private typeChecker: ts.TypeChecker;
boundVisitNode = this.visitNode.bind(this);
sourceFile: ts.SourceFile;
constructor(
context: ts.TransformationContext,
translations: Map<string, Message> | undefined,
locale: string,
program: ts.Program,
sourceFile: ts.SourceFile
) {
this.context = context;
this.translations = translations;
this.locale = locale;
this.typeChecker = program.getTypeChecker();
this.sourceFile = sourceFile;
}
/**
* Top-level delegating visitor for all nodes.
*/
visitNode(node: ts.Node): ts.VisitResult<ts.Node> {
// msg('greeting', 'hello') -> 'hola'
if (isMsgCall(node, this.typeChecker)) {
return this.replaceMsgCall(node);
}
// html`<b>${msg('greeting', 'hello')}</b>` -> html`<b>hola</b>`
if (isLitTemplate(node)) {
// If an html-tagged template literal embeds a msg call, we want to
// collapse the result of that msg call into the parent template.
return tagLit(
makeTemplateLiteral(
this.recursivelyFlattenTemplate(node.template, true)
)
);
}
// import ... from '@lit/localize' -> (removed)
if (ts.isImportDeclaration(node)) {
const moduleSymbol = this.typeChecker.getSymbolAtLocation(
node.moduleSpecifier
);
if (moduleSymbol && this.isLitLocalizeModule(moduleSymbol)) {
return undefined;
}
}
if (ts.isCallExpression(node)) {
// configureTransformLocalization(...) -> {getLocale: () => "es-419"}
if (
this.typeHasProperty(
node.expression,
'_LIT_LOCALIZE_CONFIGURE_TRANSFORM_LOCALIZATION_'
)
) {
return ts.createObjectLiteral(
[
ts.createPropertyAssignment(
ts.createIdentifier('getLocale'),
ts.createArrowFunction(
undefined,
undefined,
[],
undefined,
ts.createToken(ts.SyntaxKind.EqualsGreaterThanToken),
ts.createStringLiteral(this.locale)
)
),
],
false
);
}
// configureLocalization(...) -> Error
if (
this.typeHasProperty(
node.expression,
'_LIT_LOCALIZE_CONFIGURE_LOCALIZATION_'
)
) {
// TODO(aomarks) This error is not surfaced earlier in the analysis phase
// as a nicely formatted diagnostic, but it should be.
throw new KnownError(
'Cannot use configureLocalization in transform mode. ' +
'Use configureTransformLocalization instead.'
);
}
// updateWhenLocaleChanges() -> undefined
if (
this.typeHasProperty(node.expression, '_LIT_LOCALIZE_CONTROLLER_FN_')
) {
return ts.createIdentifier('undefined');
}
}
// @localized -> removed
if (
ts.isDecorator(node) &&
ts.isCallExpression(node.expression) &&
this.typeHasProperty(
node.expression.expression,
'_LIT_LOCALIZE_DECORATOR_'
)
) {
return undefined;
}
// LOCALE_STATUS_EVENT -> "lit-localize-status"
//
// We want to replace this imported string constant with its static value so
// that we can always safely remove the '@lit/localize' module import.
//
// TODO(aomarks) Maybe we should error here instead, since lit-localize
// won't fire any of these events in transform mode? But I'm still thinking
// about the use case of an app that can run in either runtime or transform
// mode without code changes (e.g. runtime for dev, transform for
// production)...
//
// We can't tag this string const with a special property like we do with
// our exported functions, because doing so breaks lookups into
// `WindowEventMap`. So we instead identify the symbol by name, and check | if (eventSymbol.flags & ts.SymbolFlags.Alias) {
// Symbols will be aliased in the case of
// `import {LOCALE_STATUS_EVENT} ...`
// but not in the case of `import * as ...`.
eventSymbol = this.typeChecker.getAliasedSymbol(eventSymbol);
}
for (const decl of eventSymbol.declarations) {
let sourceFile: ts.Node = decl;
while (!ts.isSourceFile(sourceFile)) {
sourceFile = sourceFile.parent;
}
const sourceFileSymbol = this.typeChecker.getSymbolAtLocation(
sourceFile
);
if (sourceFileSymbol && this.isLitLocalizeModule(sourceFileSymbol)) {
return ts.createStringLiteral('lit-localize-status');
}
}
}
return ts.visitEachChild(node, this.boundVisitNode, this.context);
}
/**
* Replace a lit-localize `msg` call with the string or template corresponding
* to that message. If translations are present, use the translation.
* Otherwise, use the source template directly from the second argument.
*/
replaceMsgCall(
call: ts.CallExpression
): ts.TemplateLiteral | ts.TaggedTemplateExpression | ts.StringLiteral {
const [templateArg, optionsArg] = call.arguments;
const templateResult = extractTemplate(
templateArg,
this.sourceFile,
this.typeChecker
);
if (templateResult.error) {
throw new Error(stringifyDiagnostics([templateResult.error]));
}
const {isLitTemplate: isLitTagged} = templateResult.result;
let {template} = templateResult.result;
const optionsResult = extractOptions(optionsArg, this.sourceFile);
if (optionsResult.error) {
throw new Error(stringifyDiagnostics([optionsResult.error]));
}
const options = optionsResult.result;
const id = options.id ?? generateMsgIdFromAstNode(template, isLitTagged);
const sourceExpressions = new Map<string, ts.Expression>();
if (ts.isTemplateExpression(template)) {
for (const span of template.templateSpans) {
// TODO(aomarks) Support less brittle/more readable placeholder keys.
const key = this.sourceFile.text.slice(
span.expression.pos,
span.expression.end
);
sourceExpressions.set(key, span.expression);
}
}
// If translations are available, replace the source template from the
// second argument with the corresponding translation.
if (this.translations !== undefined) {
const translation = this.translations.get(id);
if (translation !== undefined) {
const templateLiteralBody = translation.contents
.map((content) =>
typeof content === 'string'
? escapeStringToEmbedInTemplateLiteral(content)
: content.untranslatable
)
.join('');
template = parseStringAsTemplateLiteral(templateLiteralBody);
if (ts.isTemplateExpression(template)) {
const newParts = [];
newParts.push(template.head.text);
for (const span of template.templateSpans) {
const expressionKey = templateLiteralBody.slice(
span.expression.pos - 1,
span.expression.end - 1
);
const sourceExpression = sourceExpressions.get(expressionKey);
if (sourceExpression === undefined) {
throw new Error(
`Expression in translation does not appear in source.` +
`\nLocale: ${this.locale}` +
`\nExpression: ${expressionKey}`
);
}
newParts.push(sourceExpression);
newParts.push(span.literal.text);
}
template = makeTemplateLiteral(newParts);
}
}
// TODO(aomarks) Emit a warning that a translation was missing.
}
// Nothing more to do with a simple string.
if (ts.isStringLiteral(template)) {
if (isLitTagged) {
throw new KnownError(
'Internal error: string literal cannot be html-tagged'
);
}
return template;
}
// We may have ended up with template expressions that can be represented
// more efficiently by hoisting them directly into the template.
//
// Given: html`Hello <b>${"World"}</b>`
// Generate: html`Hello <b>World</b>`
template = makeTemplateLiteral(
this.recursivelyFlattenTemplate(template, isLitTagged)
);
return isLitTagged ? tagLit(template) : template;
}
/**
* For every expression in the given template, assume that it is a simple
* identifier, and substitute it with the corresponding TypeScript node in the
* given map.
*
* Given: html`Hello ${name}` with Map(['name', StringLiteral{"World"}])
* Generate: html`Hello ${"World"}`
*/
substituteIdentsInExpressions(
template: ts.TemplateExpression,
paramValues: Map<string, ts.Expression>
): ts.TemplateLiteral {
return ts.visitEachChild(
template,
(span: ts.Node) => {
if (!ts.isTemplateSpan(span)) {
return span;
}
const expression = span.expression;
if (!ts.isIdentifier(expression)) {
throw new KnownError('Expected expression to be identifier');
}
const ident = expression.text;
const value = paramValues.get(ident);
if (value === undefined) {
throw new KnownError('No value provided');
}
return ts.createTemplateSpan(value, span.literal);
},
this.context
);
}
/**
* Deconstruct the given template literal it into a sequence of strings and
* expressions. Transform each expression using this transformer class,
* deconstruct that result in the same way, and "flatten" the result into
* the parent template wherever possible. Strings are flattened into strings,
* and strings + HTML are flattened into HTML.
*
* Examples:
*
* [1] `foo` => ['foo']
* [2] `foo${name}bar` => ['foo', Expression{name}, 'bar']
* [3] `foo${"bar"}baz` => ['foo', 'bar', 'baz']
* [4] html`<b>${html`<i>foo</i>`}</b>` => ['<b>', '<i>foo</i>', '</b>']
* [5] html`<b>${msg("foo", 'bar')}</b>` => ['<b>', 'bar', '</b>']
*/
recursivelyFlattenTemplate(
template: ts.TemplateLiteral,
isLit: boolean
): Array<string | ts.Expression> {
if (ts.isNoSubstitutionTemplateLiteral(template)) {
return [template.text];
}
const fragments: Array<string | ts.Expression> = [template.head.text];
const subsume = (expression: ts.Expression): boolean => {
if (ts.isStringLiteral(expression)) {
fragments.push(expression.text);
} else if (ts.isTemplateLiteral(expression)) {
fragments.push(...this.recursivelyFlattenTemplate(expression, false));
} else if (isLit && isLitTemplate(expression)) {
fragments.push(
...this.recursivelyFlattenTemplate(expression.template, true)
);
} else {
return false;
}
return true;
};
for (const span of template.templateSpans) {
let expression = span.expression;
// Can we directly subsume this span?
if (!subsume(expression)) {
// No, but it may still need transformation.
expression = ts.visitNode(expression, this.boundVisitNode);
// Maybe we can subsume it after transformation (e.g a `msg` call which
// is now transformed to a template)?
if (!subsume(expression)) {
// Still no, then keep the expression in a span as it was.
fragments.push(expression);
}
}
fragments.push(span.literal.text);
}
return fragments;
}
/**
* Return whether the given symbol looks like one of the lit-localize modules
* (because it exports one of the special tagged functions).
*/
isLitLocalizeModule(moduleSymbol: ts.Symbol): boolean {
if (!moduleSymbol.exports) {
return false;
}
const exports = moduleSymbol.exports.values();
for (const xport of exports as typeof exports & {
[Symbol.iterator](): Iterator<ts.Symbol>;
}) {
const type = this.typeChecker.getTypeAtLocation(xport.valueDeclaration);
const props = this.typeChecker.getPropertiesOfType(type);
if (
props.some(
(prop) =>
prop.escapedName === '_LIT_LOCALIZE_MSG_' ||
prop.escapedName === '_LIT_LOCALIZE_CONTROLLER_FN_' ||
prop.escapedName === '_LIT_LOCALIZE_DECORATOR_'
)
) {
return true;
}
}
return false;
}
/**
* Return whether the tpe of the given node is "tagged" with the given special
* identifying property (e.g. "_LIT_LOCALIZE_MSG_").
*/
typeHasProperty(
node: ts.Node,
propertyName: string
): node is ts.CallExpression {
const type = this.typeChecker.getTypeAtLocation(node);
const props = this.typeChecker.getPropertiesOfType(type);
return props.some((prop) => prop.escapedName === propertyName);
}
}
/**
* Wrap a TemplateLiteral in the lit `html` tag.
*/
function tagLit(template: ts.TemplateLiteral): ts.TaggedTemplateExpression {
return ts.createTaggedTemplate(ts.createIdentifier('html'), template);
}
/**
* Given an array of strings and template expressions (as generated by
* `recursivelyFlattenTemplate`), create the simplest TemplateLiteral node,
* where contiguous string items are collapsed into a single TemplateHead or
* TemplateSpan.
*/
function makeTemplateLiteral(
fragments: Array<string | ts.Expression>
): ts.TemplateLiteral {
let textBuf: string[] = [];
const spans = [];
for (let i = fragments.length - 1; i >= 0; i--) {
const fragment = fragments[i];
if (typeof fragment === 'string') {
textBuf.unshift(fragment);
} else {
const text = textBuf.join('');
const literal =
spans.length === 0
? ts.createTemplateTail(text)
: ts.createTemplateMiddle(text);
const span = ts.createTemplateSpan(fragment, literal);
spans.unshift(span);
textBuf = [];
}
}
if (spans.length === 0) {
return ts.createNoSubstitutionTemplateLiteral(textBuf.join(''));
}
return ts.createTemplateExpression(
ts.createTemplateHead(textBuf.join('')),
spans
);
} | // that it was declared in the lit-localize module.
let eventSymbol = this.typeChecker.getSymbolAtLocation(node);
if (eventSymbol && eventSymbol.name === 'LOCALE_STATUS_EVENT') { | random_line_split |
transform.ts | /**
* @license
* Copyright 2020 Google LLC
* SPDX-License-Identifier: BSD-3-Clause
*/
import {Message, makeMessageIdMap} from '../messages.js';
import {writeLocaleCodesModule} from '../locales.js';
import type {Locale} from '../types/locale.js';
import type {Config} from '../types/config.js';
import type {TransformOutputConfig} from '../types/modes.js';
import ts from 'typescript';
import {
isLitTemplate,
isMsgCall,
extractTemplate,
extractOptions,
generateMsgIdFromAstNode,
} from '../program-analysis.js';
import {KnownError} from '../error.js';
import {
escapeStringToEmbedInTemplateLiteral,
stringifyDiagnostics,
parseStringAsTemplateLiteral,
} from '../typescript.js';
import * as pathLib from 'path';
import {LitLocalizer} from '../index.js';
type TypeScriptTransformerFactoryFactory = (
program: ts.Program
) => ts.TransformerFactory<ts.SourceFile>;
/**
* Localizes a Lit project in transform mode.
*/
export class TransformLitLocalizer extends LitLocalizer {
config: Config & {output: TransformOutputConfig};
constructor(config: Config & {output: TransformOutputConfig}) {
super();
if (config.output.mode !== 'transform') {
throw new Error(
`Error: TransformLocalizer requires a localization config with output.mode "transform"`
);
}
this.config = config;
}
/**
* Compile the project for each locale, replacing all templates with their
* localized versions, and write to the configured locale directory structure.
*/
async | () {
this.assertTranslationsAreValid();
const {translations} = this.readTranslationsSync();
await transformOutput(
translations,
this.config,
this.config.output,
this.program
);
}
/**
* Make a map from each locale code to a function that takes a TypeScript
* Program and returns a TypeScript Transformer Factory that replaces all
* `msg` calls with localized templates.
*
* This factory is suitable for inclusion in the `before` array of the
* `customTransformers` parameter of the TypeScript `program.emit` method.
*/
transformers(): Map<Locale, TypeScriptTransformerFactoryFactory> {
const {translations} = this.readTranslationsSync();
const locales = [this.config.sourceLocale, ...this.config.targetLocales];
const factories = new Map<Locale, TypeScriptTransformerFactoryFactory>();
for (const locale of locales) {
factories.set(locale, (program: ts.Program) =>
litLocalizeTransform(
makeMessageIdMap(translations.get(locale) ?? []),
locale,
program
)
);
}
return factories;
}
}
/**
* Compile and emit the given TypeScript program using the lit-localize
* transformer.
*
* TODO(aomarks) Refactor this into the build() method above.
*/
async function transformOutput(
translationsByLocale: Map<Locale, Message[]>,
config: Config,
transformConfig: TransformOutputConfig,
program: ts.Program
) {
if (transformConfig.localeCodesModule) {
await writeLocaleCodesModule(
config.sourceLocale,
config.targetLocales,
transformConfig.localeCodesModule
);
}
// TODO(aomarks) It doesn't seem that it's possible for a TypeScript
// transformer to emit a new file, so we just have to emit for each locale.
// Need to do some more investigation into the best way to integrate this
// transformation into a real project so that the user can still use --watch
// and other tsc flags. It would also be nice to support the language server,
// so that diagnostics will show up immediately in the editor.
const opts = program.getCompilerOptions();
const outRoot = opts.outDir || '.';
for (const locale of [config.sourceLocale, ...config.targetLocales]) {
let translations;
if (locale !== config.sourceLocale) {
translations = new Map<string, Message>();
for (const message of translationsByLocale.get(locale) || []) {
translations.set(message.name, message);
}
}
opts.outDir = pathLib.join(outRoot, '/', locale);
program.emit(undefined, undefined, undefined, undefined, {
before: [litLocalizeTransform(translations, locale, program)],
});
}
}
/**
* Return a TypeScript TransformerFactory for the lit-localize transformer.
*/
export function litLocalizeTransform(
translations: Map<string, Message> | undefined,
locale: string,
program: ts.Program
): ts.TransformerFactory<ts.SourceFile> {
return (context) => {
return (file) => {
const transformer = new Transformer(
context,
translations,
locale,
program,
file
);
return ts.visitNode(file, transformer.boundVisitNode);
};
};
}
/**
* Implementation of the lit-localize TypeScript transformer.
*/
class Transformer {
private context: ts.TransformationContext;
private translations: Map<string, Message> | undefined;
private locale: string;
private typeChecker: ts.TypeChecker;
boundVisitNode = this.visitNode.bind(this);
sourceFile: ts.SourceFile;
constructor(
context: ts.TransformationContext,
translations: Map<string, Message> | undefined,
locale: string,
program: ts.Program,
sourceFile: ts.SourceFile
) {
this.context = context;
this.translations = translations;
this.locale = locale;
this.typeChecker = program.getTypeChecker();
this.sourceFile = sourceFile;
}
/**
* Top-level delegating visitor for all nodes.
*/
visitNode(node: ts.Node): ts.VisitResult<ts.Node> {
// msg('greeting', 'hello') -> 'hola'
if (isMsgCall(node, this.typeChecker)) {
return this.replaceMsgCall(node);
}
// html`<b>${msg('greeting', 'hello')}</b>` -> html`<b>hola</b>`
if (isLitTemplate(node)) {
// If an html-tagged template literal embeds a msg call, we want to
// collapse the result of that msg call into the parent template.
return tagLit(
makeTemplateLiteral(
this.recursivelyFlattenTemplate(node.template, true)
)
);
}
// import ... from '@lit/localize' -> (removed)
if (ts.isImportDeclaration(node)) {
const moduleSymbol = this.typeChecker.getSymbolAtLocation(
node.moduleSpecifier
);
if (moduleSymbol && this.isLitLocalizeModule(moduleSymbol)) {
return undefined;
}
}
if (ts.isCallExpression(node)) {
// configureTransformLocalization(...) -> {getLocale: () => "es-419"}
if (
this.typeHasProperty(
node.expression,
'_LIT_LOCALIZE_CONFIGURE_TRANSFORM_LOCALIZATION_'
)
) {
return ts.createObjectLiteral(
[
ts.createPropertyAssignment(
ts.createIdentifier('getLocale'),
ts.createArrowFunction(
undefined,
undefined,
[],
undefined,
ts.createToken(ts.SyntaxKind.EqualsGreaterThanToken),
ts.createStringLiteral(this.locale)
)
),
],
false
);
}
// configureLocalization(...) -> Error
if (
this.typeHasProperty(
node.expression,
'_LIT_LOCALIZE_CONFIGURE_LOCALIZATION_'
)
) {
// TODO(aomarks) This error is not surfaced earlier in the analysis phase
// as a nicely formatted diagnostic, but it should be.
throw new KnownError(
'Cannot use configureLocalization in transform mode. ' +
'Use configureTransformLocalization instead.'
);
}
// updateWhenLocaleChanges() -> undefined
if (
this.typeHasProperty(node.expression, '_LIT_LOCALIZE_CONTROLLER_FN_')
) {
return ts.createIdentifier('undefined');
}
}
// @localized -> removed
if (
ts.isDecorator(node) &&
ts.isCallExpression(node.expression) &&
this.typeHasProperty(
node.expression.expression,
'_LIT_LOCALIZE_DECORATOR_'
)
) {
return undefined;
}
// LOCALE_STATUS_EVENT -> "lit-localize-status"
//
// We want to replace this imported string constant with its static value so
// that we can always safely remove the '@lit/localize' module import.
//
// TODO(aomarks) Maybe we should error here instead, since lit-localize
// won't fire any of these events in transform mode? But I'm still thinking
// about the use case of an app that can run in either runtime or transform
// mode without code changes (e.g. runtime for dev, transform for
// production)...
//
// We can't tag this string const with a special property like we do with
// our exported functions, because doing so breaks lookups into
// `WindowEventMap`. So we instead identify the symbol by name, and check
// that it was declared in the lit-localize module.
let eventSymbol = this.typeChecker.getSymbolAtLocation(node);
if (eventSymbol && eventSymbol.name === 'LOCALE_STATUS_EVENT') {
if (eventSymbol.flags & ts.SymbolFlags.Alias) {
// Symbols will be aliased in the case of
// `import {LOCALE_STATUS_EVENT} ...`
// but not in the case of `import * as ...`.
eventSymbol = this.typeChecker.getAliasedSymbol(eventSymbol);
}
for (const decl of eventSymbol.declarations) {
let sourceFile: ts.Node = decl;
while (!ts.isSourceFile(sourceFile)) {
sourceFile = sourceFile.parent;
}
const sourceFileSymbol = this.typeChecker.getSymbolAtLocation(
sourceFile
);
if (sourceFileSymbol && this.isLitLocalizeModule(sourceFileSymbol)) {
return ts.createStringLiteral('lit-localize-status');
}
}
}
return ts.visitEachChild(node, this.boundVisitNode, this.context);
}
/**
* Replace a lit-localize `msg` call with the string or template corresponding
* to that message. If translations are present, use the translation.
* Otherwise, use the source template directly from the second argument.
*/
replaceMsgCall(
call: ts.CallExpression
): ts.TemplateLiteral | ts.TaggedTemplateExpression | ts.StringLiteral {
const [templateArg, optionsArg] = call.arguments;
const templateResult = extractTemplate(
templateArg,
this.sourceFile,
this.typeChecker
);
if (templateResult.error) {
throw new Error(stringifyDiagnostics([templateResult.error]));
}
const {isLitTemplate: isLitTagged} = templateResult.result;
let {template} = templateResult.result;
const optionsResult = extractOptions(optionsArg, this.sourceFile);
if (optionsResult.error) {
throw new Error(stringifyDiagnostics([optionsResult.error]));
}
const options = optionsResult.result;
const id = options.id ?? generateMsgIdFromAstNode(template, isLitTagged);
const sourceExpressions = new Map<string, ts.Expression>();
if (ts.isTemplateExpression(template)) {
for (const span of template.templateSpans) {
// TODO(aomarks) Support less brittle/more readable placeholder keys.
const key = this.sourceFile.text.slice(
span.expression.pos,
span.expression.end
);
sourceExpressions.set(key, span.expression);
}
}
// If translations are available, replace the source template from the
// second argument with the corresponding translation.
if (this.translations !== undefined) {
const translation = this.translations.get(id);
if (translation !== undefined) {
const templateLiteralBody = translation.contents
.map((content) =>
typeof content === 'string'
? escapeStringToEmbedInTemplateLiteral(content)
: content.untranslatable
)
.join('');
template = parseStringAsTemplateLiteral(templateLiteralBody);
if (ts.isTemplateExpression(template)) {
const newParts = [];
newParts.push(template.head.text);
for (const span of template.templateSpans) {
const expressionKey = templateLiteralBody.slice(
span.expression.pos - 1,
span.expression.end - 1
);
const sourceExpression = sourceExpressions.get(expressionKey);
if (sourceExpression === undefined) {
throw new Error(
`Expression in translation does not appear in source.` +
`\nLocale: ${this.locale}` +
`\nExpression: ${expressionKey}`
);
}
newParts.push(sourceExpression);
newParts.push(span.literal.text);
}
template = makeTemplateLiteral(newParts);
}
}
// TODO(aomarks) Emit a warning that a translation was missing.
}
// Nothing more to do with a simple string.
if (ts.isStringLiteral(template)) {
if (isLitTagged) {
throw new KnownError(
'Internal error: string literal cannot be html-tagged'
);
}
return template;
}
// We may have ended up with template expressions that can be represented
// more efficiently by hoisting them directly into the template.
//
// Given: html`Hello <b>${"World"}</b>`
// Generate: html`Hello <b>World</b>`
template = makeTemplateLiteral(
this.recursivelyFlattenTemplate(template, isLitTagged)
);
return isLitTagged ? tagLit(template) : template;
}
/**
* For every expression in the given template, assume that it is a simple
* identifier, and substitute it with the corresponding TypeScript node in the
* given map.
*
* Given: html`Hello ${name}` with Map(['name', StringLiteral{"World"}])
* Generate: html`Hello ${"World"}`
*/
substituteIdentsInExpressions(
template: ts.TemplateExpression,
paramValues: Map<string, ts.Expression>
): ts.TemplateLiteral {
return ts.visitEachChild(
template,
(span: ts.Node) => {
if (!ts.isTemplateSpan(span)) {
return span;
}
const expression = span.expression;
if (!ts.isIdentifier(expression)) {
throw new KnownError('Expected expression to be identifier');
}
const ident = expression.text;
const value = paramValues.get(ident);
if (value === undefined) {
throw new KnownError('No value provided');
}
return ts.createTemplateSpan(value, span.literal);
},
this.context
);
}
/**
* Deconstruct the given template literal it into a sequence of strings and
* expressions. Transform each expression using this transformer class,
* deconstruct that result in the same way, and "flatten" the result into
* the parent template wherever possible. Strings are flattened into strings,
* and strings + HTML are flattened into HTML.
*
* Examples:
*
* [1] `foo` => ['foo']
* [2] `foo${name}bar` => ['foo', Expression{name}, 'bar']
* [3] `foo${"bar"}baz` => ['foo', 'bar', 'baz']
* [4] html`<b>${html`<i>foo</i>`}</b>` => ['<b>', '<i>foo</i>', '</b>']
* [5] html`<b>${msg("foo", 'bar')}</b>` => ['<b>', 'bar', '</b>']
*/
recursivelyFlattenTemplate(
template: ts.TemplateLiteral,
isLit: boolean
): Array<string | ts.Expression> {
if (ts.isNoSubstitutionTemplateLiteral(template)) {
return [template.text];
}
const fragments: Array<string | ts.Expression> = [template.head.text];
const subsume = (expression: ts.Expression): boolean => {
if (ts.isStringLiteral(expression)) {
fragments.push(expression.text);
} else if (ts.isTemplateLiteral(expression)) {
fragments.push(...this.recursivelyFlattenTemplate(expression, false));
} else if (isLit && isLitTemplate(expression)) {
fragments.push(
...this.recursivelyFlattenTemplate(expression.template, true)
);
} else {
return false;
}
return true;
};
for (const span of template.templateSpans) {
let expression = span.expression;
// Can we directly subsume this span?
if (!subsume(expression)) {
// No, but it may still need transformation.
expression = ts.visitNode(expression, this.boundVisitNode);
// Maybe we can subsume it after transformation (e.g a `msg` call which
// is now transformed to a template)?
if (!subsume(expression)) {
// Still no, then keep the expression in a span as it was.
fragments.push(expression);
}
}
fragments.push(span.literal.text);
}
return fragments;
}
/**
* Return whether the given symbol looks like one of the lit-localize modules
* (because it exports one of the special tagged functions).
*/
isLitLocalizeModule(moduleSymbol: ts.Symbol): boolean {
if (!moduleSymbol.exports) {
return false;
}
const exports = moduleSymbol.exports.values();
for (const xport of exports as typeof exports & {
[Symbol.iterator](): Iterator<ts.Symbol>;
}) {
const type = this.typeChecker.getTypeAtLocation(xport.valueDeclaration);
const props = this.typeChecker.getPropertiesOfType(type);
if (
props.some(
(prop) =>
prop.escapedName === '_LIT_LOCALIZE_MSG_' ||
prop.escapedName === '_LIT_LOCALIZE_CONTROLLER_FN_' ||
prop.escapedName === '_LIT_LOCALIZE_DECORATOR_'
)
) {
return true;
}
}
return false;
}
/**
* Return whether the tpe of the given node is "tagged" with the given special
* identifying property (e.g. "_LIT_LOCALIZE_MSG_").
*/
typeHasProperty(
node: ts.Node,
propertyName: string
): node is ts.CallExpression {
const type = this.typeChecker.getTypeAtLocation(node);
const props = this.typeChecker.getPropertiesOfType(type);
return props.some((prop) => prop.escapedName === propertyName);
}
}
/**
* Wrap a TemplateLiteral in the lit `html` tag.
*/
function tagLit(template: ts.TemplateLiteral): ts.TaggedTemplateExpression {
return ts.createTaggedTemplate(ts.createIdentifier('html'), template);
}
/**
* Given an array of strings and template expressions (as generated by
* `recursivelyFlattenTemplate`), create the simplest TemplateLiteral node,
* where contiguous string items are collapsed into a single TemplateHead or
* TemplateSpan.
*/
function makeTemplateLiteral(
fragments: Array<string | ts.Expression>
): ts.TemplateLiteral {
let textBuf: string[] = [];
const spans = [];
for (let i = fragments.length - 1; i >= 0; i--) {
const fragment = fragments[i];
if (typeof fragment === 'string') {
textBuf.unshift(fragment);
} else {
const text = textBuf.join('');
const literal =
spans.length === 0
? ts.createTemplateTail(text)
: ts.createTemplateMiddle(text);
const span = ts.createTemplateSpan(fragment, literal);
spans.unshift(span);
textBuf = [];
}
}
if (spans.length === 0) {
return ts.createNoSubstitutionTemplateLiteral(textBuf.join(''));
}
return ts.createTemplateExpression(
ts.createTemplateHead(textBuf.join('')),
spans
);
}
| build | identifier_name |
transform.ts | /**
* @license
* Copyright 2020 Google LLC
* SPDX-License-Identifier: BSD-3-Clause
*/
import {Message, makeMessageIdMap} from '../messages.js';
import {writeLocaleCodesModule} from '../locales.js';
import type {Locale} from '../types/locale.js';
import type {Config} from '../types/config.js';
import type {TransformOutputConfig} from '../types/modes.js';
import ts from 'typescript';
import {
isLitTemplate,
isMsgCall,
extractTemplate,
extractOptions,
generateMsgIdFromAstNode,
} from '../program-analysis.js';
import {KnownError} from '../error.js';
import {
escapeStringToEmbedInTemplateLiteral,
stringifyDiagnostics,
parseStringAsTemplateLiteral,
} from '../typescript.js';
import * as pathLib from 'path';
import {LitLocalizer} from '../index.js';
type TypeScriptTransformerFactoryFactory = (
program: ts.Program
) => ts.TransformerFactory<ts.SourceFile>;
/**
* Localizes a Lit project in transform mode.
*/
export class TransformLitLocalizer extends LitLocalizer {
config: Config & {output: TransformOutputConfig};
constructor(config: Config & {output: TransformOutputConfig}) {
super();
if (config.output.mode !== 'transform') {
throw new Error(
`Error: TransformLocalizer requires a localization config with output.mode "transform"`
);
}
this.config = config;
}
/**
* Compile the project for each locale, replacing all templates with their
* localized versions, and write to the configured locale directory structure.
*/
async build() {
this.assertTranslationsAreValid();
const {translations} = this.readTranslationsSync();
await transformOutput(
translations,
this.config,
this.config.output,
this.program
);
}
/**
* Make a map from each locale code to a function that takes a TypeScript
* Program and returns a TypeScript Transformer Factory that replaces all
* `msg` calls with localized templates.
*
* This factory is suitable for inclusion in the `before` array of the
* `customTransformers` parameter of the TypeScript `program.emit` method.
*/
transformers(): Map<Locale, TypeScriptTransformerFactoryFactory> {
const {translations} = this.readTranslationsSync();
const locales = [this.config.sourceLocale, ...this.config.targetLocales];
const factories = new Map<Locale, TypeScriptTransformerFactoryFactory>();
for (const locale of locales) {
factories.set(locale, (program: ts.Program) =>
litLocalizeTransform(
makeMessageIdMap(translations.get(locale) ?? []),
locale,
program
)
);
}
return factories;
}
}
/**
* Compile and emit the given TypeScript program using the lit-localize
* transformer.
*
* TODO(aomarks) Refactor this into the build() method above.
*/
async function transformOutput(
translationsByLocale: Map<Locale, Message[]>,
config: Config,
transformConfig: TransformOutputConfig,
program: ts.Program
) {
if (transformConfig.localeCodesModule) {
await writeLocaleCodesModule(
config.sourceLocale,
config.targetLocales,
transformConfig.localeCodesModule
);
}
// TODO(aomarks) It doesn't seem that it's possible for a TypeScript
// transformer to emit a new file, so we just have to emit for each locale.
// Need to do some more investigation into the best way to integrate this
// transformation into a real project so that the user can still use --watch
// and other tsc flags. It would also be nice to support the language server,
// so that diagnostics will show up immediately in the editor.
const opts = program.getCompilerOptions();
const outRoot = opts.outDir || '.';
for (const locale of [config.sourceLocale, ...config.targetLocales]) {
let translations;
if (locale !== config.sourceLocale) {
translations = new Map<string, Message>();
for (const message of translationsByLocale.get(locale) || []) {
translations.set(message.name, message);
}
}
opts.outDir = pathLib.join(outRoot, '/', locale);
program.emit(undefined, undefined, undefined, undefined, {
before: [litLocalizeTransform(translations, locale, program)],
});
}
}
/**
* Return a TypeScript TransformerFactory for the lit-localize transformer.
*/
export function litLocalizeTransform(
translations: Map<string, Message> | undefined,
locale: string,
program: ts.Program
): ts.TransformerFactory<ts.SourceFile> {
return (context) => {
return (file) => {
const transformer = new Transformer(
context,
translations,
locale,
program,
file
);
return ts.visitNode(file, transformer.boundVisitNode);
};
};
}
/**
* Implementation of the lit-localize TypeScript transformer.
*/
class Transformer {
private context: ts.TransformationContext;
private translations: Map<string, Message> | undefined;
private locale: string;
private typeChecker: ts.TypeChecker;
boundVisitNode = this.visitNode.bind(this);
sourceFile: ts.SourceFile;
constructor(
context: ts.TransformationContext,
translations: Map<string, Message> | undefined,
locale: string,
program: ts.Program,
sourceFile: ts.SourceFile
) {
this.context = context;
this.translations = translations;
this.locale = locale;
this.typeChecker = program.getTypeChecker();
this.sourceFile = sourceFile;
}
/**
* Top-level delegating visitor for all nodes.
*/
visitNode(node: ts.Node): ts.VisitResult<ts.Node> {
// msg('greeting', 'hello') -> 'hola'
if (isMsgCall(node, this.typeChecker)) {
return this.replaceMsgCall(node);
}
// html`<b>${msg('greeting', 'hello')}</b>` -> html`<b>hola</b>`
if (isLitTemplate(node)) {
// If an html-tagged template literal embeds a msg call, we want to
// collapse the result of that msg call into the parent template.
return tagLit(
makeTemplateLiteral(
this.recursivelyFlattenTemplate(node.template, true)
)
);
}
// import ... from '@lit/localize' -> (removed)
if (ts.isImportDeclaration(node)) {
const moduleSymbol = this.typeChecker.getSymbolAtLocation(
node.moduleSpecifier
);
if (moduleSymbol && this.isLitLocalizeModule(moduleSymbol)) {
return undefined;
}
}
if (ts.isCallExpression(node)) {
// configureTransformLocalization(...) -> {getLocale: () => "es-419"}
if (
this.typeHasProperty(
node.expression,
'_LIT_LOCALIZE_CONFIGURE_TRANSFORM_LOCALIZATION_'
)
) {
return ts.createObjectLiteral(
[
ts.createPropertyAssignment(
ts.createIdentifier('getLocale'),
ts.createArrowFunction(
undefined,
undefined,
[],
undefined,
ts.createToken(ts.SyntaxKind.EqualsGreaterThanToken),
ts.createStringLiteral(this.locale)
)
),
],
false
);
}
// configureLocalization(...) -> Error
if (
this.typeHasProperty(
node.expression,
'_LIT_LOCALIZE_CONFIGURE_LOCALIZATION_'
)
) {
// TODO(aomarks) This error is not surfaced earlier in the analysis phase
// as a nicely formatted diagnostic, but it should be.
throw new KnownError(
'Cannot use configureLocalization in transform mode. ' +
'Use configureTransformLocalization instead.'
);
}
// updateWhenLocaleChanges() -> undefined
if (
this.typeHasProperty(node.expression, '_LIT_LOCALIZE_CONTROLLER_FN_')
) {
return ts.createIdentifier('undefined');
}
}
// @localized -> removed
if (
ts.isDecorator(node) &&
ts.isCallExpression(node.expression) &&
this.typeHasProperty(
node.expression.expression,
'_LIT_LOCALIZE_DECORATOR_'
)
) {
return undefined;
}
// LOCALE_STATUS_EVENT -> "lit-localize-status"
//
// We want to replace this imported string constant with its static value so
// that we can always safely remove the '@lit/localize' module import.
//
// TODO(aomarks) Maybe we should error here instead, since lit-localize
// won't fire any of these events in transform mode? But I'm still thinking
// about the use case of an app that can run in either runtime or transform
// mode without code changes (e.g. runtime for dev, transform for
// production)...
//
// We can't tag this string const with a special property like we do with
// our exported functions, because doing so breaks lookups into
// `WindowEventMap`. So we instead identify the symbol by name, and check
// that it was declared in the lit-localize module.
let eventSymbol = this.typeChecker.getSymbolAtLocation(node);
if (eventSymbol && eventSymbol.name === 'LOCALE_STATUS_EVENT') {
if (eventSymbol.flags & ts.SymbolFlags.Alias) {
// Symbols will be aliased in the case of
// `import {LOCALE_STATUS_EVENT} ...`
// but not in the case of `import * as ...`.
eventSymbol = this.typeChecker.getAliasedSymbol(eventSymbol);
}
for (const decl of eventSymbol.declarations) {
let sourceFile: ts.Node = decl;
while (!ts.isSourceFile(sourceFile)) {
sourceFile = sourceFile.parent;
}
const sourceFileSymbol = this.typeChecker.getSymbolAtLocation(
sourceFile
);
if (sourceFileSymbol && this.isLitLocalizeModule(sourceFileSymbol)) {
return ts.createStringLiteral('lit-localize-status');
}
}
}
return ts.visitEachChild(node, this.boundVisitNode, this.context);
}
/**
* Replace a lit-localize `msg` call with the string or template corresponding
* to that message. If translations are present, use the translation.
* Otherwise, use the source template directly from the second argument.
*/
replaceMsgCall(
call: ts.CallExpression
): ts.TemplateLiteral | ts.TaggedTemplateExpression | ts.StringLiteral {
const [templateArg, optionsArg] = call.arguments;
const templateResult = extractTemplate(
templateArg,
this.sourceFile,
this.typeChecker
);
if (templateResult.error) {
throw new Error(stringifyDiagnostics([templateResult.error]));
}
const {isLitTemplate: isLitTagged} = templateResult.result;
let {template} = templateResult.result;
const optionsResult = extractOptions(optionsArg, this.sourceFile);
if (optionsResult.error) {
throw new Error(stringifyDiagnostics([optionsResult.error]));
}
const options = optionsResult.result;
const id = options.id ?? generateMsgIdFromAstNode(template, isLitTagged);
const sourceExpressions = new Map<string, ts.Expression>();
if (ts.isTemplateExpression(template)) |
// If translations are available, replace the source template from the
// second argument with the corresponding translation.
if (this.translations !== undefined) {
const translation = this.translations.get(id);
if (translation !== undefined) {
const templateLiteralBody = translation.contents
.map((content) =>
typeof content === 'string'
? escapeStringToEmbedInTemplateLiteral(content)
: content.untranslatable
)
.join('');
template = parseStringAsTemplateLiteral(templateLiteralBody);
if (ts.isTemplateExpression(template)) {
const newParts = [];
newParts.push(template.head.text);
for (const span of template.templateSpans) {
const expressionKey = templateLiteralBody.slice(
span.expression.pos - 1,
span.expression.end - 1
);
const sourceExpression = sourceExpressions.get(expressionKey);
if (sourceExpression === undefined) {
throw new Error(
`Expression in translation does not appear in source.` +
`\nLocale: ${this.locale}` +
`\nExpression: ${expressionKey}`
);
}
newParts.push(sourceExpression);
newParts.push(span.literal.text);
}
template = makeTemplateLiteral(newParts);
}
}
// TODO(aomarks) Emit a warning that a translation was missing.
}
// Nothing more to do with a simple string.
if (ts.isStringLiteral(template)) {
if (isLitTagged) {
throw new KnownError(
'Internal error: string literal cannot be html-tagged'
);
}
return template;
}
// We may have ended up with template expressions that can be represented
// more efficiently by hoisting them directly into the template.
//
// Given: html`Hello <b>${"World"}</b>`
// Generate: html`Hello <b>World</b>`
template = makeTemplateLiteral(
this.recursivelyFlattenTemplate(template, isLitTagged)
);
return isLitTagged ? tagLit(template) : template;
}
/**
* For every expression in the given template, assume that it is a simple
* identifier, and substitute it with the corresponding TypeScript node in the
* given map.
*
* Given: html`Hello ${name}` with Map(['name', StringLiteral{"World"}])
* Generate: html`Hello ${"World"}`
*/
substituteIdentsInExpressions(
template: ts.TemplateExpression,
paramValues: Map<string, ts.Expression>
): ts.TemplateLiteral {
return ts.visitEachChild(
template,
(span: ts.Node) => {
if (!ts.isTemplateSpan(span)) {
return span;
}
const expression = span.expression;
if (!ts.isIdentifier(expression)) {
throw new KnownError('Expected expression to be identifier');
}
const ident = expression.text;
const value = paramValues.get(ident);
if (value === undefined) {
throw new KnownError('No value provided');
}
return ts.createTemplateSpan(value, span.literal);
},
this.context
);
}
/**
* Deconstruct the given template literal it into a sequence of strings and
* expressions. Transform each expression using this transformer class,
* deconstruct that result in the same way, and "flatten" the result into
* the parent template wherever possible. Strings are flattened into strings,
* and strings + HTML are flattened into HTML.
*
* Examples:
*
* [1] `foo` => ['foo']
* [2] `foo${name}bar` => ['foo', Expression{name}, 'bar']
* [3] `foo${"bar"}baz` => ['foo', 'bar', 'baz']
* [4] html`<b>${html`<i>foo</i>`}</b>` => ['<b>', '<i>foo</i>', '</b>']
* [5] html`<b>${msg("foo", 'bar')}</b>` => ['<b>', 'bar', '</b>']
*/
recursivelyFlattenTemplate(
template: ts.TemplateLiteral,
isLit: boolean
): Array<string | ts.Expression> {
if (ts.isNoSubstitutionTemplateLiteral(template)) {
return [template.text];
}
const fragments: Array<string | ts.Expression> = [template.head.text];
const subsume = (expression: ts.Expression): boolean => {
if (ts.isStringLiteral(expression)) {
fragments.push(expression.text);
} else if (ts.isTemplateLiteral(expression)) {
fragments.push(...this.recursivelyFlattenTemplate(expression, false));
} else if (isLit && isLitTemplate(expression)) {
fragments.push(
...this.recursivelyFlattenTemplate(expression.template, true)
);
} else {
return false;
}
return true;
};
for (const span of template.templateSpans) {
let expression = span.expression;
// Can we directly subsume this span?
if (!subsume(expression)) {
// No, but it may still need transformation.
expression = ts.visitNode(expression, this.boundVisitNode);
// Maybe we can subsume it after transformation (e.g a `msg` call which
// is now transformed to a template)?
if (!subsume(expression)) {
// Still no, then keep the expression in a span as it was.
fragments.push(expression);
}
}
fragments.push(span.literal.text);
}
return fragments;
}
/**
* Return whether the given symbol looks like one of the lit-localize modules
* (because it exports one of the special tagged functions).
*/
isLitLocalizeModule(moduleSymbol: ts.Symbol): boolean {
if (!moduleSymbol.exports) {
return false;
}
const exports = moduleSymbol.exports.values();
for (const xport of exports as typeof exports & {
[Symbol.iterator](): Iterator<ts.Symbol>;
}) {
const type = this.typeChecker.getTypeAtLocation(xport.valueDeclaration);
const props = this.typeChecker.getPropertiesOfType(type);
if (
props.some(
(prop) =>
prop.escapedName === '_LIT_LOCALIZE_MSG_' ||
prop.escapedName === '_LIT_LOCALIZE_CONTROLLER_FN_' ||
prop.escapedName === '_LIT_LOCALIZE_DECORATOR_'
)
) {
return true;
}
}
return false;
}
/**
* Return whether the tpe of the given node is "tagged" with the given special
* identifying property (e.g. "_LIT_LOCALIZE_MSG_").
*/
typeHasProperty(
node: ts.Node,
propertyName: string
): node is ts.CallExpression {
const type = this.typeChecker.getTypeAtLocation(node);
const props = this.typeChecker.getPropertiesOfType(type);
return props.some((prop) => prop.escapedName === propertyName);
}
}
/**
* Wrap a TemplateLiteral in the lit `html` tag.
*/
function tagLit(template: ts.TemplateLiteral): ts.TaggedTemplateExpression {
return ts.createTaggedTemplate(ts.createIdentifier('html'), template);
}
/**
* Given an array of strings and template expressions (as generated by
* `recursivelyFlattenTemplate`), create the simplest TemplateLiteral node,
* where contiguous string items are collapsed into a single TemplateHead or
* TemplateSpan.
*/
function makeTemplateLiteral(
fragments: Array<string | ts.Expression>
): ts.TemplateLiteral {
let textBuf: string[] = [];
const spans = [];
for (let i = fragments.length - 1; i >= 0; i--) {
const fragment = fragments[i];
if (typeof fragment === 'string') {
textBuf.unshift(fragment);
} else {
const text = textBuf.join('');
const literal =
spans.length === 0
? ts.createTemplateTail(text)
: ts.createTemplateMiddle(text);
const span = ts.createTemplateSpan(fragment, literal);
spans.unshift(span);
textBuf = [];
}
}
if (spans.length === 0) {
return ts.createNoSubstitutionTemplateLiteral(textBuf.join(''));
}
return ts.createTemplateExpression(
ts.createTemplateHead(textBuf.join('')),
spans
);
}
| {
for (const span of template.templateSpans) {
// TODO(aomarks) Support less brittle/more readable placeholder keys.
const key = this.sourceFile.text.slice(
span.expression.pos,
span.expression.end
);
sourceExpressions.set(key, span.expression);
}
} | conditional_block |
messenger.rs | // This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! LDK sends, receives, and forwards onion messages via the [`OnionMessenger`]. See its docs for
//! more information.
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
use chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient, Sign};
use ln::features::{InitFeatures, NodeFeatures};
use ln::msgs::{self, OnionMessageHandler};
use ln::onion_utils;
use super::blinded_route::{BlindedRoute, ForwardTlvs, ReceiveTlvs};
use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
use super::utils;
use util::events::OnionMessageProvider;
use util::logger::Logger;
use util::ser::Writeable;
use core::ops::Deref;
use sync::{Arc, Mutex};
use prelude::*;
/// A sender, receiver and forwarder of onion messages. In upcoming releases, this object will be
/// used to retrieve invoices and fulfill invoice requests from [offers]. Currently, only sending
/// and receiving empty onion messages is supported.
///
/// # Example
///
/// ```
/// # extern crate bitcoin;
/// # use bitcoin::hashes::_export::_core::time::Duration;
/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
/// # use lightning::chain::keysinterface::{InMemorySigner, KeysManager, KeysInterface};
/// # use lightning::onion_message::{BlindedRoute, Destination, OnionMessenger};
/// # use lightning::util::logger::{Logger, Record};
/// # use std::sync::Arc;
/// # struct FakeLogger {};
/// # impl Logger for FakeLogger {
/// # fn log(&self, record: &Record) { unimplemented!() }
/// # }
/// # let seed = [42u8; 32];
/// # let time = Duration::from_secs(123456);
/// # let keys_manager = KeysManager::new(&seed, time.as_secs(), time.subsec_nanos());
/// # let logger = Arc::new(FakeLogger {});
/// # let node_secret = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
/// # let secp_ctx = Secp256k1::new();
/// # let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
/// # let (hop_node_id2, hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1,
/// hop_node_id1);
/// # let destination_node_id = hop_node_id1;
/// #
/// // Create the onion messenger. This must use the same `keys_manager` as is passed to your
/// // ChannelManager.
/// let onion_messenger = OnionMessenger::new(&keys_manager, logger);
///
/// // Send an empty onion message to a node id.
/// let intermediate_hops = [hop_node_id1, hop_node_id2];
/// let reply_path = None;
/// onion_messenger.send_onion_message(&intermediate_hops, Destination::Node(destination_node_id), reply_path);
///
/// // Create a blinded route to yourself, for someone to send an onion message to.
/// # let your_node_id = hop_node_id1;
/// let hops = [hop_node_id3, hop_node_id4, your_node_id];
/// let blinded_route = BlindedRoute::new(&hops, &keys_manager, &secp_ctx).unwrap();
///
/// // Send an empty onion message to a blinded route.
/// # let intermediate_hops = [hop_node_id1, hop_node_id2];
/// let reply_path = None;
/// onion_messenger.send_onion_message(&intermediate_hops, Destination::BlindedRoute(blinded_route), reply_path);
/// ```
///
/// [offers]: <https://github.com/lightning/bolts/pull/798>
/// [`OnionMessenger`]: crate::onion_message::OnionMessenger
pub struct OnionMessenger<Signer: Sign, K: Deref, L: Deref>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
keys_manager: K,
logger: L,
pending_messages: Mutex<HashMap<PublicKey, VecDeque<msgs::OnionMessage>>>,
secp_ctx: Secp256k1<secp256k1::All>,
// Coming soon:
// invoice_handler: InvoiceHandler,
// custom_handler: CustomHandler, // handles custom onion messages
}
/// The destination of an onion message.
pub enum Destination {
/// We're sending this onion message to a node.
Node(PublicKey),
/// We're sending this onion message to a blinded route.
BlindedRoute(BlindedRoute),
}
impl Destination {
pub(super) fn num_hops(&self) -> usize {
match self {
Destination::Node(_) => 1,
Destination::BlindedRoute(BlindedRoute { blinded_hops, .. }) => blinded_hops.len(),
}
}
}
/// Errors that may occur when [sending an onion message].
///
/// [sending an onion message]: OnionMessenger::send_onion_message
#[derive(Debug, PartialEq)]
pub enum SendError {
/// Errored computing onion message packet keys.
Secp256k1(secp256k1::Error),
/// Because implementations such as Eclair will drop onion messages where the message packet
/// exceeds 32834 bytes, we refuse to send messages where the packet exceeds this size.
TooBigPacket,
/// The provided [`Destination`] was an invalid [`BlindedRoute`], due to having fewer than two
/// blinded hops.
TooFewBlindedHops,
/// Our next-hop peer was offline or does not support onion message forwarding.
InvalidFirstHop,
/// Our next-hop peer's buffer was full or our total outbound buffer was full.
BufferFull,
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
/// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to
/// their respective handlers.
pub fn new(keys_manager: K, logger: L) -> Self {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
OnionMessenger {
keys_manager,
pending_messages: Mutex::new(HashMap::new()),
secp_ctx,
logger,
}
}
/// Send an empty onion message to `destination`, routing it through `intermediate_nodes`.
/// See [`OnionMessenger`] for example usage.
pub fn send_onion_message(&self, intermediate_nodes: &[PublicKey], destination: Destination, reply_path: Option<BlindedRoute>) -> Result<(), SendError> {
if let Destination::BlindedRoute(BlindedRoute { ref blinded_hops, .. }) = destination {
if blinded_hops.len() < 2 {
return Err(SendError::TooFewBlindedHops);
}
}
let blinding_secret_bytes = self.keys_manager.get_secure_random_bytes();
let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
(intermediate_nodes[0], PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret))
} else {
match destination {
Destination::Node(pk) => (pk, PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret)),
Destination::BlindedRoute(BlindedRoute { introduction_node_id, blinding_point, .. }) =>
(introduction_node_id, blinding_point),
}
};
let (packet_payloads, packet_keys) = packet_payloads_and_keys(
&self.secp_ctx, intermediate_nodes, destination, reply_path, &blinding_secret)
.map_err(|e| SendError::Secp256k1(e))?;
let prng_seed = self.keys_manager.get_secure_random_bytes();
let onion_routing_packet = construct_onion_message_packet(
packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
if outbound_buffer_full(&introduction_node_id, &pending_per_peer_msgs) { return Err(SendError::BufferFull) }
match pending_per_peer_msgs.entry(introduction_node_id) {
hash_map::Entry::Vacant(_) => Err(SendError::InvalidFirstHop),
hash_map::Entry::Occupied(mut e) => {
e.get_mut().push_back(msgs::OnionMessage { blinding_point, onion_routing_packet });
Ok(())
}
}
}
#[cfg(test)]
pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<msgs::OnionMessage>> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
let mut msgs = HashMap::new();
// We don't want to disconnect the peers by removing them entirely from the original map, so we
// swap the pending message buffers individually.
for (peer_node_id, pending_messages) in &mut *pending_msgs {
msgs.insert(*peer_node_id, core::mem::take(pending_messages));
}
msgs
}
}
fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, VecDeque<msgs::OnionMessage>>) -> bool {
const MAX_TOTAL_BUFFER_SIZE: usize = (1 << 20) * 128;
const MAX_PER_PEER_BUFFER_SIZE: usize = (1 << 10) * 256;
let mut total_buffered_bytes = 0;
let mut peer_buffered_bytes = 0;
for (pk, peer_buf) in buffer {
for om in peer_buf {
let om_len = om.serialized_length();
if pk == peer_node_id {
peer_buffered_bytes += om_len;
}
total_buffered_bytes += om_len;
if total_buffered_bytes >= MAX_TOTAL_BUFFER_SIZE ||
peer_buffered_bytes >= MAX_PER_PEER_BUFFER_SIZE
{
return true
}
}
}
false
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessageHandler for OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
/// Handle an incoming onion message. Currently, if a message was destined for us we will log, but
/// soon we'll delegate the onion message to a handler that can generate invoices or send
/// payments.
fn | (&self, _peer_node_id: &PublicKey, msg: &msgs::OnionMessage) {
let control_tlvs_ss = match self.keys_manager.ecdh(Recipient::Node, &msg.blinding_point, None) {
Ok(ss) => ss,
Err(e) => {
log_error!(self.logger, "Failed to retrieve node secret: {:?}", e);
return
}
};
let onion_decode_ss = {
let blinding_factor = {
let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
hmac.input(control_tlvs_ss.as_ref());
Hmac::from_engine(hmac).into_inner()
};
match self.keys_manager.ecdh(Recipient::Node, &msg.onion_routing_packet.public_key,
Some(&Scalar::from_be_bytes(blinding_factor).unwrap()))
{
Ok(ss) => ss.secret_bytes(),
Err(()) => {
log_trace!(self.logger, "Failed to compute onion packet shared secret");
return
}
}
};
match onion_utils::decode_next_hop(onion_decode_ss, &msg.onion_routing_packet.hop_data[..],
msg.onion_routing_packet.hmac, control_tlvs_ss)
{
Ok((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id }), reply_path,
}, None)) => {
log_info!(self.logger,
"Received an onion message with path_id: {:02x?} and {}reply_path",
path_id, if reply_path.is_some() { "" } else { "no " });
},
Ok((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id, next_blinding_override
})), Some((next_hop_hmac, new_packet_bytes)))) => {
// TODO: we need to check whether `next_node_id` is our node, in which case this is a dummy
// blinded hop and this onion message is destined for us. In this situation, we should keep
// unwrapping the onion layers to get to the final payload. Since we don't have the option
// of creating blinded routes with dummy hops currently, we should be ok to not handle this
// for now.
let new_pubkey = match onion_utils::next_hop_packet_pubkey(&self.secp_ctx, msg.onion_routing_packet.public_key, &onion_decode_ss) {
Ok(pk) => pk,
Err(e) => {
log_trace!(self.logger, "Failed to compute next hop packet pubkey: {}", e);
return
}
};
let outgoing_packet = Packet {
version: 0,
public_key: new_pubkey,
hop_data: new_packet_bytes,
hmac: next_hop_hmac,
};
let onion_message = msgs::OnionMessage {
blinding_point: match next_blinding_override {
Some(blinding_point) => blinding_point,
None => {
let blinding_factor = {
let mut sha = Sha256::engine();
sha.input(&msg.blinding_point.serialize()[..]);
sha.input(control_tlvs_ss.as_ref());
Sha256::from_engine(sha).into_inner()
};
let next_blinding_point = msg.blinding_point;
match next_blinding_point.mul_tweak(&self.secp_ctx, &Scalar::from_be_bytes(blinding_factor).unwrap()) {
Ok(bp) => bp,
Err(e) => {
log_trace!(self.logger, "Failed to compute next blinding point: {}", e);
return
}
}
},
},
onion_routing_packet: outgoing_packet,
};
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
if outbound_buffer_full(&next_node_id, &pending_per_peer_msgs) {
log_trace!(self.logger, "Dropping forwarded onion message to peer {:?}: outbound buffer full", next_node_id);
return
}
#[cfg(fuzzing)]
pending_per_peer_msgs.entry(next_node_id).or_insert_with(VecDeque::new);
match pending_per_peer_msgs.entry(next_node_id) {
hash_map::Entry::Vacant(_) => {
log_trace!(self.logger, "Dropping forwarded onion message to disconnected peer {:?}", next_node_id);
return
},
hash_map::Entry::Occupied(mut e) => {
e.get_mut().push_back(onion_message);
log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
}
};
},
Err(e) => {
log_trace!(self.logger, "Errored decoding onion message packet: {:?}", e);
},
_ => {
log_trace!(self.logger, "Received bogus onion message packet, either the sender encoded a final hop as a forwarding hop or vice versa");
},
};
}
fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) -> Result<(), ()> {
if init.features.supports_onion_messages() {
let mut peers = self.pending_messages.lock().unwrap();
peers.insert(their_node_id.clone(), VecDeque::new());
}
Ok(())
}
fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
let mut pending_msgs = self.pending_messages.lock().unwrap();
pending_msgs.remove(their_node_id);
}
fn provided_node_features(&self) -> NodeFeatures {
let mut features = NodeFeatures::empty();
features.set_onion_messages_optional();
features
}
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
let mut features = InitFeatures::empty();
features.set_onion_messages_optional();
features
}
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessageProvider for OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<msgs::OnionMessage> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
return msgs.pop_front()
}
None
}
}
// TODO: parameterize the below Simple* types with OnionMessenger and handle the messages it
// produces
/// Useful for simplifying the parameters of [`SimpleArcChannelManager`] and
/// [`SimpleArcPeerManager`]. See their docs for more details.
///
/// (C-not exported) as `Arc`s don't make sense in bindings.
///
/// [`SimpleArcChannelManager`]: crate::ln::channelmanager::SimpleArcChannelManager
/// [`SimpleArcPeerManager`]: crate::ln::peer_handler::SimpleArcPeerManager
pub type SimpleArcOnionMessenger<L> = OnionMessenger<InMemorySigner, Arc<KeysManager>, Arc<L>>;
/// Useful for simplifying the parameters of [`SimpleRefChannelManager`] and
/// [`SimpleRefPeerManager`]. See their docs for more details.
///
/// (C-not exported) as general type aliases don't make sense in bindings.
///
/// [`SimpleRefChannelManager`]: crate::ln::channelmanager::SimpleRefChannelManager
/// [`SimpleRefPeerManager`]: crate::ln::peer_handler::SimpleRefPeerManager
pub type SimpleRefOnionMessenger<'a, 'b, L> = OnionMessenger<InMemorySigner, &'a KeysManager, &'b L>;
/// Construct onion packet payloads and keys for sending an onion message along the given
/// `unblinded_path` to the given `destination`.
fn packet_payloads_and_keys<T: secp256k1::Signing + secp256k1::Verification>(
secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], destination: Destination, mut reply_path:
Option<BlindedRoute>, session_priv: &SecretKey
) -> Result<(Vec<(Payload, [u8; 32])>, Vec<onion_utils::OnionKeys>), secp256k1::Error> {
let num_hops = unblinded_path.len() + destination.num_hops();
let mut payloads = Vec::with_capacity(num_hops);
let mut onion_packet_keys = Vec::with_capacity(num_hops);
let (mut intro_node_id_blinding_pt, num_blinded_hops) = if let Destination::BlindedRoute(BlindedRoute {
introduction_node_id, blinding_point, blinded_hops }) = &destination {
(Some((*introduction_node_id, *blinding_point)), blinded_hops.len()) } else { (None, 0) };
let num_unblinded_hops = num_hops - num_blinded_hops;
let mut unblinded_path_idx = 0;
let mut blinded_path_idx = 0;
let mut prev_control_tlvs_ss = None;
utils::construct_keys_callback(secp_ctx, unblinded_path, Some(destination), session_priv, |_, onion_packet_ss, ephemeral_pubkey, control_tlvs_ss, unblinded_pk_opt, enc_payload_opt| {
if num_unblinded_hops != 0 && unblinded_path_idx < num_unblinded_hops {
if let Some(ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(
ForwardTlvs {
next_node_id: unblinded_pk_opt.unwrap(),
next_blinding_override: None,
}
)), ss));
}
prev_control_tlvs_ss = Some(control_tlvs_ss);
unblinded_path_idx += 1;
} else if let Some((intro_node_id, blinding_pt)) = intro_node_id_blinding_pt.take() {
if let Some(control_tlvs_ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id: intro_node_id,
next_blinding_override: Some(blinding_pt),
})), control_tlvs_ss));
}
if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(encrypted_payload)),
control_tlvs_ss));
} else { debug_assert!(false); }
blinded_path_idx += 1;
} else if blinded_path_idx < num_blinded_hops - 1 && enc_payload_opt.is_some() {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(enc_payload_opt.unwrap())),
control_tlvs_ss));
blinded_path_idx += 1;
} else if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Blinded(encrypted_payload),
reply_path: reply_path.take(),
}, control_tlvs_ss));
}
let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(onion_packet_ss.as_ref());
onion_packet_keys.push(onion_utils::OnionKeys {
#[cfg(test)]
shared_secret: onion_packet_ss,
#[cfg(test)]
blinding_factor: [0; 32],
ephemeral_pubkey,
rho,
mu,
});
})?;
if let Some(control_tlvs_ss) = prev_control_tlvs_ss {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id: None, }),
reply_path: reply_path.take(),
}, control_tlvs_ss));
}
Ok((payloads, onion_packet_keys))
}
/// Errors if the serialized payload size exceeds onion_message::BIG_PACKET_HOP_DATA_LEN
fn construct_onion_message_packet(payloads: Vec<(Payload, [u8; 32])>, onion_keys: Vec<onion_utils::OnionKeys>, prng_seed: [u8; 32]) -> Result<Packet, ()> {
// Spec rationale:
// "`len` allows larger messages to be sent than the standard 1300 bytes allowed for an HTLC
// onion, but this should be used sparingly as it is reduces anonymity set, hence the
// recommendation that it either look like an HTLC onion, or if larger, be a fixed size."
let payloads_ser_len = onion_utils::payloads_serialized_length(&payloads);
let hop_data_len = if payloads_ser_len <= SMALL_PACKET_HOP_DATA_LEN {
SMALL_PACKET_HOP_DATA_LEN
} else if payloads_ser_len <= BIG_PACKET_HOP_DATA_LEN {
BIG_PACKET_HOP_DATA_LEN
} else { return Err(()) };
Ok(onion_utils::construct_onion_message_packet::<_, _>(
payloads, onion_keys, prng_seed, hop_data_len))
}
| handle_onion_message | identifier_name |
messenger.rs | // This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! LDK sends, receives, and forwards onion messages via the [`OnionMessenger`]. See its docs for
//! more information.
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
use chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient, Sign};
use ln::features::{InitFeatures, NodeFeatures};
use ln::msgs::{self, OnionMessageHandler};
use ln::onion_utils;
use super::blinded_route::{BlindedRoute, ForwardTlvs, ReceiveTlvs};
use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
use super::utils;
use util::events::OnionMessageProvider;
use util::logger::Logger;
use util::ser::Writeable;
use core::ops::Deref;
use sync::{Arc, Mutex};
use prelude::*;
/// A sender, receiver and forwarder of onion messages. In upcoming releases, this object will be
/// used to retrieve invoices and fulfill invoice requests from [offers]. Currently, only sending
/// and receiving empty onion messages is supported.
///
/// # Example
///
/// ```
/// # extern crate bitcoin;
/// # use bitcoin::hashes::_export::_core::time::Duration;
/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
/// # use lightning::chain::keysinterface::{InMemorySigner, KeysManager, KeysInterface};
/// # use lightning::onion_message::{BlindedRoute, Destination, OnionMessenger};
/// # use lightning::util::logger::{Logger, Record};
/// # use std::sync::Arc;
/// # struct FakeLogger {};
/// # impl Logger for FakeLogger {
/// # fn log(&self, record: &Record) { unimplemented!() }
/// # }
/// # let seed = [42u8; 32];
/// # let time = Duration::from_secs(123456);
/// # let keys_manager = KeysManager::new(&seed, time.as_secs(), time.subsec_nanos());
/// # let logger = Arc::new(FakeLogger {});
/// # let node_secret = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
/// # let secp_ctx = Secp256k1::new();
/// # let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
/// # let (hop_node_id2, hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1,
/// hop_node_id1);
/// # let destination_node_id = hop_node_id1;
/// #
/// // Create the onion messenger. This must use the same `keys_manager` as is passed to your
/// // ChannelManager.
/// let onion_messenger = OnionMessenger::new(&keys_manager, logger);
///
/// // Send an empty onion message to a node id.
/// let intermediate_hops = [hop_node_id1, hop_node_id2];
/// let reply_path = None;
/// onion_messenger.send_onion_message(&intermediate_hops, Destination::Node(destination_node_id), reply_path);
///
/// // Create a blinded route to yourself, for someone to send an onion message to.
/// # let your_node_id = hop_node_id1;
/// let hops = [hop_node_id3, hop_node_id4, your_node_id];
/// let blinded_route = BlindedRoute::new(&hops, &keys_manager, &secp_ctx).unwrap();
///
/// // Send an empty onion message to a blinded route.
/// # let intermediate_hops = [hop_node_id1, hop_node_id2];
/// let reply_path = None;
/// onion_messenger.send_onion_message(&intermediate_hops, Destination::BlindedRoute(blinded_route), reply_path);
/// ```
///
/// [offers]: <https://github.com/lightning/bolts/pull/798>
/// [`OnionMessenger`]: crate::onion_message::OnionMessenger
pub struct OnionMessenger<Signer: Sign, K: Deref, L: Deref>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
keys_manager: K,
logger: L,
pending_messages: Mutex<HashMap<PublicKey, VecDeque<msgs::OnionMessage>>>,
secp_ctx: Secp256k1<secp256k1::All>,
// Coming soon:
// invoice_handler: InvoiceHandler,
// custom_handler: CustomHandler, // handles custom onion messages
}
/// The destination of an onion message.
pub enum Destination {
/// We're sending this onion message to a node.
Node(PublicKey),
/// We're sending this onion message to a blinded route.
BlindedRoute(BlindedRoute),
}
impl Destination {
pub(super) fn num_hops(&self) -> usize {
match self {
Destination::Node(_) => 1,
Destination::BlindedRoute(BlindedRoute { blinded_hops, .. }) => blinded_hops.len(),
}
}
}
/// Errors that may occur when [sending an onion message].
///
/// [sending an onion message]: OnionMessenger::send_onion_message
#[derive(Debug, PartialEq)]
pub enum SendError {
/// Errored computing onion message packet keys.
Secp256k1(secp256k1::Error),
/// Because implementations such as Eclair will drop onion messages where the message packet
/// exceeds 32834 bytes, we refuse to send messages where the packet exceeds this size.
TooBigPacket,
/// The provided [`Destination`] was an invalid [`BlindedRoute`], due to having fewer than two
/// blinded hops.
TooFewBlindedHops,
/// Our next-hop peer was offline or does not support onion message forwarding.
InvalidFirstHop,
/// Our next-hop peer's buffer was full or our total outbound buffer was full.
BufferFull,
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
/// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to
/// their respective handlers.
pub fn new(keys_manager: K, logger: L) -> Self {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
OnionMessenger {
keys_manager,
pending_messages: Mutex::new(HashMap::new()),
secp_ctx,
logger,
}
}
/// Send an empty onion message to `destination`, routing it through `intermediate_nodes`.
/// See [`OnionMessenger`] for example usage.
pub fn send_onion_message(&self, intermediate_nodes: &[PublicKey], destination: Destination, reply_path: Option<BlindedRoute>) -> Result<(), SendError> {
if let Destination::BlindedRoute(BlindedRoute { ref blinded_hops, .. }) = destination {
if blinded_hops.len() < 2 {
return Err(SendError::TooFewBlindedHops);
}
}
let blinding_secret_bytes = self.keys_manager.get_secure_random_bytes();
let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
(intermediate_nodes[0], PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret))
} else {
match destination {
Destination::Node(pk) => (pk, PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret)),
Destination::BlindedRoute(BlindedRoute { introduction_node_id, blinding_point, .. }) =>
(introduction_node_id, blinding_point),
}
};
let (packet_payloads, packet_keys) = packet_payloads_and_keys(
&self.secp_ctx, intermediate_nodes, destination, reply_path, &blinding_secret)
.map_err(|e| SendError::Secp256k1(e))?;
let prng_seed = self.keys_manager.get_secure_random_bytes();
let onion_routing_packet = construct_onion_message_packet(
packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
if outbound_buffer_full(&introduction_node_id, &pending_per_peer_msgs) { return Err(SendError::BufferFull) }
match pending_per_peer_msgs.entry(introduction_node_id) {
hash_map::Entry::Vacant(_) => Err(SendError::InvalidFirstHop),
hash_map::Entry::Occupied(mut e) => {
e.get_mut().push_back(msgs::OnionMessage { blinding_point, onion_routing_packet });
Ok(())
}
}
}
#[cfg(test)]
pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<msgs::OnionMessage>> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
let mut msgs = HashMap::new();
// We don't want to disconnect the peers by removing them entirely from the original map, so we
// swap the pending message buffers individually.
for (peer_node_id, pending_messages) in &mut *pending_msgs {
msgs.insert(*peer_node_id, core::mem::take(pending_messages));
}
msgs
}
}
fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, VecDeque<msgs::OnionMessage>>) -> bool {
const MAX_TOTAL_BUFFER_SIZE: usize = (1 << 20) * 128;
const MAX_PER_PEER_BUFFER_SIZE: usize = (1 << 10) * 256;
let mut total_buffered_bytes = 0;
let mut peer_buffered_bytes = 0;
for (pk, peer_buf) in buffer {
for om in peer_buf {
let om_len = om.serialized_length();
if pk == peer_node_id {
peer_buffered_bytes += om_len;
}
total_buffered_bytes += om_len;
if total_buffered_bytes >= MAX_TOTAL_BUFFER_SIZE ||
peer_buffered_bytes >= MAX_PER_PEER_BUFFER_SIZE
{
return true
}
}
}
false
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessageHandler for OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
/// Handle an incoming onion message. Currently, if a message was destined for us we will log, but
/// soon we'll delegate the onion message to a handler that can generate invoices or send
/// payments.
fn handle_onion_message(&self, _peer_node_id: &PublicKey, msg: &msgs::OnionMessage) {
let control_tlvs_ss = match self.keys_manager.ecdh(Recipient::Node, &msg.blinding_point, None) {
Ok(ss) => ss,
Err(e) => {
log_error!(self.logger, "Failed to retrieve node secret: {:?}", e);
return
}
};
let onion_decode_ss = {
let blinding_factor = {
let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
hmac.input(control_tlvs_ss.as_ref());
Hmac::from_engine(hmac).into_inner()
};
match self.keys_manager.ecdh(Recipient::Node, &msg.onion_routing_packet.public_key,
Some(&Scalar::from_be_bytes(blinding_factor).unwrap()))
{
Ok(ss) => ss.secret_bytes(),
Err(()) => {
log_trace!(self.logger, "Failed to compute onion packet shared secret");
return
}
}
};
match onion_utils::decode_next_hop(onion_decode_ss, &msg.onion_routing_packet.hop_data[..],
msg.onion_routing_packet.hmac, control_tlvs_ss)
{
Ok((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id }), reply_path,
}, None)) => {
log_info!(self.logger,
"Received an onion message with path_id: {:02x?} and {}reply_path",
path_id, if reply_path.is_some() { "" } else { "no " });
},
Ok((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id, next_blinding_override
})), Some((next_hop_hmac, new_packet_bytes)))) => {
// TODO: we need to check whether `next_node_id` is our node, in which case this is a dummy
// blinded hop and this onion message is destined for us. In this situation, we should keep
// unwrapping the onion layers to get to the final payload. Since we don't have the option
// of creating blinded routes with dummy hops currently, we should be ok to not handle this
// for now.
let new_pubkey = match onion_utils::next_hop_packet_pubkey(&self.secp_ctx, msg.onion_routing_packet.public_key, &onion_decode_ss) {
Ok(pk) => pk,
Err(e) => {
log_trace!(self.logger, "Failed to compute next hop packet pubkey: {}", e);
return
}
};
let outgoing_packet = Packet { | public_key: new_pubkey,
hop_data: new_packet_bytes,
hmac: next_hop_hmac,
};
let onion_message = msgs::OnionMessage {
blinding_point: match next_blinding_override {
Some(blinding_point) => blinding_point,
None => {
let blinding_factor = {
let mut sha = Sha256::engine();
sha.input(&msg.blinding_point.serialize()[..]);
sha.input(control_tlvs_ss.as_ref());
Sha256::from_engine(sha).into_inner()
};
let next_blinding_point = msg.blinding_point;
match next_blinding_point.mul_tweak(&self.secp_ctx, &Scalar::from_be_bytes(blinding_factor).unwrap()) {
Ok(bp) => bp,
Err(e) => {
log_trace!(self.logger, "Failed to compute next blinding point: {}", e);
return
}
}
},
},
onion_routing_packet: outgoing_packet,
};
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
if outbound_buffer_full(&next_node_id, &pending_per_peer_msgs) {
log_trace!(self.logger, "Dropping forwarded onion message to peer {:?}: outbound buffer full", next_node_id);
return
}
#[cfg(fuzzing)]
pending_per_peer_msgs.entry(next_node_id).or_insert_with(VecDeque::new);
match pending_per_peer_msgs.entry(next_node_id) {
hash_map::Entry::Vacant(_) => {
log_trace!(self.logger, "Dropping forwarded onion message to disconnected peer {:?}", next_node_id);
return
},
hash_map::Entry::Occupied(mut e) => {
e.get_mut().push_back(onion_message);
log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
}
};
},
Err(e) => {
log_trace!(self.logger, "Errored decoding onion message packet: {:?}", e);
},
_ => {
log_trace!(self.logger, "Received bogus onion message packet, either the sender encoded a final hop as a forwarding hop or vice versa");
},
};
}
fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) -> Result<(), ()> {
if init.features.supports_onion_messages() {
let mut peers = self.pending_messages.lock().unwrap();
peers.insert(their_node_id.clone(), VecDeque::new());
}
Ok(())
}
fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
let mut pending_msgs = self.pending_messages.lock().unwrap();
pending_msgs.remove(their_node_id);
}
fn provided_node_features(&self) -> NodeFeatures {
let mut features = NodeFeatures::empty();
features.set_onion_messages_optional();
features
}
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
let mut features = InitFeatures::empty();
features.set_onion_messages_optional();
features
}
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessageProvider for OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<msgs::OnionMessage> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
return msgs.pop_front()
}
None
}
}
// TODO: parameterize the below Simple* types with OnionMessenger and handle the messages it
// produces
/// Useful for simplifying the parameters of [`SimpleArcChannelManager`] and
/// [`SimpleArcPeerManager`]. See their docs for more details.
///
/// (C-not exported) as `Arc`s don't make sense in bindings.
///
/// [`SimpleArcChannelManager`]: crate::ln::channelmanager::SimpleArcChannelManager
/// [`SimpleArcPeerManager`]: crate::ln::peer_handler::SimpleArcPeerManager
pub type SimpleArcOnionMessenger<L> = OnionMessenger<InMemorySigner, Arc<KeysManager>, Arc<L>>;
/// Useful for simplifying the parameters of [`SimpleRefChannelManager`] and
/// [`SimpleRefPeerManager`]. See their docs for more details.
///
/// (C-not exported) as general type aliases don't make sense in bindings.
///
/// [`SimpleRefChannelManager`]: crate::ln::channelmanager::SimpleRefChannelManager
/// [`SimpleRefPeerManager`]: crate::ln::peer_handler::SimpleRefPeerManager
pub type SimpleRefOnionMessenger<'a, 'b, L> = OnionMessenger<InMemorySigner, &'a KeysManager, &'b L>;
/// Construct onion packet payloads and keys for sending an onion message along the given
/// `unblinded_path` to the given `destination`.
fn packet_payloads_and_keys<T: secp256k1::Signing + secp256k1::Verification>(
secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], destination: Destination, mut reply_path:
Option<BlindedRoute>, session_priv: &SecretKey
) -> Result<(Vec<(Payload, [u8; 32])>, Vec<onion_utils::OnionKeys>), secp256k1::Error> {
let num_hops = unblinded_path.len() + destination.num_hops();
let mut payloads = Vec::with_capacity(num_hops);
let mut onion_packet_keys = Vec::with_capacity(num_hops);
let (mut intro_node_id_blinding_pt, num_blinded_hops) = if let Destination::BlindedRoute(BlindedRoute {
introduction_node_id, blinding_point, blinded_hops }) = &destination {
(Some((*introduction_node_id, *blinding_point)), blinded_hops.len()) } else { (None, 0) };
let num_unblinded_hops = num_hops - num_blinded_hops;
let mut unblinded_path_idx = 0;
let mut blinded_path_idx = 0;
let mut prev_control_tlvs_ss = None;
utils::construct_keys_callback(secp_ctx, unblinded_path, Some(destination), session_priv, |_, onion_packet_ss, ephemeral_pubkey, control_tlvs_ss, unblinded_pk_opt, enc_payload_opt| {
if num_unblinded_hops != 0 && unblinded_path_idx < num_unblinded_hops {
if let Some(ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(
ForwardTlvs {
next_node_id: unblinded_pk_opt.unwrap(),
next_blinding_override: None,
}
)), ss));
}
prev_control_tlvs_ss = Some(control_tlvs_ss);
unblinded_path_idx += 1;
} else if let Some((intro_node_id, blinding_pt)) = intro_node_id_blinding_pt.take() {
if let Some(control_tlvs_ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id: intro_node_id,
next_blinding_override: Some(blinding_pt),
})), control_tlvs_ss));
}
if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(encrypted_payload)),
control_tlvs_ss));
} else { debug_assert!(false); }
blinded_path_idx += 1;
} else if blinded_path_idx < num_blinded_hops - 1 && enc_payload_opt.is_some() {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(enc_payload_opt.unwrap())),
control_tlvs_ss));
blinded_path_idx += 1;
} else if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Blinded(encrypted_payload),
reply_path: reply_path.take(),
}, control_tlvs_ss));
}
let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(onion_packet_ss.as_ref());
onion_packet_keys.push(onion_utils::OnionKeys {
#[cfg(test)]
shared_secret: onion_packet_ss,
#[cfg(test)]
blinding_factor: [0; 32],
ephemeral_pubkey,
rho,
mu,
});
})?;
if let Some(control_tlvs_ss) = prev_control_tlvs_ss {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id: None, }),
reply_path: reply_path.take(),
}, control_tlvs_ss));
}
Ok((payloads, onion_packet_keys))
}
/// Errors if the serialized payload size exceeds onion_message::BIG_PACKET_HOP_DATA_LEN
fn construct_onion_message_packet(payloads: Vec<(Payload, [u8; 32])>, onion_keys: Vec<onion_utils::OnionKeys>, prng_seed: [u8; 32]) -> Result<Packet, ()> {
// Spec rationale:
// "`len` allows larger messages to be sent than the standard 1300 bytes allowed for an HTLC
// onion, but this should be used sparingly as it is reduces anonymity set, hence the
// recommendation that it either look like an HTLC onion, or if larger, be a fixed size."
let payloads_ser_len = onion_utils::payloads_serialized_length(&payloads);
let hop_data_len = if payloads_ser_len <= SMALL_PACKET_HOP_DATA_LEN {
SMALL_PACKET_HOP_DATA_LEN
} else if payloads_ser_len <= BIG_PACKET_HOP_DATA_LEN {
BIG_PACKET_HOP_DATA_LEN
} else { return Err(()) };
Ok(onion_utils::construct_onion_message_packet::<_, _>(
payloads, onion_keys, prng_seed, hop_data_len))
} | version: 0, | random_line_split |
messenger.rs | // This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! LDK sends, receives, and forwards onion messages via the [`OnionMessenger`]. See its docs for
//! more information.
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
use chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient, Sign};
use ln::features::{InitFeatures, NodeFeatures};
use ln::msgs::{self, OnionMessageHandler};
use ln::onion_utils;
use super::blinded_route::{BlindedRoute, ForwardTlvs, ReceiveTlvs};
use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
use super::utils;
use util::events::OnionMessageProvider;
use util::logger::Logger;
use util::ser::Writeable;
use core::ops::Deref;
use sync::{Arc, Mutex};
use prelude::*;
/// A sender, receiver and forwarder of onion messages. In upcoming releases, this object will be
/// used to retrieve invoices and fulfill invoice requests from [offers]. Currently, only sending
/// and receiving empty onion messages is supported.
///
/// # Example
///
/// ```
/// # extern crate bitcoin;
/// # use bitcoin::hashes::_export::_core::time::Duration;
/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
/// # use lightning::chain::keysinterface::{InMemorySigner, KeysManager, KeysInterface};
/// # use lightning::onion_message::{BlindedRoute, Destination, OnionMessenger};
/// # use lightning::util::logger::{Logger, Record};
/// # use std::sync::Arc;
/// # struct FakeLogger {};
/// # impl Logger for FakeLogger {
/// # fn log(&self, record: &Record) { unimplemented!() }
/// # }
/// # let seed = [42u8; 32];
/// # let time = Duration::from_secs(123456);
/// # let keys_manager = KeysManager::new(&seed, time.as_secs(), time.subsec_nanos());
/// # let logger = Arc::new(FakeLogger {});
/// # let node_secret = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
/// # let secp_ctx = Secp256k1::new();
/// # let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
/// # let (hop_node_id2, hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1,
/// hop_node_id1);
/// # let destination_node_id = hop_node_id1;
/// #
/// // Create the onion messenger. This must use the same `keys_manager` as is passed to your
/// // ChannelManager.
/// let onion_messenger = OnionMessenger::new(&keys_manager, logger);
///
/// // Send an empty onion message to a node id.
/// let intermediate_hops = [hop_node_id1, hop_node_id2];
/// let reply_path = None;
/// onion_messenger.send_onion_message(&intermediate_hops, Destination::Node(destination_node_id), reply_path);
///
/// // Create a blinded route to yourself, for someone to send an onion message to.
/// # let your_node_id = hop_node_id1;
/// let hops = [hop_node_id3, hop_node_id4, your_node_id];
/// let blinded_route = BlindedRoute::new(&hops, &keys_manager, &secp_ctx).unwrap();
///
/// // Send an empty onion message to a blinded route.
/// # let intermediate_hops = [hop_node_id1, hop_node_id2];
/// let reply_path = None;
/// onion_messenger.send_onion_message(&intermediate_hops, Destination::BlindedRoute(blinded_route), reply_path);
/// ```
///
/// [offers]: <https://github.com/lightning/bolts/pull/798>
/// [`OnionMessenger`]: crate::onion_message::OnionMessenger
pub struct OnionMessenger<Signer: Sign, K: Deref, L: Deref>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
keys_manager: K,
logger: L,
pending_messages: Mutex<HashMap<PublicKey, VecDeque<msgs::OnionMessage>>>,
secp_ctx: Secp256k1<secp256k1::All>,
// Coming soon:
// invoice_handler: InvoiceHandler,
// custom_handler: CustomHandler, // handles custom onion messages
}
/// The destination of an onion message.
pub enum Destination {
/// We're sending this onion message to a node.
Node(PublicKey),
/// We're sending this onion message to a blinded route.
BlindedRoute(BlindedRoute),
}
impl Destination {
pub(super) fn num_hops(&self) -> usize |
}
/// Errors that may occur when [sending an onion message].
///
/// [sending an onion message]: OnionMessenger::send_onion_message
#[derive(Debug, PartialEq)]
pub enum SendError {
/// Errored computing onion message packet keys.
Secp256k1(secp256k1::Error),
/// Because implementations such as Eclair will drop onion messages where the message packet
/// exceeds 32834 bytes, we refuse to send messages where the packet exceeds this size.
TooBigPacket,
/// The provided [`Destination`] was an invalid [`BlindedRoute`], due to having fewer than two
/// blinded hops.
TooFewBlindedHops,
/// Our next-hop peer was offline or does not support onion message forwarding.
InvalidFirstHop,
/// Our next-hop peer's buffer was full or our total outbound buffer was full.
BufferFull,
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
/// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to
/// their respective handlers.
pub fn new(keys_manager: K, logger: L) -> Self {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
OnionMessenger {
keys_manager,
pending_messages: Mutex::new(HashMap::new()),
secp_ctx,
logger,
}
}
/// Send an empty onion message to `destination`, routing it through `intermediate_nodes`.
/// See [`OnionMessenger`] for example usage.
pub fn send_onion_message(&self, intermediate_nodes: &[PublicKey], destination: Destination, reply_path: Option<BlindedRoute>) -> Result<(), SendError> {
if let Destination::BlindedRoute(BlindedRoute { ref blinded_hops, .. }) = destination {
if blinded_hops.len() < 2 {
return Err(SendError::TooFewBlindedHops);
}
}
let blinding_secret_bytes = self.keys_manager.get_secure_random_bytes();
let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
(intermediate_nodes[0], PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret))
} else {
match destination {
Destination::Node(pk) => (pk, PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret)),
Destination::BlindedRoute(BlindedRoute { introduction_node_id, blinding_point, .. }) =>
(introduction_node_id, blinding_point),
}
};
let (packet_payloads, packet_keys) = packet_payloads_and_keys(
&self.secp_ctx, intermediate_nodes, destination, reply_path, &blinding_secret)
.map_err(|e| SendError::Secp256k1(e))?;
let prng_seed = self.keys_manager.get_secure_random_bytes();
let onion_routing_packet = construct_onion_message_packet(
packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
if outbound_buffer_full(&introduction_node_id, &pending_per_peer_msgs) { return Err(SendError::BufferFull) }
match pending_per_peer_msgs.entry(introduction_node_id) {
hash_map::Entry::Vacant(_) => Err(SendError::InvalidFirstHop),
hash_map::Entry::Occupied(mut e) => {
e.get_mut().push_back(msgs::OnionMessage { blinding_point, onion_routing_packet });
Ok(())
}
}
}
#[cfg(test)]
pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<msgs::OnionMessage>> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
let mut msgs = HashMap::new();
// We don't want to disconnect the peers by removing them entirely from the original map, so we
// swap the pending message buffers individually.
for (peer_node_id, pending_messages) in &mut *pending_msgs {
msgs.insert(*peer_node_id, core::mem::take(pending_messages));
}
msgs
}
}
fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, VecDeque<msgs::OnionMessage>>) -> bool {
const MAX_TOTAL_BUFFER_SIZE: usize = (1 << 20) * 128;
const MAX_PER_PEER_BUFFER_SIZE: usize = (1 << 10) * 256;
let mut total_buffered_bytes = 0;
let mut peer_buffered_bytes = 0;
for (pk, peer_buf) in buffer {
for om in peer_buf {
let om_len = om.serialized_length();
if pk == peer_node_id {
peer_buffered_bytes += om_len;
}
total_buffered_bytes += om_len;
if total_buffered_bytes >= MAX_TOTAL_BUFFER_SIZE ||
peer_buffered_bytes >= MAX_PER_PEER_BUFFER_SIZE
{
return true
}
}
}
false
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessageHandler for OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
/// Handle an incoming onion message. Currently, if a message was destined for us we will log, but
/// soon we'll delegate the onion message to a handler that can generate invoices or send
/// payments.
fn handle_onion_message(&self, _peer_node_id: &PublicKey, msg: &msgs::OnionMessage) {
let control_tlvs_ss = match self.keys_manager.ecdh(Recipient::Node, &msg.blinding_point, None) {
Ok(ss) => ss,
Err(e) => {
log_error!(self.logger, "Failed to retrieve node secret: {:?}", e);
return
}
};
let onion_decode_ss = {
let blinding_factor = {
let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
hmac.input(control_tlvs_ss.as_ref());
Hmac::from_engine(hmac).into_inner()
};
match self.keys_manager.ecdh(Recipient::Node, &msg.onion_routing_packet.public_key,
Some(&Scalar::from_be_bytes(blinding_factor).unwrap()))
{
Ok(ss) => ss.secret_bytes(),
Err(()) => {
log_trace!(self.logger, "Failed to compute onion packet shared secret");
return
}
}
};
match onion_utils::decode_next_hop(onion_decode_ss, &msg.onion_routing_packet.hop_data[..],
msg.onion_routing_packet.hmac, control_tlvs_ss)
{
Ok((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id }), reply_path,
}, None)) => {
log_info!(self.logger,
"Received an onion message with path_id: {:02x?} and {}reply_path",
path_id, if reply_path.is_some() { "" } else { "no " });
},
Ok((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id, next_blinding_override
})), Some((next_hop_hmac, new_packet_bytes)))) => {
// TODO: we need to check whether `next_node_id` is our node, in which case this is a dummy
// blinded hop and this onion message is destined for us. In this situation, we should keep
// unwrapping the onion layers to get to the final payload. Since we don't have the option
// of creating blinded routes with dummy hops currently, we should be ok to not handle this
// for now.
let new_pubkey = match onion_utils::next_hop_packet_pubkey(&self.secp_ctx, msg.onion_routing_packet.public_key, &onion_decode_ss) {
Ok(pk) => pk,
Err(e) => {
log_trace!(self.logger, "Failed to compute next hop packet pubkey: {}", e);
return
}
};
let outgoing_packet = Packet {
version: 0,
public_key: new_pubkey,
hop_data: new_packet_bytes,
hmac: next_hop_hmac,
};
let onion_message = msgs::OnionMessage {
blinding_point: match next_blinding_override {
Some(blinding_point) => blinding_point,
None => {
let blinding_factor = {
let mut sha = Sha256::engine();
sha.input(&msg.blinding_point.serialize()[..]);
sha.input(control_tlvs_ss.as_ref());
Sha256::from_engine(sha).into_inner()
};
let next_blinding_point = msg.blinding_point;
match next_blinding_point.mul_tweak(&self.secp_ctx, &Scalar::from_be_bytes(blinding_factor).unwrap()) {
Ok(bp) => bp,
Err(e) => {
log_trace!(self.logger, "Failed to compute next blinding point: {}", e);
return
}
}
},
},
onion_routing_packet: outgoing_packet,
};
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
if outbound_buffer_full(&next_node_id, &pending_per_peer_msgs) {
log_trace!(self.logger, "Dropping forwarded onion message to peer {:?}: outbound buffer full", next_node_id);
return
}
#[cfg(fuzzing)]
pending_per_peer_msgs.entry(next_node_id).or_insert_with(VecDeque::new);
match pending_per_peer_msgs.entry(next_node_id) {
hash_map::Entry::Vacant(_) => {
log_trace!(self.logger, "Dropping forwarded onion message to disconnected peer {:?}", next_node_id);
return
},
hash_map::Entry::Occupied(mut e) => {
e.get_mut().push_back(onion_message);
log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
}
};
},
Err(e) => {
log_trace!(self.logger, "Errored decoding onion message packet: {:?}", e);
},
_ => {
log_trace!(self.logger, "Received bogus onion message packet, either the sender encoded a final hop as a forwarding hop or vice versa");
},
};
}
fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) -> Result<(), ()> {
if init.features.supports_onion_messages() {
let mut peers = self.pending_messages.lock().unwrap();
peers.insert(their_node_id.clone(), VecDeque::new());
}
Ok(())
}
fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
let mut pending_msgs = self.pending_messages.lock().unwrap();
pending_msgs.remove(their_node_id);
}
fn provided_node_features(&self) -> NodeFeatures {
let mut features = NodeFeatures::empty();
features.set_onion_messages_optional();
features
}
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
let mut features = InitFeatures::empty();
features.set_onion_messages_optional();
features
}
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessageProvider for OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<msgs::OnionMessage> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
return msgs.pop_front()
}
None
}
}
// TODO: parameterize the below Simple* types with OnionMessenger and handle the messages it
// produces
/// Useful for simplifying the parameters of [`SimpleArcChannelManager`] and
/// [`SimpleArcPeerManager`]. See their docs for more details.
///
/// (C-not exported) as `Arc`s don't make sense in bindings.
///
/// [`SimpleArcChannelManager`]: crate::ln::channelmanager::SimpleArcChannelManager
/// [`SimpleArcPeerManager`]: crate::ln::peer_handler::SimpleArcPeerManager
pub type SimpleArcOnionMessenger<L> = OnionMessenger<InMemorySigner, Arc<KeysManager>, Arc<L>>;
/// Useful for simplifying the parameters of [`SimpleRefChannelManager`] and
/// [`SimpleRefPeerManager`]. See their docs for more details.
///
/// (C-not exported) as general type aliases don't make sense in bindings.
///
/// [`SimpleRefChannelManager`]: crate::ln::channelmanager::SimpleRefChannelManager
/// [`SimpleRefPeerManager`]: crate::ln::peer_handler::SimpleRefPeerManager
pub type SimpleRefOnionMessenger<'a, 'b, L> = OnionMessenger<InMemorySigner, &'a KeysManager, &'b L>;
/// Construct onion packet payloads and keys for sending an onion message along the given
/// `unblinded_path` to the given `destination`.
fn packet_payloads_and_keys<T: secp256k1::Signing + secp256k1::Verification>(
secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], destination: Destination, mut reply_path:
Option<BlindedRoute>, session_priv: &SecretKey
) -> Result<(Vec<(Payload, [u8; 32])>, Vec<onion_utils::OnionKeys>), secp256k1::Error> {
let num_hops = unblinded_path.len() + destination.num_hops();
let mut payloads = Vec::with_capacity(num_hops);
let mut onion_packet_keys = Vec::with_capacity(num_hops);
let (mut intro_node_id_blinding_pt, num_blinded_hops) = if let Destination::BlindedRoute(BlindedRoute {
introduction_node_id, blinding_point, blinded_hops }) = &destination {
(Some((*introduction_node_id, *blinding_point)), blinded_hops.len()) } else { (None, 0) };
let num_unblinded_hops = num_hops - num_blinded_hops;
let mut unblinded_path_idx = 0;
let mut blinded_path_idx = 0;
let mut prev_control_tlvs_ss = None;
utils::construct_keys_callback(secp_ctx, unblinded_path, Some(destination), session_priv, |_, onion_packet_ss, ephemeral_pubkey, control_tlvs_ss, unblinded_pk_opt, enc_payload_opt| {
if num_unblinded_hops != 0 && unblinded_path_idx < num_unblinded_hops {
if let Some(ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(
ForwardTlvs {
next_node_id: unblinded_pk_opt.unwrap(),
next_blinding_override: None,
}
)), ss));
}
prev_control_tlvs_ss = Some(control_tlvs_ss);
unblinded_path_idx += 1;
} else if let Some((intro_node_id, blinding_pt)) = intro_node_id_blinding_pt.take() {
if let Some(control_tlvs_ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id: intro_node_id,
next_blinding_override: Some(blinding_pt),
})), control_tlvs_ss));
}
if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(encrypted_payload)),
control_tlvs_ss));
} else { debug_assert!(false); }
blinded_path_idx += 1;
} else if blinded_path_idx < num_blinded_hops - 1 && enc_payload_opt.is_some() {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(enc_payload_opt.unwrap())),
control_tlvs_ss));
blinded_path_idx += 1;
} else if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Blinded(encrypted_payload),
reply_path: reply_path.take(),
}, control_tlvs_ss));
}
let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(onion_packet_ss.as_ref());
onion_packet_keys.push(onion_utils::OnionKeys {
#[cfg(test)]
shared_secret: onion_packet_ss,
#[cfg(test)]
blinding_factor: [0; 32],
ephemeral_pubkey,
rho,
mu,
});
})?;
if let Some(control_tlvs_ss) = prev_control_tlvs_ss {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id: None, }),
reply_path: reply_path.take(),
}, control_tlvs_ss));
}
Ok((payloads, onion_packet_keys))
}
/// Errors if the serialized payload size exceeds onion_message::BIG_PACKET_HOP_DATA_LEN
fn construct_onion_message_packet(payloads: Vec<(Payload, [u8; 32])>, onion_keys: Vec<onion_utils::OnionKeys>, prng_seed: [u8; 32]) -> Result<Packet, ()> {
// Spec rationale:
// "`len` allows larger messages to be sent than the standard 1300 bytes allowed for an HTLC
// onion, but this should be used sparingly as it is reduces anonymity set, hence the
// recommendation that it either look like an HTLC onion, or if larger, be a fixed size."
let payloads_ser_len = onion_utils::payloads_serialized_length(&payloads);
let hop_data_len = if payloads_ser_len <= SMALL_PACKET_HOP_DATA_LEN {
SMALL_PACKET_HOP_DATA_LEN
} else if payloads_ser_len <= BIG_PACKET_HOP_DATA_LEN {
BIG_PACKET_HOP_DATA_LEN
} else { return Err(()) };
Ok(onion_utils::construct_onion_message_packet::<_, _>(
payloads, onion_keys, prng_seed, hop_data_len))
}
| {
match self {
Destination::Node(_) => 1,
Destination::BlindedRoute(BlindedRoute { blinded_hops, .. }) => blinded_hops.len(),
}
} | identifier_body |
joint_feldman.rs | //! Implements the Distributed Key Generation protocol from
//! [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
//! The protocol runs at minimum in two phases and at most in three phases.
use super::common::*;
use crate::primitives::{
group::Group,
phases::{Phase0, Phase1, Phase2, Phase3},
status::{Status, StatusMatrix},
types::*,
DKGError, DKGResult,
};
use threshold_bls::{
group::{Curve, Element},
poly::{Idx, Poly, PrivatePoly, PublicPoly},
sig::Share,
};
use rand_core::RngCore;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{cell::RefCell, collections::HashMap, fmt::Debug};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
struct DKGInfo<C: Curve> {
private_key: C::Scalar,
public_key: C::Point,
index: Idx,
group: Group<C>,
secret: Poly<C::Scalar>,
public: Poly<C::Point>,
}
impl<C: Curve> DKGInfo<C> {
/// Returns the number of nodes participating in the group for this DKG
fn n(&self) -> usize {
self.group.len()
}
/// Returns the threshold of the group for this DKG
fn thr(&self) -> usize {
self.group.threshold
}
}
/// DKG is the struct containing the logic to run the Distributed Key Generation
/// protocol from [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
///
/// The protocol runs at minimum in two phases and at most in three phases as
/// described in the module documentation.
///
/// Each transition to a new phase is consuming the DKG state (struct) to produce
/// a new state that only accepts to transition to the next phase.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
pub struct DKG<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> DKG<C> {
/// Creates a new DKG instance from the provided private key and group.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new(private_key: C::Scalar, group: Group<C>) -> Result<DKG<C>, DKGError> {
use rand::prelude::*;
Self::new_rand(private_key, group, &mut thread_rng())
}
/// Creates a new DKG instance from the provided private key, group and RNG.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new_rand<R: RngCore>(
private_key: C::Scalar,
group: Group<C>,
rng: &mut R,
) -> Result<DKG<C>, DKGError> {
// get the public key
let mut public_key = C::Point::one();
public_key.mul(&private_key);
// make sure the private key is not identity element nor neutral element
if private_key == C::Scalar::zero() || private_key == C::Scalar::one() {
return Err(DKGError::PrivateKeyInvalid);
}
// check if the public key is part of the group
let index = group
.index(&public_key)
.ok_or(DKGError::PublicKeyNotFound)?;
// Generate a secret polynomial and commit to it
let secret = PrivatePoly::<C>::new_from(group.threshold - 1, rng);
let public = secret.commit::<C::Point>();
let info = DKGInfo {
private_key,
public_key,
index,
group,
secret,
public,
};
Ok(DKG { info })
}
}
impl<C: Curve> Phase0<C> for DKG<C> {
type Next = DKGWaitingShare<C>;
/// Evaluates the secret polynomial at the index of each DKG participant and encrypts
/// the result with the corresponding public key. Returns the bundled encrypted shares
/// as well as the next phase of the DKG.
fn encrypt_shares<R: RngCore>(
self,
rng: &mut R,
) -> DKGResult<(DKGWaitingShare<C>, Option<BundledShares<C>>)> {
let bundle = create_share_bundle(
self.info.index,
&self.info.secret,
&self.info.public,
&self.info.group,
rng,
)?;
let dw = DKGWaitingShare { info: self.info };
Ok((dw, Some(bundle)))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the shares from the previous phase's participants
/// as input. After processing the shares, if there were any complaints it will generate
/// a bundle of responses for the next phase.
pub struct DKGWaitingShare<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> Phase1<C> for DKGWaitingShare<C> {
type Next = DKGWaitingResponse<C>;
#[allow(unused_assignments)]
/// Tries to decrypt the provided shares and calculate the secret key and the
/// threshold public key. If `publish_all` is set to true then the returned
/// responses will include both complaints and successful statuses. Consider setting
/// it to false when communication complexity is high.
///
/// A complaint is returned in the following cases:
/// - invalid dealer index
/// - absentee shares for us
/// - invalid encryption
/// - invalid length of public polynomial
/// - invalid share w.r.t. public polynomial
fn process_shares(
self,
bundles: &[BundledShares<C>],
mut publish_all: bool,
) -> DKGResult<(DKGWaitingResponse<C>, Option<BundledResponses>)> {
publish_all = false;
let thr = self.info.thr();
let my_idx = self.info.index;
let (shares, publics, mut statuses) = process_shares_get_all(
&self.info.group,
&self.info.group,
Some(my_idx),
my_idx,
&self.info.private_key,
bundles,
)?;
// in DKG every dealer is also a share holder, we assume that a dealer
// will issue a valid share for itself
for n in self.info.group.nodes.iter() {
statuses.set(n.id(), n.id(), Status::Success);
}
// we check with `thr - 1` because we already have our shares
if shares.len() < thr - 1 {
// that means the threat model is not respected since there should
// be at least a threshold of honest shares
return Err(DKGError::NotEnoughValidShares(shares.len(), thr));
}
// The user's secret share is the sum of all received shares (remember:
// each share is an evaluation of a participant's private polynomial at
// our index)
let mut fshare = self.info.secret.eval(self.info.index).value;
// The public key polynomial is the sum of all shared polynomials
let mut fpub = self.info.public.clone();
shares.iter().for_each(|(&dealer_idx, share)| {
fpub.add(publics.get(&dealer_idx).unwrap());
fshare.add(share);
});
let bundle = compute_bundle_response(my_idx, &statuses, publish_all);
let new_dkg = DKGWaitingResponse::new(self.info, fshare, fpub, statuses, publics);
Ok((new_dkg, bundle))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the responses from the previous phase's participants
/// as input. The responses will be processed and justifications may be generated as a byproduct
/// if there are complaints.
pub struct DKGWaitingResponse<C: Curve> {
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
}
impl<C: Curve> DKGWaitingResponse<C> {
fn new(
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
) -> Self {
Self {
info,
dist_share,
dist_pub,
statuses,
publics,
}
}
}
impl<C: Curve> Phase2<C> for DKGWaitingResponse<C> {
type Next = DKGWaitingJustification<C>;
#[allow(clippy::type_complexity)]
/// Checks if the responses when applied to the status matrix result in a
/// matrix with only `Success` elements. If so, the protocol terminates.
///
/// If there are complaints in the Status matrix, then it will return an
/// error with the justifications required for Phase 3 of the DKG.
fn process_responses(
self,
responses: &[BundledResponses],
) -> Result<DKGOutput<C>, DKGResult<(Self::Next, Option<BundledJustification<C>>)>> {
let info = self.info;
let mut statuses = self.statuses;
set_statuses(
info.index,
&info.group,
&info.group,
&mut statuses,
responses,
);
// find out if justifications are required
// if there is a least one participant that issued one complaint
let justifications_required = info.group.nodes.iter().any(|n| !statuses.all_true(n.id()));
if justifications_required {
let bundled_justifications =
get_justification(info.index, &info.secret, &info.public, &statuses);
let dkg = DKGWaitingJustification {
info,
dist_share: self.dist_share,
dist_pub: self.dist_pub,
statuses: RefCell::new(statuses),
publics: self.publics,
};
return Err(Ok((dkg, bundled_justifications)));
}
// bingo ! Returns the final share now and stop the protocol
let share = Share {
index: info.index,
private: self.dist_share,
};
Ok(DKGOutput {
// everybody is qualified in this case since there is no
// complaint at all
qual: info.group,
public: self.dist_pub,
share,
})
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the justifications from the previous phase's participants
/// as input to produce either the final DKG Output, or an error.
pub struct DKGWaitingJustification<C: Curve> {
// TODO: transform that into one info variable that gets default value for
// missing parts depending in the round of the protocol.
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
// guaranteed to be of the right size (n)
statuses: RefCell<StatusMatrix>,
publics: HashMap<Idx, PublicPoly<C>>,
}
impl<C> Phase3<C> for DKGWaitingJustification<C>
where
C: Curve,
{
/// Accept a justification if the following conditions are true:
/// - bundle's dealer index is in range
/// - a justification was required for the given share (no-op)
/// - share corresponds to public polynomial received in the bundled shares during
/// first period.
/// Return an output if `len(qual) > thr`
fn process_justifications(
self,
justifs: &[BundledJustification<C>],
) -> Result<DKGOutput<C>, DKGError> {
// Calculate the share and public polynomial from the provided justifications
// (they will later be added to our existing share and public polynomial)
let mut add_share = C::Scalar::zero();
let mut add_public = PublicPoly::<C>::zero();
let valid_shares = internal_process_justifications(
self.info.index,
&self.info.group,
&mut self.statuses.borrow_mut(),
&self.publics,
justifs,
);
for (idx, share) in &valid_shares {
add_share.add(share);
// unwrap since internal_process_justi. gauarantees each share comes
// from a public polynomial we've seen in the first round.
add_public.add(self.publics.get(idx).unwrap());
}
// QUAL is the set of all entries in the matrix where all bits are set
let statuses = self.statuses.borrow();
let qual_indices = (0..self.info.n())
.filter(|&dealer| statuses.all_true(dealer as Idx))
.collect::<Vec<_>>();
let thr = self.info.group.threshold;
if qual_indices.len() < thr {
// too many unanswered justifications, DKG abort !
return Err(DKGError::NotEnoughJustifications(qual_indices.len(), thr));
}
// create a group out of the qualifying nodes
let qual_nodes = self
.info
.group
.nodes
.into_iter()
.filter(|n| qual_indices.contains(&(n.id() as usize)))
.collect();
let group = Group::<C>::new(qual_nodes, thr)?;
// add all good shares and public poly together
add_share.add(&self.dist_share);
add_public.add(&self.dist_pub);
let ds = Share {
index: self.info.index,
private: add_share,
};
Ok(DKGOutput { | qual: group,
public: add_public,
share: ds,
})
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::primitives::{
common::tests::{check2, full_dkg, id_out, id_resp, invalid2, invalid_shares, setup_group},
default_threshold,
};
use std::fmt::Debug;
use threshold_bls::curve::bls12377::{G1Curve as BCurve, G1};
use serde::{de::DeserializeOwned, Serialize};
use static_assertions::assert_impl_all;
assert_impl_all!(Group<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGInfo<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKG<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(EncryptedShare<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledShares<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGOutput<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledJustification<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
fn setup_dkg<C: Curve>(n: usize) -> Vec<DKG<C>> {
let (privs, group) = setup_group::<C>(n, default_threshold(n));
privs
.into_iter()
.map(|p| DKG::new(p, group.clone()).unwrap())
.collect::<Vec<_>>()
}
#[test]
fn group_index() {
let n = 6;
let (privs, group) = setup_group::<BCurve>(n, default_threshold(n));
for (i, private) in privs.iter().enumerate() {
let mut public = G1::one();
public.mul(private);
let idx = group.index(&public).expect("should find public key");
assert_eq!(idx, i as Idx);
}
}
#[test]
fn test_full_dkg() {
let n = 5;
let thr = default_threshold(n);
full_dkg(thr, setup_dkg::<BCurve>(n));
}
#[test]
fn test_invalid_shares_dkg() {
let n = 5;
let thr = default_threshold(n);
invalid_shares(
thr,
setup_dkg::<BCurve>(n),
invalid2,
id_resp,
check2,
id_out,
)
.unwrap();
}
} | random_line_split | |
joint_feldman.rs | //! Implements the Distributed Key Generation protocol from
//! [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
//! The protocol runs at minimum in two phases and at most in three phases.
use super::common::*;
use crate::primitives::{
group::Group,
phases::{Phase0, Phase1, Phase2, Phase3},
status::{Status, StatusMatrix},
types::*,
DKGError, DKGResult,
};
use threshold_bls::{
group::{Curve, Element},
poly::{Idx, Poly, PrivatePoly, PublicPoly},
sig::Share,
};
use rand_core::RngCore;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{cell::RefCell, collections::HashMap, fmt::Debug};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
struct DKGInfo<C: Curve> {
private_key: C::Scalar,
public_key: C::Point,
index: Idx,
group: Group<C>,
secret: Poly<C::Scalar>,
public: Poly<C::Point>,
}
impl<C: Curve> DKGInfo<C> {
/// Returns the number of nodes participating in the group for this DKG
fn n(&self) -> usize {
self.group.len()
}
/// Returns the threshold of the group for this DKG
fn thr(&self) -> usize {
self.group.threshold
}
}
/// DKG is the struct containing the logic to run the Distributed Key Generation
/// protocol from [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
///
/// The protocol runs at minimum in two phases and at most in three phases as
/// described in the module documentation.
///
/// Each transition to a new phase is consuming the DKG state (struct) to produce
/// a new state that only accepts to transition to the next phase.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
pub struct DKG<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> DKG<C> {
/// Creates a new DKG instance from the provided private key and group.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new(private_key: C::Scalar, group: Group<C>) -> Result<DKG<C>, DKGError> {
use rand::prelude::*;
Self::new_rand(private_key, group, &mut thread_rng())
}
/// Creates a new DKG instance from the provided private key, group and RNG.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new_rand<R: RngCore>(
private_key: C::Scalar,
group: Group<C>,
rng: &mut R,
) -> Result<DKG<C>, DKGError> {
// get the public key
let mut public_key = C::Point::one();
public_key.mul(&private_key);
// make sure the private key is not identity element nor neutral element
if private_key == C::Scalar::zero() || private_key == C::Scalar::one() |
// check if the public key is part of the group
let index = group
.index(&public_key)
.ok_or(DKGError::PublicKeyNotFound)?;
// Generate a secret polynomial and commit to it
let secret = PrivatePoly::<C>::new_from(group.threshold - 1, rng);
let public = secret.commit::<C::Point>();
let info = DKGInfo {
private_key,
public_key,
index,
group,
secret,
public,
};
Ok(DKG { info })
}
}
impl<C: Curve> Phase0<C> for DKG<C> {
type Next = DKGWaitingShare<C>;
/// Evaluates the secret polynomial at the index of each DKG participant and encrypts
/// the result with the corresponding public key. Returns the bundled encrypted shares
/// as well as the next phase of the DKG.
fn encrypt_shares<R: RngCore>(
self,
rng: &mut R,
) -> DKGResult<(DKGWaitingShare<C>, Option<BundledShares<C>>)> {
let bundle = create_share_bundle(
self.info.index,
&self.info.secret,
&self.info.public,
&self.info.group,
rng,
)?;
let dw = DKGWaitingShare { info: self.info };
Ok((dw, Some(bundle)))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the shares from the previous phase's participants
/// as input. After processing the shares, if there were any complaints it will generate
/// a bundle of responses for the next phase.
pub struct DKGWaitingShare<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> Phase1<C> for DKGWaitingShare<C> {
type Next = DKGWaitingResponse<C>;
#[allow(unused_assignments)]
/// Tries to decrypt the provided shares and calculate the secret key and the
/// threshold public key. If `publish_all` is set to true then the returned
/// responses will include both complaints and successful statuses. Consider setting
/// it to false when communication complexity is high.
///
/// A complaint is returned in the following cases:
/// - invalid dealer index
/// - absentee shares for us
/// - invalid encryption
/// - invalid length of public polynomial
/// - invalid share w.r.t. public polynomial
fn process_shares(
self,
bundles: &[BundledShares<C>],
mut publish_all: bool,
) -> DKGResult<(DKGWaitingResponse<C>, Option<BundledResponses>)> {
publish_all = false;
let thr = self.info.thr();
let my_idx = self.info.index;
let (shares, publics, mut statuses) = process_shares_get_all(
&self.info.group,
&self.info.group,
Some(my_idx),
my_idx,
&self.info.private_key,
bundles,
)?;
// in DKG every dealer is also a share holder, we assume that a dealer
// will issue a valid share for itself
for n in self.info.group.nodes.iter() {
statuses.set(n.id(), n.id(), Status::Success);
}
// we check with `thr - 1` because we already have our shares
if shares.len() < thr - 1 {
// that means the threat model is not respected since there should
// be at least a threshold of honest shares
return Err(DKGError::NotEnoughValidShares(shares.len(), thr));
}
// The user's secret share is the sum of all received shares (remember:
// each share is an evaluation of a participant's private polynomial at
// our index)
let mut fshare = self.info.secret.eval(self.info.index).value;
// The public key polynomial is the sum of all shared polynomials
let mut fpub = self.info.public.clone();
shares.iter().for_each(|(&dealer_idx, share)| {
fpub.add(publics.get(&dealer_idx).unwrap());
fshare.add(share);
});
let bundle = compute_bundle_response(my_idx, &statuses, publish_all);
let new_dkg = DKGWaitingResponse::new(self.info, fshare, fpub, statuses, publics);
Ok((new_dkg, bundle))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the responses from the previous phase's participants
/// as input. The responses will be processed and justifications may be generated as a byproduct
/// if there are complaints.
pub struct DKGWaitingResponse<C: Curve> {
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
}
impl<C: Curve> DKGWaitingResponse<C> {
fn new(
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
) -> Self {
Self {
info,
dist_share,
dist_pub,
statuses,
publics,
}
}
}
impl<C: Curve> Phase2<C> for DKGWaitingResponse<C> {
type Next = DKGWaitingJustification<C>;
#[allow(clippy::type_complexity)]
/// Checks if the responses when applied to the status matrix result in a
/// matrix with only `Success` elements. If so, the protocol terminates.
///
/// If there are complaints in the Status matrix, then it will return an
/// error with the justifications required for Phase 3 of the DKG.
fn process_responses(
self,
responses: &[BundledResponses],
) -> Result<DKGOutput<C>, DKGResult<(Self::Next, Option<BundledJustification<C>>)>> {
let info = self.info;
let mut statuses = self.statuses;
set_statuses(
info.index,
&info.group,
&info.group,
&mut statuses,
responses,
);
// find out if justifications are required
// if there is a least one participant that issued one complaint
let justifications_required = info.group.nodes.iter().any(|n| !statuses.all_true(n.id()));
if justifications_required {
let bundled_justifications =
get_justification(info.index, &info.secret, &info.public, &statuses);
let dkg = DKGWaitingJustification {
info,
dist_share: self.dist_share,
dist_pub: self.dist_pub,
statuses: RefCell::new(statuses),
publics: self.publics,
};
return Err(Ok((dkg, bundled_justifications)));
}
// bingo ! Returns the final share now and stop the protocol
let share = Share {
index: info.index,
private: self.dist_share,
};
Ok(DKGOutput {
// everybody is qualified in this case since there is no
// complaint at all
qual: info.group,
public: self.dist_pub,
share,
})
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the justifications from the previous phase's participants
/// as input to produce either the final DKG Output, or an error.
pub struct DKGWaitingJustification<C: Curve> {
// TODO: transform that into one info variable that gets default value for
// missing parts depending in the round of the protocol.
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
// guaranteed to be of the right size (n)
statuses: RefCell<StatusMatrix>,
publics: HashMap<Idx, PublicPoly<C>>,
}
impl<C> Phase3<C> for DKGWaitingJustification<C>
where
C: Curve,
{
/// Accept a justification if the following conditions are true:
/// - bundle's dealer index is in range
/// - a justification was required for the given share (no-op)
/// - share corresponds to public polynomial received in the bundled shares during
/// first period.
/// Return an output if `len(qual) > thr`
fn process_justifications(
self,
justifs: &[BundledJustification<C>],
) -> Result<DKGOutput<C>, DKGError> {
// Calculate the share and public polynomial from the provided justifications
// (they will later be added to our existing share and public polynomial)
let mut add_share = C::Scalar::zero();
let mut add_public = PublicPoly::<C>::zero();
let valid_shares = internal_process_justifications(
self.info.index,
&self.info.group,
&mut self.statuses.borrow_mut(),
&self.publics,
justifs,
);
for (idx, share) in &valid_shares {
add_share.add(share);
// unwrap since internal_process_justi. gauarantees each share comes
// from a public polynomial we've seen in the first round.
add_public.add(self.publics.get(idx).unwrap());
}
// QUAL is the set of all entries in the matrix where all bits are set
let statuses = self.statuses.borrow();
let qual_indices = (0..self.info.n())
.filter(|&dealer| statuses.all_true(dealer as Idx))
.collect::<Vec<_>>();
let thr = self.info.group.threshold;
if qual_indices.len() < thr {
// too many unanswered justifications, DKG abort !
return Err(DKGError::NotEnoughJustifications(qual_indices.len(), thr));
}
// create a group out of the qualifying nodes
let qual_nodes = self
.info
.group
.nodes
.into_iter()
.filter(|n| qual_indices.contains(&(n.id() as usize)))
.collect();
let group = Group::<C>::new(qual_nodes, thr)?;
// add all good shares and public poly together
add_share.add(&self.dist_share);
add_public.add(&self.dist_pub);
let ds = Share {
index: self.info.index,
private: add_share,
};
Ok(DKGOutput {
qual: group,
public: add_public,
share: ds,
})
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::primitives::{
common::tests::{check2, full_dkg, id_out, id_resp, invalid2, invalid_shares, setup_group},
default_threshold,
};
use std::fmt::Debug;
use threshold_bls::curve::bls12377::{G1Curve as BCurve, G1};
use serde::{de::DeserializeOwned, Serialize};
use static_assertions::assert_impl_all;
assert_impl_all!(Group<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGInfo<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKG<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(EncryptedShare<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledShares<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGOutput<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledJustification<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
fn setup_dkg<C: Curve>(n: usize) -> Vec<DKG<C>> {
let (privs, group) = setup_group::<C>(n, default_threshold(n));
privs
.into_iter()
.map(|p| DKG::new(p, group.clone()).unwrap())
.collect::<Vec<_>>()
}
#[test]
fn group_index() {
let n = 6;
let (privs, group) = setup_group::<BCurve>(n, default_threshold(n));
for (i, private) in privs.iter().enumerate() {
let mut public = G1::one();
public.mul(private);
let idx = group.index(&public).expect("should find public key");
assert_eq!(idx, i as Idx);
}
}
#[test]
fn test_full_dkg() {
let n = 5;
let thr = default_threshold(n);
full_dkg(thr, setup_dkg::<BCurve>(n));
}
#[test]
fn test_invalid_shares_dkg() {
let n = 5;
let thr = default_threshold(n);
invalid_shares(
thr,
setup_dkg::<BCurve>(n),
invalid2,
id_resp,
check2,
id_out,
)
.unwrap();
}
}
| {
return Err(DKGError::PrivateKeyInvalid);
} | conditional_block |
joint_feldman.rs | //! Implements the Distributed Key Generation protocol from
//! [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
//! The protocol runs at minimum in two phases and at most in three phases.
use super::common::*;
use crate::primitives::{
group::Group,
phases::{Phase0, Phase1, Phase2, Phase3},
status::{Status, StatusMatrix},
types::*,
DKGError, DKGResult,
};
use threshold_bls::{
group::{Curve, Element},
poly::{Idx, Poly, PrivatePoly, PublicPoly},
sig::Share,
};
use rand_core::RngCore;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{cell::RefCell, collections::HashMap, fmt::Debug};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
struct DKGInfo<C: Curve> {
private_key: C::Scalar,
public_key: C::Point,
index: Idx,
group: Group<C>,
secret: Poly<C::Scalar>,
public: Poly<C::Point>,
}
impl<C: Curve> DKGInfo<C> {
/// Returns the number of nodes participating in the group for this DKG
fn n(&self) -> usize {
self.group.len()
}
/// Returns the threshold of the group for this DKG
fn thr(&self) -> usize {
self.group.threshold
}
}
/// DKG is the struct containing the logic to run the Distributed Key Generation
/// protocol from [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
///
/// The protocol runs at minimum in two phases and at most in three phases as
/// described in the module documentation.
///
/// Each transition to a new phase is consuming the DKG state (struct) to produce
/// a new state that only accepts to transition to the next phase.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
pub struct DKG<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> DKG<C> {
/// Creates a new DKG instance from the provided private key and group.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new(private_key: C::Scalar, group: Group<C>) -> Result<DKG<C>, DKGError> {
use rand::prelude::*;
Self::new_rand(private_key, group, &mut thread_rng())
}
/// Creates a new DKG instance from the provided private key, group and RNG.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new_rand<R: RngCore>(
private_key: C::Scalar,
group: Group<C>,
rng: &mut R,
) -> Result<DKG<C>, DKGError> {
// get the public key
let mut public_key = C::Point::one();
public_key.mul(&private_key);
// make sure the private key is not identity element nor neutral element
if private_key == C::Scalar::zero() || private_key == C::Scalar::one() {
return Err(DKGError::PrivateKeyInvalid);
}
// check if the public key is part of the group
let index = group
.index(&public_key)
.ok_or(DKGError::PublicKeyNotFound)?;
// Generate a secret polynomial and commit to it
let secret = PrivatePoly::<C>::new_from(group.threshold - 1, rng);
let public = secret.commit::<C::Point>();
let info = DKGInfo {
private_key,
public_key,
index,
group,
secret,
public,
};
Ok(DKG { info })
}
}
impl<C: Curve> Phase0<C> for DKG<C> {
type Next = DKGWaitingShare<C>;
/// Evaluates the secret polynomial at the index of each DKG participant and encrypts
/// the result with the corresponding public key. Returns the bundled encrypted shares
/// as well as the next phase of the DKG.
fn encrypt_shares<R: RngCore>(
self,
rng: &mut R,
) -> DKGResult<(DKGWaitingShare<C>, Option<BundledShares<C>>)> |
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the shares from the previous phase's participants
/// as input. After processing the shares, if there were any complaints it will generate
/// a bundle of responses for the next phase.
pub struct DKGWaitingShare<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> Phase1<C> for DKGWaitingShare<C> {
type Next = DKGWaitingResponse<C>;
#[allow(unused_assignments)]
/// Tries to decrypt the provided shares and calculate the secret key and the
/// threshold public key. If `publish_all` is set to true then the returned
/// responses will include both complaints and successful statuses. Consider setting
/// it to false when communication complexity is high.
///
/// A complaint is returned in the following cases:
/// - invalid dealer index
/// - absentee shares for us
/// - invalid encryption
/// - invalid length of public polynomial
/// - invalid share w.r.t. public polynomial
fn process_shares(
self,
bundles: &[BundledShares<C>],
mut publish_all: bool,
) -> DKGResult<(DKGWaitingResponse<C>, Option<BundledResponses>)> {
publish_all = false;
let thr = self.info.thr();
let my_idx = self.info.index;
let (shares, publics, mut statuses) = process_shares_get_all(
&self.info.group,
&self.info.group,
Some(my_idx),
my_idx,
&self.info.private_key,
bundles,
)?;
// in DKG every dealer is also a share holder, we assume that a dealer
// will issue a valid share for itself
for n in self.info.group.nodes.iter() {
statuses.set(n.id(), n.id(), Status::Success);
}
// we check with `thr - 1` because we already have our shares
if shares.len() < thr - 1 {
// that means the threat model is not respected since there should
// be at least a threshold of honest shares
return Err(DKGError::NotEnoughValidShares(shares.len(), thr));
}
// The user's secret share is the sum of all received shares (remember:
// each share is an evaluation of a participant's private polynomial at
// our index)
let mut fshare = self.info.secret.eval(self.info.index).value;
// The public key polynomial is the sum of all shared polynomials
let mut fpub = self.info.public.clone();
shares.iter().for_each(|(&dealer_idx, share)| {
fpub.add(publics.get(&dealer_idx).unwrap());
fshare.add(share);
});
let bundle = compute_bundle_response(my_idx, &statuses, publish_all);
let new_dkg = DKGWaitingResponse::new(self.info, fshare, fpub, statuses, publics);
Ok((new_dkg, bundle))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the responses from the previous phase's participants
/// as input. The responses will be processed and justifications may be generated as a byproduct
/// if there are complaints.
pub struct DKGWaitingResponse<C: Curve> {
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
}
impl<C: Curve> DKGWaitingResponse<C> {
fn new(
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
) -> Self {
Self {
info,
dist_share,
dist_pub,
statuses,
publics,
}
}
}
impl<C: Curve> Phase2<C> for DKGWaitingResponse<C> {
type Next = DKGWaitingJustification<C>;
#[allow(clippy::type_complexity)]
/// Checks if the responses when applied to the status matrix result in a
/// matrix with only `Success` elements. If so, the protocol terminates.
///
/// If there are complaints in the Status matrix, then it will return an
/// error with the justifications required for Phase 3 of the DKG.
fn process_responses(
self,
responses: &[BundledResponses],
) -> Result<DKGOutput<C>, DKGResult<(Self::Next, Option<BundledJustification<C>>)>> {
let info = self.info;
let mut statuses = self.statuses;
set_statuses(
info.index,
&info.group,
&info.group,
&mut statuses,
responses,
);
// find out if justifications are required
// if there is a least one participant that issued one complaint
let justifications_required = info.group.nodes.iter().any(|n| !statuses.all_true(n.id()));
if justifications_required {
let bundled_justifications =
get_justification(info.index, &info.secret, &info.public, &statuses);
let dkg = DKGWaitingJustification {
info,
dist_share: self.dist_share,
dist_pub: self.dist_pub,
statuses: RefCell::new(statuses),
publics: self.publics,
};
return Err(Ok((dkg, bundled_justifications)));
}
// bingo ! Returns the final share now and stop the protocol
let share = Share {
index: info.index,
private: self.dist_share,
};
Ok(DKGOutput {
// everybody is qualified in this case since there is no
// complaint at all
qual: info.group,
public: self.dist_pub,
share,
})
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the justifications from the previous phase's participants
/// as input to produce either the final DKG Output, or an error.
pub struct DKGWaitingJustification<C: Curve> {
// TODO: transform that into one info variable that gets default value for
// missing parts depending in the round of the protocol.
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
// guaranteed to be of the right size (n)
statuses: RefCell<StatusMatrix>,
publics: HashMap<Idx, PublicPoly<C>>,
}
impl<C> Phase3<C> for DKGWaitingJustification<C>
where
C: Curve,
{
/// Accept a justification if the following conditions are true:
/// - bundle's dealer index is in range
/// - a justification was required for the given share (no-op)
/// - share corresponds to public polynomial received in the bundled shares during
/// first period.
/// Return an output if `len(qual) > thr`
fn process_justifications(
self,
justifs: &[BundledJustification<C>],
) -> Result<DKGOutput<C>, DKGError> {
// Calculate the share and public polynomial from the provided justifications
// (they will later be added to our existing share and public polynomial)
let mut add_share = C::Scalar::zero();
let mut add_public = PublicPoly::<C>::zero();
let valid_shares = internal_process_justifications(
self.info.index,
&self.info.group,
&mut self.statuses.borrow_mut(),
&self.publics,
justifs,
);
for (idx, share) in &valid_shares {
add_share.add(share);
// unwrap since internal_process_justi. gauarantees each share comes
// from a public polynomial we've seen in the first round.
add_public.add(self.publics.get(idx).unwrap());
}
// QUAL is the set of all entries in the matrix where all bits are set
let statuses = self.statuses.borrow();
let qual_indices = (0..self.info.n())
.filter(|&dealer| statuses.all_true(dealer as Idx))
.collect::<Vec<_>>();
let thr = self.info.group.threshold;
if qual_indices.len() < thr {
// too many unanswered justifications, DKG abort !
return Err(DKGError::NotEnoughJustifications(qual_indices.len(), thr));
}
// create a group out of the qualifying nodes
let qual_nodes = self
.info
.group
.nodes
.into_iter()
.filter(|n| qual_indices.contains(&(n.id() as usize)))
.collect();
let group = Group::<C>::new(qual_nodes, thr)?;
// add all good shares and public poly together
add_share.add(&self.dist_share);
add_public.add(&self.dist_pub);
let ds = Share {
index: self.info.index,
private: add_share,
};
Ok(DKGOutput {
qual: group,
public: add_public,
share: ds,
})
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::primitives::{
common::tests::{check2, full_dkg, id_out, id_resp, invalid2, invalid_shares, setup_group},
default_threshold,
};
use std::fmt::Debug;
use threshold_bls::curve::bls12377::{G1Curve as BCurve, G1};
use serde::{de::DeserializeOwned, Serialize};
use static_assertions::assert_impl_all;
assert_impl_all!(Group<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGInfo<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKG<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(EncryptedShare<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledShares<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGOutput<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledJustification<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
fn setup_dkg<C: Curve>(n: usize) -> Vec<DKG<C>> {
let (privs, group) = setup_group::<C>(n, default_threshold(n));
privs
.into_iter()
.map(|p| DKG::new(p, group.clone()).unwrap())
.collect::<Vec<_>>()
}
#[test]
fn group_index() {
let n = 6;
let (privs, group) = setup_group::<BCurve>(n, default_threshold(n));
for (i, private) in privs.iter().enumerate() {
let mut public = G1::one();
public.mul(private);
let idx = group.index(&public).expect("should find public key");
assert_eq!(idx, i as Idx);
}
}
#[test]
fn test_full_dkg() {
let n = 5;
let thr = default_threshold(n);
full_dkg(thr, setup_dkg::<BCurve>(n));
}
#[test]
fn test_invalid_shares_dkg() {
let n = 5;
let thr = default_threshold(n);
invalid_shares(
thr,
setup_dkg::<BCurve>(n),
invalid2,
id_resp,
check2,
id_out,
)
.unwrap();
}
}
| {
let bundle = create_share_bundle(
self.info.index,
&self.info.secret,
&self.info.public,
&self.info.group,
rng,
)?;
let dw = DKGWaitingShare { info: self.info };
Ok((dw, Some(bundle)))
} | identifier_body |
joint_feldman.rs | //! Implements the Distributed Key Generation protocol from
//! [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
//! The protocol runs at minimum in two phases and at most in three phases.
use super::common::*;
use crate::primitives::{
group::Group,
phases::{Phase0, Phase1, Phase2, Phase3},
status::{Status, StatusMatrix},
types::*,
DKGError, DKGResult,
};
use threshold_bls::{
group::{Curve, Element},
poly::{Idx, Poly, PrivatePoly, PublicPoly},
sig::Share,
};
use rand_core::RngCore;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{cell::RefCell, collections::HashMap, fmt::Debug};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
struct DKGInfo<C: Curve> {
private_key: C::Scalar,
public_key: C::Point,
index: Idx,
group: Group<C>,
secret: Poly<C::Scalar>,
public: Poly<C::Point>,
}
impl<C: Curve> DKGInfo<C> {
/// Returns the number of nodes participating in the group for this DKG
fn n(&self) -> usize {
self.group.len()
}
/// Returns the threshold of the group for this DKG
fn thr(&self) -> usize {
self.group.threshold
}
}
/// DKG is the struct containing the logic to run the Distributed Key Generation
/// protocol from [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
///
/// The protocol runs at minimum in two phases and at most in three phases as
/// described in the module documentation.
///
/// Each transition to a new phase is consuming the DKG state (struct) to produce
/// a new state that only accepts to transition to the next phase.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
pub struct DKG<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> DKG<C> {
/// Creates a new DKG instance from the provided private key and group.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new(private_key: C::Scalar, group: Group<C>) -> Result<DKG<C>, DKGError> {
use rand::prelude::*;
Self::new_rand(private_key, group, &mut thread_rng())
}
/// Creates a new DKG instance from the provided private key, group and RNG.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn | <R: RngCore>(
private_key: C::Scalar,
group: Group<C>,
rng: &mut R,
) -> Result<DKG<C>, DKGError> {
// get the public key
let mut public_key = C::Point::one();
public_key.mul(&private_key);
// make sure the private key is not identity element nor neutral element
if private_key == C::Scalar::zero() || private_key == C::Scalar::one() {
return Err(DKGError::PrivateKeyInvalid);
}
// check if the public key is part of the group
let index = group
.index(&public_key)
.ok_or(DKGError::PublicKeyNotFound)?;
// Generate a secret polynomial and commit to it
let secret = PrivatePoly::<C>::new_from(group.threshold - 1, rng);
let public = secret.commit::<C::Point>();
let info = DKGInfo {
private_key,
public_key,
index,
group,
secret,
public,
};
Ok(DKG { info })
}
}
impl<C: Curve> Phase0<C> for DKG<C> {
type Next = DKGWaitingShare<C>;
/// Evaluates the secret polynomial at the index of each DKG participant and encrypts
/// the result with the corresponding public key. Returns the bundled encrypted shares
/// as well as the next phase of the DKG.
fn encrypt_shares<R: RngCore>(
self,
rng: &mut R,
) -> DKGResult<(DKGWaitingShare<C>, Option<BundledShares<C>>)> {
let bundle = create_share_bundle(
self.info.index,
&self.info.secret,
&self.info.public,
&self.info.group,
rng,
)?;
let dw = DKGWaitingShare { info: self.info };
Ok((dw, Some(bundle)))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the shares from the previous phase's participants
/// as input. After processing the shares, if there were any complaints it will generate
/// a bundle of responses for the next phase.
pub struct DKGWaitingShare<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> Phase1<C> for DKGWaitingShare<C> {
type Next = DKGWaitingResponse<C>;
#[allow(unused_assignments)]
/// Tries to decrypt the provided shares and calculate the secret key and the
/// threshold public key. If `publish_all` is set to true then the returned
/// responses will include both complaints and successful statuses. Consider setting
/// it to false when communication complexity is high.
///
/// A complaint is returned in the following cases:
/// - invalid dealer index
/// - absentee shares for us
/// - invalid encryption
/// - invalid length of public polynomial
/// - invalid share w.r.t. public polynomial
fn process_shares(
self,
bundles: &[BundledShares<C>],
mut publish_all: bool,
) -> DKGResult<(DKGWaitingResponse<C>, Option<BundledResponses>)> {
publish_all = false;
let thr = self.info.thr();
let my_idx = self.info.index;
let (shares, publics, mut statuses) = process_shares_get_all(
&self.info.group,
&self.info.group,
Some(my_idx),
my_idx,
&self.info.private_key,
bundles,
)?;
// in DKG every dealer is also a share holder, we assume that a dealer
// will issue a valid share for itself
for n in self.info.group.nodes.iter() {
statuses.set(n.id(), n.id(), Status::Success);
}
// we check with `thr - 1` because we already have our shares
if shares.len() < thr - 1 {
// that means the threat model is not respected since there should
// be at least a threshold of honest shares
return Err(DKGError::NotEnoughValidShares(shares.len(), thr));
}
// The user's secret share is the sum of all received shares (remember:
// each share is an evaluation of a participant's private polynomial at
// our index)
let mut fshare = self.info.secret.eval(self.info.index).value;
// The public key polynomial is the sum of all shared polynomials
let mut fpub = self.info.public.clone();
shares.iter().for_each(|(&dealer_idx, share)| {
fpub.add(publics.get(&dealer_idx).unwrap());
fshare.add(share);
});
let bundle = compute_bundle_response(my_idx, &statuses, publish_all);
let new_dkg = DKGWaitingResponse::new(self.info, fshare, fpub, statuses, publics);
Ok((new_dkg, bundle))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the responses from the previous phase's participants
/// as input. The responses will be processed and justifications may be generated as a byproduct
/// if there are complaints.
pub struct DKGWaitingResponse<C: Curve> {
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
}
impl<C: Curve> DKGWaitingResponse<C> {
fn new(
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
) -> Self {
Self {
info,
dist_share,
dist_pub,
statuses,
publics,
}
}
}
impl<C: Curve> Phase2<C> for DKGWaitingResponse<C> {
type Next = DKGWaitingJustification<C>;
#[allow(clippy::type_complexity)]
/// Checks if the responses when applied to the status matrix result in a
/// matrix with only `Success` elements. If so, the protocol terminates.
///
/// If there are complaints in the Status matrix, then it will return an
/// error with the justifications required for Phase 3 of the DKG.
fn process_responses(
self,
responses: &[BundledResponses],
) -> Result<DKGOutput<C>, DKGResult<(Self::Next, Option<BundledJustification<C>>)>> {
let info = self.info;
let mut statuses = self.statuses;
set_statuses(
info.index,
&info.group,
&info.group,
&mut statuses,
responses,
);
// find out if justifications are required
// if there is a least one participant that issued one complaint
let justifications_required = info.group.nodes.iter().any(|n| !statuses.all_true(n.id()));
if justifications_required {
let bundled_justifications =
get_justification(info.index, &info.secret, &info.public, &statuses);
let dkg = DKGWaitingJustification {
info,
dist_share: self.dist_share,
dist_pub: self.dist_pub,
statuses: RefCell::new(statuses),
publics: self.publics,
};
return Err(Ok((dkg, bundled_justifications)));
}
// bingo ! Returns the final share now and stop the protocol
let share = Share {
index: info.index,
private: self.dist_share,
};
Ok(DKGOutput {
// everybody is qualified in this case since there is no
// complaint at all
qual: info.group,
public: self.dist_pub,
share,
})
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the justifications from the previous phase's participants
/// as input to produce either the final DKG Output, or an error.
pub struct DKGWaitingJustification<C: Curve> {
// TODO: transform that into one info variable that gets default value for
// missing parts depending in the round of the protocol.
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
// guaranteed to be of the right size (n)
statuses: RefCell<StatusMatrix>,
publics: HashMap<Idx, PublicPoly<C>>,
}
impl<C> Phase3<C> for DKGWaitingJustification<C>
where
C: Curve,
{
/// Accept a justification if the following conditions are true:
/// - bundle's dealer index is in range
/// - a justification was required for the given share (no-op)
/// - share corresponds to public polynomial received in the bundled shares during
/// first period.
/// Return an output if `len(qual) > thr`
fn process_justifications(
self,
justifs: &[BundledJustification<C>],
) -> Result<DKGOutput<C>, DKGError> {
// Calculate the share and public polynomial from the provided justifications
// (they will later be added to our existing share and public polynomial)
let mut add_share = C::Scalar::zero();
let mut add_public = PublicPoly::<C>::zero();
let valid_shares = internal_process_justifications(
self.info.index,
&self.info.group,
&mut self.statuses.borrow_mut(),
&self.publics,
justifs,
);
for (idx, share) in &valid_shares {
add_share.add(share);
// unwrap since internal_process_justi. gauarantees each share comes
// from a public polynomial we've seen in the first round.
add_public.add(self.publics.get(idx).unwrap());
}
// QUAL is the set of all entries in the matrix where all bits are set
let statuses = self.statuses.borrow();
let qual_indices = (0..self.info.n())
.filter(|&dealer| statuses.all_true(dealer as Idx))
.collect::<Vec<_>>();
let thr = self.info.group.threshold;
if qual_indices.len() < thr {
// too many unanswered justifications, DKG abort !
return Err(DKGError::NotEnoughJustifications(qual_indices.len(), thr));
}
// create a group out of the qualifying nodes
let qual_nodes = self
.info
.group
.nodes
.into_iter()
.filter(|n| qual_indices.contains(&(n.id() as usize)))
.collect();
let group = Group::<C>::new(qual_nodes, thr)?;
// add all good shares and public poly together
add_share.add(&self.dist_share);
add_public.add(&self.dist_pub);
let ds = Share {
index: self.info.index,
private: add_share,
};
Ok(DKGOutput {
qual: group,
public: add_public,
share: ds,
})
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::primitives::{
common::tests::{check2, full_dkg, id_out, id_resp, invalid2, invalid_shares, setup_group},
default_threshold,
};
use std::fmt::Debug;
use threshold_bls::curve::bls12377::{G1Curve as BCurve, G1};
use serde::{de::DeserializeOwned, Serialize};
use static_assertions::assert_impl_all;
assert_impl_all!(Group<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGInfo<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKG<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(EncryptedShare<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledShares<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGOutput<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledJustification<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
fn setup_dkg<C: Curve>(n: usize) -> Vec<DKG<C>> {
let (privs, group) = setup_group::<C>(n, default_threshold(n));
privs
.into_iter()
.map(|p| DKG::new(p, group.clone()).unwrap())
.collect::<Vec<_>>()
}
#[test]
fn group_index() {
let n = 6;
let (privs, group) = setup_group::<BCurve>(n, default_threshold(n));
for (i, private) in privs.iter().enumerate() {
let mut public = G1::one();
public.mul(private);
let idx = group.index(&public).expect("should find public key");
assert_eq!(idx, i as Idx);
}
}
#[test]
fn test_full_dkg() {
let n = 5;
let thr = default_threshold(n);
full_dkg(thr, setup_dkg::<BCurve>(n));
}
#[test]
fn test_invalid_shares_dkg() {
let n = 5;
let thr = default_threshold(n);
invalid_shares(
thr,
setup_dkg::<BCurve>(n),
invalid2,
id_resp,
check2,
id_out,
)
.unwrap();
}
}
| new_rand | identifier_name |
paging.rs | // Copyright (c) 2017 Colin Finck, RWTH Aachen University
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![allow(dead_code)]
use crate::arch::x86_64::kernel::irq;
use crate::arch::x86_64::kernel::processor;
use crate::arch::x86_64::kernel::BOOT_INFO;
use crate::arch::x86_64::mm::{physicalmem, virtualmem};
use crate::consts::*;
use crate::logging::*;
use crate::scheduler;
use core::arch::asm;
use core::convert::TryInto;
use core::marker::PhantomData;
use core::mem::size_of;
use core::ptr::write_bytes;
use num_traits::CheckedShr;
use x86::controlregs;
use x86::irq::*;
/// Pointer to the root page table (PML4)
const PML4_ADDRESS: *mut PageTable<PML4> = 0xFFFF_FFFF_FFFF_F000 as *mut PageTable<PML4>;
/// Number of Offset bits of a virtual address for a 4 KiB page, which are shifted away to get its Page Frame Number (PFN).
const PAGE_BITS: usize = 12;
/// Number of bits of the index in each table (PML4, PDPT, PD, PT).
const PAGE_MAP_BITS: usize = 9;
/// A mask where PAGE_MAP_BITS are set to calculate a table index.
const PAGE_MAP_MASK: usize = 0x1FF;
bitflags! {
/// Possible flags for an entry in either table (PML4, PDPT, PD, PT)
///
/// See Intel Vol. 3A, Tables 4-14 through 4-19
pub struct PageTableEntryFlags: usize {
/// Set if this entry is valid and points to a page or table.
const PRESENT = 1 << 0;
/// Set if memory referenced by this entry shall be writable.
const WRITABLE = 1 << 1;
/// Set if memory referenced by this entry shall be accessible from user-mode (Ring 3).
const USER_ACCESSIBLE = 1 << 2;
/// Set if Write-Through caching shall be enabled for memory referenced by this entry.
/// Otherwise, Write-Back caching is used.
const WRITE_THROUGH = 1 << 3;
/// Set if caching shall be disabled for memory referenced by this entry.
const CACHE_DISABLE = 1 << 4;
/// Set if software has accessed this entry (for memory access or address translation).
const ACCESSED = 1 << 5;
/// Only for page entries: Set if software has written to the memory referenced by this entry.
const DIRTY = 1 << 6;
/// Only for page entries in PDPT or PDT: Set if this entry references a 1 GiB (PDPT) or 2 MiB (PDT) page.
const HUGE_PAGE = 1 << 7;
/// Only for page entries: Set if this address translation is global for all tasks and does not need to
/// be flushed from the TLB when CR3 is reset.
const GLOBAL = 1 << 8;
/// Set if code execution shall be disabled for memory referenced by this entry.
const EXECUTE_DISABLE = 1 << 63;
}
}
impl PageTableEntryFlags {
/// An empty set of flags for unused/zeroed table entries.
/// Needed as long as empty() is no const function.
const BLANK: PageTableEntryFlags = PageTableEntryFlags { bits: 0 };
pub fn device(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn normal(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn read_only(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::WRITABLE);
self
}
pub fn writable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::WRITABLE);
self
}
pub fn execute_disable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::EXECUTE_DISABLE);
self
}
}
/// An entry in either table (PML4, PDPT, PD, PT)
#[derive(Clone, Copy)]
pub struct PageTableEntry {
/// Physical memory address this entry refers, combined with flags from PageTableEntryFlags.
physical_address_and_flags: usize,
}
impl PageTableEntry {
/// Return the stored physical address.
pub fn address(&self) -> usize {
self.physical_address_and_flags
& !(BasePageSize::SIZE - 1)
& !(PageTableEntryFlags::EXECUTE_DISABLE).bits()
}
/// Returns whether this entry is valid (present).
fn is_present(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::PRESENT.bits()) != 0
}
fn is_huge(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::HUGE_PAGE.bits()) != 0
}
fn is_user(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::USER_ACCESSIBLE.bits()) != 0
}
/// Mark this as a valid (present) entry and set address translation and flags.
///
/// # Arguments
///
/// * `physical_address` - The physical memory address this entry shall translate to
/// * `flags` - Flags from PageTableEntryFlags (note that the PRESENT and ACCESSED flags are set automatically)
fn set(&mut self, physical_address: usize, flags: PageTableEntryFlags) {
if flags.contains(PageTableEntryFlags::HUGE_PAGE) {
// HUGE_PAGE may indicate a 2 MiB or 1 GiB page.
// We don't know this here, so we can only verify that at least the offset bits for a 2 MiB page are zero.
assert!(
(physical_address % LargePageSize::SIZE) == 0,
"Physical address is not on a 2 MiB page boundary (physical_address = {:#X})",
physical_address
);
} else {
// Verify that the offset bits for a 4 KiB page are zero.
assert!(
(physical_address % BasePageSize::SIZE) == 0,
"Physical address is not on a 4 KiB page boundary (physical_address = {:#X})",
physical_address
);
}
// Verify that the physical address does not exceed the CPU's physical address width.
assert!(
CheckedShr::checked_shr(
&physical_address,
processor::get_physical_address_bits() as u32
) == Some(0),
"Physical address exceeds CPU's physical address width (physical_address = {:#X})",
physical_address
);
let mut flags_to_set = flags;
flags_to_set.insert(PageTableEntryFlags::PRESENT);
flags_to_set.insert(PageTableEntryFlags::ACCESSED);
self.physical_address_and_flags = physical_address | flags_to_set.bits();
}
}
/// A generic interface to support all possible page sizes.
///
/// This is defined as a subtrait of Copy to enable #[derive(Clone, Copy)] for Page.
/// Currently, deriving implementations for these traits only works if all dependent types implement it as well.
pub trait PageSize: Copy {
/// The page size in bytes.
const SIZE: usize;
/// The page table level at which a page of this size is mapped (from 0 for PT through 3 for PML4).
/// Implemented as a numeric value to enable numeric comparisons.
const MAP_LEVEL: usize;
/// Any extra flag that needs to be set to map a page of this size.
/// For example: PageTableEntryFlags::HUGE_PAGE
const MAP_EXTRA_FLAG: PageTableEntryFlags;
}
/// A 4 KiB page mapped in the PT.
#[derive(Clone, Copy)]
pub enum BasePageSize {}
impl PageSize for BasePageSize {
const SIZE: usize = 0x1000;
const MAP_LEVEL: usize = 0;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::BLANK;
}
/// A 2 MiB page mapped in the PD.
#[derive(Clone, Copy)]
pub enum LargePageSize {}
impl PageSize for LargePageSize {
const SIZE: usize = 0x200000;
const MAP_LEVEL: usize = 1;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A 1 GiB page mapped in the PDPT.
#[derive(Clone, Copy)]
pub enum HugePageSize {}
impl PageSize for HugePageSize {
const SIZE: usize = 0x40000000;
const MAP_LEVEL: usize = 2;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A memory page of the size given by S.
#[derive(Clone, Copy)]
struct Page<S: PageSize> {
/// Virtual memory address of this page.
/// This is rounded to a page size boundary on creation.
virtual_address: usize,
/// Required by Rust to support the S parameter.
size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
/// Return the stored virtual address.
fn address(&self) -> usize {
self.virtual_address
}
/// Flushes this page from the TLB of this CPU.
#[inline(always)]
fn flush_from_tlb(&self) {
unsafe {
asm!("invlpg [{}]", in(reg) self.virtual_address, options(preserves_flags, nostack));
}
}
/// Returns whether the given virtual address is a valid one in the x86-64 memory model.
///
/// Current x86-64 supports only 48-bit for virtual memory addresses.
/// This is enforced by requiring bits 63 through 48 to replicate bit 47 (cf. Intel Vol. 1, 3.3.7.1).
/// As a consequence, the address space is divided into the two valid regions 0x8000_0000_0000
/// and 0xFFFF_8000_0000_0000.
///
/// Although we could make this check depend on the actual linear address width from the CPU,
/// any extension above 48-bit would require a new page table level, which we don't implement.
fn is_valid_address(virtual_address: usize) -> bool {
virtual_address < 0x8000_0000_0000 || virtual_address >= 0xFFFF_8000_0000_0000
}
/// Returns a Page including the given virtual address.
/// That means, the address is rounded down to a page size boundary.
fn including_address(virtual_address: usize) -> Self {
assert!(
Self::is_valid_address(virtual_address),
"Virtual address {:#X} is invalid",
virtual_address
);
if S::SIZE == 1024 * 1024 * 1024 {
assert!(processor::supports_1gib_pages());
}
Self {
virtual_address: align_down!(virtual_address, S::SIZE),
size: PhantomData,
}
}
/// Returns a PageIter to iterate from the given first Page to the given last Page (inclusive).
fn range(first: Self, last: Self) -> PageIter<S> {
assert!(first.virtual_address <= last.virtual_address);
PageIter {
current: first,
last: last,
}
}
/// Returns the index of this page in the table given by L.
fn table_index<L: PageTableLevel>(&self) -> usize {
assert!(L::LEVEL >= S::MAP_LEVEL);
self.virtual_address >> PAGE_BITS >> L::LEVEL * PAGE_MAP_BITS & PAGE_MAP_MASK
}
}
/// An iterator to walk through a range of pages of size S.
struct PageIter<S: PageSize> {
current: Page<S>,
last: Page<S>,
}
impl<S: PageSize> Iterator for PageIter<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Page<S>> {
if self.current.virtual_address <= self.last.virtual_address {
let p = self.current;
self.current.virtual_address += S::SIZE;
Some(p)
} else {
None
}
}
}
/// An interface to allow for a generic implementation of struct PageTable for all 4 page tables.
/// Must be implemented by all page tables.
trait PageTableLevel {
/// Numeric page table level (from 0 for PT through 3 for PML4) to enable numeric comparisons.
const LEVEL: usize;
}
/// An interface for page tables with sub page tables (all except PT).
/// Having both PageTableLevel and PageTableLevelWithSubtables leverages Rust's typing system to provide
/// a subtable method only for those that have sub page tables.
///
/// Kudos to Philipp Oppermann for the trick!
trait PageTableLevelWithSubtables: PageTableLevel {
type SubtableLevel;
}
/// The Page Map Level 4 (PML4) table, with numeric level 3 and PDPT subtables.
enum PML4 {}
impl PageTableLevel for PML4 {
const LEVEL: usize = 3;
}
impl PageTableLevelWithSubtables for PML4 {
type SubtableLevel = PDPT;
}
/// A Page Directory Pointer Table (PDPT), with numeric level 2 and PDT subtables.
enum PDPT {}
impl PageTableLevel for PDPT {
const LEVEL: usize = 2;
}
impl PageTableLevelWithSubtables for PDPT {
type SubtableLevel = PD;
}
/// A Page Directory (PD), with numeric level 1 and PT subtables.
enum PD {}
impl PageTableLevel for PD {
const LEVEL: usize = 1;
}
impl PageTableLevelWithSubtables for PD {
type SubtableLevel = PT;
}
/// A Page Table (PT), with numeric level 0 and no subtables.
enum PT {}
impl PageTableLevel for PT {
const LEVEL: usize = 0;
}
/// Representation of any page table (PML4, PDPT, PD, PT) in memory.
/// Parameter L supplies information for Rust's typing system to distinguish between the different tables.
struct PageTable<L> {
/// Each page table has 512 entries (can be calculated using PAGE_MAP_BITS).
entries: [PageTableEntry; 1 << PAGE_MAP_BITS],
/// Required by Rust to support the L parameter.
level: PhantomData<L>,
}
/// A trait defining methods every page table has to implement.
/// This additional trait is necessary to make use of Rust's specialization feature and provide a default
/// implementation of some methods.
trait PageTableMethods {
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry>;
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn drop_user_space(&mut self);
}
impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
/// Maps a single page in this table to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// Must only be called if a page of this size is mapped at this page table level!
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL == S::MAP_LEVEL);
let index = page.table_index::<L>();
let flush = self.entries[index].is_present();
self.entries[index].set(
physical_address,
PageTableEntryFlags::DIRTY | S::MAP_EXTRA_FLAG | flags,
);
if flush {
page.flush_from_tlb();
}
flush
}
/// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
///
/// This is the default implementation called only for PT.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL == S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
Some(self.entries[index])
} else {
None
}
}
default fn drop_user_space(&mut self) {
let last = 1 << PAGE_MAP_BITS;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
let physical_address = self.entries[index].address();
debug!("Free page frame at 0x{:x}", physical_address);
physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
/// Maps a single page to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// This is the default implementation that just calls the map_page_in_this_table method.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
impl<L: PageTableLevelWithSubtables> PageTableMethods for PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
/// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
///
/// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
/// It overrides the default implementation above.
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL >= S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() | else {
None
}
}
fn drop_user_space(&mut self) {
let last = 1 << PAGE_MAP_BITS;
let table_address = self as *const PageTable<L> as usize;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
// currently, the user space uses only 4KB pages
if L::LEVEL > BasePageSize::MAP_LEVEL {
// Calculate the address of the subtable.
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
let subtable =
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
subtable.drop_user_space();
//let physical_address = self.entries[index].address();
//debug!("Free page table at 0x{:x}", physical_address);
//physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
}
/// Maps a single page to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
/// It overrides the default implementation above.
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL >= S::MAP_LEVEL);
if L::LEVEL > S::MAP_LEVEL {
let index = page.table_index::<L>();
// Does the table exist yet?
if !self.entries[index].is_present() {
// Allocate a single 4 KiB page for the new entry and mark it as a valid, writable subtable.
let pt_addr = physicalmem::allocate(BasePageSize::SIZE);
if flags.contains(PageTableEntryFlags::USER_ACCESSIBLE) {
self.entries[index].set(
pt_addr,
PageTableEntryFlags::WRITABLE | PageTableEntryFlags::USER_ACCESSIBLE,
);
} else {
self.entries[index].set(pt_addr, PageTableEntryFlags::WRITABLE);
}
// Mark all entries as unused in the newly created table.
let subtable = self.subtable::<S>(page);
for entry in subtable.entries.iter_mut() {
entry.physical_address_and_flags = 0;
}
subtable.map_page::<S>(page, physical_address, flags)
} else {
let subtable = self.subtable::<S>(page);
subtable.map_page::<S>(page, physical_address, flags)
}
} else {
// Calling the default implementation from a specialized one is not supported (yet),
// so we have to resort to an extra function.
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
}
impl<L: PageTableLevelWithSubtables> PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
/// Returns the next subtable for the given page in the page table hierarchy.
///
/// Must only be called if a page of this size is mapped in a subtable!
fn subtable<S: PageSize>(&self, page: Page<S>) -> &mut PageTable<L::SubtableLevel> {
assert!(L::LEVEL > S::MAP_LEVEL);
// Calculate the address of the subtable.
let index = page.table_index::<L>();
let table_address = self as *const PageTable<L> as usize;
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) }
}
/// Maps a continuous range of pages.
///
/// # Arguments
///
/// * `range` - The range of pages of size S
/// * `physical_address` - First physical address to map these pages to
/// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or EXECUTE_DISABLE).
/// The PRESENT, ACCESSED, and DIRTY flags are already set automatically.
fn map_pages<S: PageSize>(
&mut self,
range: PageIter<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) {
let mut current_physical_address = physical_address;
for page in range {
self.map_page(page, current_physical_address, flags);
current_physical_address += S::SIZE;
}
}
fn drop_user_space(&mut self) {
assert!(L::LEVEL == PML4::LEVEL);
// the last entry is required to get access to the page tables
let last = (1 << PAGE_MAP_BITS) - 1;
let table_address = self as *const PageTable<L> as usize;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
// Calculate the address of the subtable.
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
let subtable =
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
subtable.drop_user_space();
let physical_address = self.entries[index].address();
debug!("Free page table at 0x{:x}", physical_address);
physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
}
pub extern "x86-interrupt" fn page_fault_handler(
stack_frame: irq::ExceptionStackFrame,
error_code: u64,
) {
let mut virtual_address = unsafe { controlregs::cr2() };
// do we have to create the user-space stack?
if virtual_address > USER_SPACE_START {
virtual_address = align_down!(virtual_address, BasePageSize::SIZE);
// Ok, user space want to have memory (for the stack / heap)
let physical_address =
physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
debug!(
"Map 0x{:x} into the user space at 0x{:x}",
physical_address, virtual_address
);
map::<BasePageSize>(
virtual_address,
physical_address,
1,
PageTableEntryFlags::WRITABLE
| PageTableEntryFlags::USER_ACCESSIBLE
| PageTableEntryFlags::EXECUTE_DISABLE,
);
unsafe {
// clear new page
write_bytes(virtual_address as *mut u8, 0x00, BasePageSize::SIZE);
// clear cr2 to signalize that the pagefault is solved by the pagefault handler
controlregs::cr2_write(0);
}
} else {
// Anything else is an error!
let pferror = PageFaultError::from_bits_truncate(error_code as u32);
error!("Page Fault (#PF) Exception: {:#?}", stack_frame);
error!(
"virtual_address = {:#X}, page fault error = {}",
virtual_address, pferror
);
// clear cr2 to signalize that the pagefault is solved by the pagefault handler
unsafe {
controlregs::cr2_write(0);
}
scheduler::abort();
}
}
fn get_page_range<S: PageSize>(virtual_address: usize, count: usize) -> PageIter<S> {
let first_page = Page::<S>::including_address(virtual_address);
let last_page = Page::<S>::including_address(virtual_address + (count - 1) * S::SIZE);
Page::range(first_page, last_page)
}
pub fn get_page_table_entry<S: PageSize>(virtual_address: usize) -> Option<PageTableEntry> {
debug!("Looking up Page Table Entry for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.get_page_table_entry(page)
}
pub fn get_physical_address<S: PageSize>(virtual_address: usize) -> usize {
debug!("Getting physical address for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
let address = root_pagetable
.get_page_table_entry(page)
.expect("Entry not present")
.address();
let offset = virtual_address & (S::SIZE - 1);
address | offset
}
/// Translate a virtual memory address to a physical one.
/// Just like get_physical_address, but automatically uses the correct page size for the respective memory address.
pub fn virtual_to_physical(virtual_address: usize) -> usize {
get_physical_address::<BasePageSize>(virtual_address)
}
pub fn unmap<S: PageSize>(virtual_address: usize, count: usize) {
debug!(
"Unmapping virtual address {:#X} ({} pages)",
virtual_address, count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, 0, PageTableEntryFlags::BLANK);
}
pub fn map<S: PageSize>(
virtual_address: usize,
physical_address: usize,
count: usize,
flags: PageTableEntryFlags,
) {
debug!(
"Mapping virtual address {:#X} to physical address {:#X} ({} pages)",
virtual_address, physical_address, count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, physical_address, flags);
}
static mut ROOT_PAGE_TABLE: usize = 0;
#[inline(always)]
pub fn get_kernel_root_page_table() -> usize {
unsafe { ROOT_PAGE_TABLE }
}
pub fn drop_user_space() {
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.drop_user_space();
}
// just an workaround to explaine the difference between
// kernel and user space
pub fn create_usr_pgd() -> usize {
debug!("Create 1st level page table for the user-level task");
unsafe {
let physical_address =
physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
let user_page_table: usize =
virtualmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
debug!(
"Map page frame 0x{:x} at virtual address 0x{:x}",
physical_address, user_page_table
);
map::<BasePageSize>(
user_page_table,
physical_address,
1,
PageTableEntryFlags::WRITABLE | PageTableEntryFlags::EXECUTE_DISABLE,
);
write_bytes(user_page_table as *mut u8, 0x00, BasePageSize::SIZE);
let recursive_pgt = BOOT_INFO.unwrap().recursive_page_table_addr as *const u64;
let recursive_pgt_idx = BOOT_INFO.unwrap().recursive_index();
let pml4 = user_page_table as *mut u64;
for i in 0..recursive_pgt_idx + 2 {
*pml4.offset(i.try_into().unwrap()) = *recursive_pgt.offset(i.try_into().unwrap());
}
let pml4 =
(user_page_table + BasePageSize::SIZE - size_of::<usize>()) as *mut PageTableEntry;
(*pml4).set(physical_address, PageTableEntryFlags::WRITABLE);
// unmap page table
unmap::<BasePageSize>(user_page_table, 1);
virtualmem::deallocate(user_page_table, BasePageSize::SIZE);
scheduler::set_root_page_table(physical_address);
physical_address
}
}
pub fn init() {
let recursive_pgt = unsafe { BOOT_INFO.unwrap().recursive_page_table_addr } as *mut u64;
let recursive_pgt_idx = unsafe { BOOT_INFO.unwrap().recursive_index() };
debug!(
"Found recursive_page_table_addr at 0x{:x}",
recursive_pgt as u64
);
debug!("Recursive index: {}", recursive_pgt_idx);
unsafe {
ROOT_PAGE_TABLE = *recursive_pgt.offset(recursive_pgt_idx.try_into().unwrap()) as usize
& !(BasePageSize::SIZE - 1);
*recursive_pgt.offset(511) = *recursive_pgt.offset(recursive_pgt_idx.try_into().unwrap());
for i in recursive_pgt_idx + 2..511 {
*recursive_pgt.offset(i.try_into().unwrap()) = 0;
}
//flush TLB
controlregs::cr3_write(controlregs::cr3());
}
}
| {
if L::LEVEL > S::MAP_LEVEL {
let subtable = self.subtable::<S>(page);
subtable.get_page_table_entry::<S>(page)
} else {
Some(self.entries[index])
}
} | conditional_block |
paging.rs | // Copyright (c) 2017 Colin Finck, RWTH Aachen University
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![allow(dead_code)]
use crate::arch::x86_64::kernel::irq;
use crate::arch::x86_64::kernel::processor;
use crate::arch::x86_64::kernel::BOOT_INFO;
use crate::arch::x86_64::mm::{physicalmem, virtualmem};
use crate::consts::*;
use crate::logging::*;
use crate::scheduler;
use core::arch::asm;
use core::convert::TryInto;
use core::marker::PhantomData;
use core::mem::size_of;
use core::ptr::write_bytes;
use num_traits::CheckedShr;
use x86::controlregs;
use x86::irq::*;
/// Pointer to the root page table (PML4)
const PML4_ADDRESS: *mut PageTable<PML4> = 0xFFFF_FFFF_FFFF_F000 as *mut PageTable<PML4>;
/// Number of Offset bits of a virtual address for a 4 KiB page, which are shifted away to get its Page Frame Number (PFN).
const PAGE_BITS: usize = 12;
/// Number of bits of the index in each table (PML4, PDPT, PD, PT).
const PAGE_MAP_BITS: usize = 9;
/// A mask where PAGE_MAP_BITS are set to calculate a table index.
const PAGE_MAP_MASK: usize = 0x1FF;
bitflags! {
/// Possible flags for an entry in either table (PML4, PDPT, PD, PT)
///
/// See Intel Vol. 3A, Tables 4-14 through 4-19
pub struct PageTableEntryFlags: usize {
/// Set if this entry is valid and points to a page or table.
const PRESENT = 1 << 0;
/// Set if memory referenced by this entry shall be writable.
const WRITABLE = 1 << 1;
/// Set if memory referenced by this entry shall be accessible from user-mode (Ring 3).
const USER_ACCESSIBLE = 1 << 2;
/// Set if Write-Through caching shall be enabled for memory referenced by this entry.
/// Otherwise, Write-Back caching is used.
const WRITE_THROUGH = 1 << 3;
/// Set if caching shall be disabled for memory referenced by this entry.
const CACHE_DISABLE = 1 << 4;
/// Set if software has accessed this entry (for memory access or address translation).
const ACCESSED = 1 << 5;
/// Only for page entries: Set if software has written to the memory referenced by this entry.
const DIRTY = 1 << 6;
/// Only for page entries in PDPT or PDT: Set if this entry references a 1 GiB (PDPT) or 2 MiB (PDT) page.
const HUGE_PAGE = 1 << 7;
/// Only for page entries: Set if this address translation is global for all tasks and does not need to
/// be flushed from the TLB when CR3 is reset.
const GLOBAL = 1 << 8;
/// Set if code execution shall be disabled for memory referenced by this entry.
const EXECUTE_DISABLE = 1 << 63;
}
}
impl PageTableEntryFlags {
/// An empty set of flags for unused/zeroed table entries.
/// Needed as long as empty() is no const function.
const BLANK: PageTableEntryFlags = PageTableEntryFlags { bits: 0 };
pub fn device(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn normal(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn read_only(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::WRITABLE);
self
}
pub fn writable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::WRITABLE);
self
}
pub fn execute_disable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::EXECUTE_DISABLE);
self
}
}
/// An entry in either table (PML4, PDPT, PD, PT)
#[derive(Clone, Copy)]
pub struct PageTableEntry {
/// Physical memory address this entry refers, combined with flags from PageTableEntryFlags.
physical_address_and_flags: usize,
}
impl PageTableEntry {
/// Return the stored physical address.
pub fn address(&self) -> usize {
self.physical_address_and_flags
& !(BasePageSize::SIZE - 1)
& !(PageTableEntryFlags::EXECUTE_DISABLE).bits()
}
/// Returns whether this entry is valid (present).
fn is_present(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::PRESENT.bits()) != 0
}
fn is_huge(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::HUGE_PAGE.bits()) != 0
}
fn is_user(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::USER_ACCESSIBLE.bits()) != 0
}
/// Mark this as a valid (present) entry and set address translation and flags.
///
/// # Arguments
///
/// * `physical_address` - The physical memory address this entry shall translate to
/// * `flags` - Flags from PageTableEntryFlags (note that the PRESENT and ACCESSED flags are set automatically)
fn set(&mut self, physical_address: usize, flags: PageTableEntryFlags) {
if flags.contains(PageTableEntryFlags::HUGE_PAGE) {
// HUGE_PAGE may indicate a 2 MiB or 1 GiB page.
// We don't know this here, so we can only verify that at least the offset bits for a 2 MiB page are zero.
assert!(
(physical_address % LargePageSize::SIZE) == 0,
"Physical address is not on a 2 MiB page boundary (physical_address = {:#X})",
physical_address
);
} else {
// Verify that the offset bits for a 4 KiB page are zero.
assert!(
(physical_address % BasePageSize::SIZE) == 0,
"Physical address is not on a 4 KiB page boundary (physical_address = {:#X})",
physical_address
);
}
// Verify that the physical address does not exceed the CPU's physical address width.
assert!(
CheckedShr::checked_shr(
&physical_address,
processor::get_physical_address_bits() as u32
) == Some(0),
"Physical address exceeds CPU's physical address width (physical_address = {:#X})",
physical_address
);
let mut flags_to_set = flags;
flags_to_set.insert(PageTableEntryFlags::PRESENT);
flags_to_set.insert(PageTableEntryFlags::ACCESSED);
self.physical_address_and_flags = physical_address | flags_to_set.bits();
}
}
/// A generic interface to support all possible page sizes.
///
/// This is defined as a subtrait of Copy to enable #[derive(Clone, Copy)] for Page.
/// Currently, deriving implementations for these traits only works if all dependent types implement it as well.
pub trait PageSize: Copy {
/// The page size in bytes.
const SIZE: usize;
/// The page table level at which a page of this size is mapped (from 0 for PT through 3 for PML4).
/// Implemented as a numeric value to enable numeric comparisons.
const MAP_LEVEL: usize;
/// Any extra flag that needs to be set to map a page of this size.
/// For example: PageTableEntryFlags::HUGE_PAGE
const MAP_EXTRA_FLAG: PageTableEntryFlags;
}
/// A 4 KiB page mapped in the PT.
#[derive(Clone, Copy)]
pub enum | {}
impl PageSize for BasePageSize {
const SIZE: usize = 0x1000;
const MAP_LEVEL: usize = 0;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::BLANK;
}
/// A 2 MiB page mapped in the PD.
#[derive(Clone, Copy)]
pub enum LargePageSize {}
impl PageSize for LargePageSize {
const SIZE: usize = 0x200000;
const MAP_LEVEL: usize = 1;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A 1 GiB page mapped in the PDPT.
#[derive(Clone, Copy)]
pub enum HugePageSize {}
impl PageSize for HugePageSize {
const SIZE: usize = 0x40000000;
const MAP_LEVEL: usize = 2;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A memory page of the size given by S.
#[derive(Clone, Copy)]
struct Page<S: PageSize> {
/// Virtual memory address of this page.
/// This is rounded to a page size boundary on creation.
virtual_address: usize,
/// Required by Rust to support the S parameter.
size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
/// Return the stored virtual address.
fn address(&self) -> usize {
self.virtual_address
}
/// Flushes this page from the TLB of this CPU.
#[inline(always)]
fn flush_from_tlb(&self) {
unsafe {
asm!("invlpg [{}]", in(reg) self.virtual_address, options(preserves_flags, nostack));
}
}
/// Returns whether the given virtual address is a valid one in the x86-64 memory model.
///
/// Current x86-64 supports only 48-bit for virtual memory addresses.
/// This is enforced by requiring bits 63 through 48 to replicate bit 47 (cf. Intel Vol. 1, 3.3.7.1).
/// As a consequence, the address space is divided into the two valid regions 0x8000_0000_0000
/// and 0xFFFF_8000_0000_0000.
///
/// Although we could make this check depend on the actual linear address width from the CPU,
/// any extension above 48-bit would require a new page table level, which we don't implement.
fn is_valid_address(virtual_address: usize) -> bool {
virtual_address < 0x8000_0000_0000 || virtual_address >= 0xFFFF_8000_0000_0000
}
/// Returns a Page including the given virtual address.
/// That means, the address is rounded down to a page size boundary.
fn including_address(virtual_address: usize) -> Self {
assert!(
Self::is_valid_address(virtual_address),
"Virtual address {:#X} is invalid",
virtual_address
);
if S::SIZE == 1024 * 1024 * 1024 {
assert!(processor::supports_1gib_pages());
}
Self {
virtual_address: align_down!(virtual_address, S::SIZE),
size: PhantomData,
}
}
/// Returns a PageIter to iterate from the given first Page to the given last Page (inclusive).
fn range(first: Self, last: Self) -> PageIter<S> {
assert!(first.virtual_address <= last.virtual_address);
PageIter {
current: first,
last: last,
}
}
/// Returns the index of this page in the table given by L.
fn table_index<L: PageTableLevel>(&self) -> usize {
assert!(L::LEVEL >= S::MAP_LEVEL);
self.virtual_address >> PAGE_BITS >> L::LEVEL * PAGE_MAP_BITS & PAGE_MAP_MASK
}
}
/// An iterator to walk through a range of pages of size S.
struct PageIter<S: PageSize> {
current: Page<S>,
last: Page<S>,
}
impl<S: PageSize> Iterator for PageIter<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Page<S>> {
if self.current.virtual_address <= self.last.virtual_address {
let p = self.current;
self.current.virtual_address += S::SIZE;
Some(p)
} else {
None
}
}
}
/// An interface to allow for a generic implementation of struct PageTable for all 4 page tables.
/// Must be implemented by all page tables.
trait PageTableLevel {
/// Numeric page table level (from 0 for PT through 3 for PML4) to enable numeric comparisons.
const LEVEL: usize;
}
/// An interface for page tables with sub page tables (all except PT).
/// Having both PageTableLevel and PageTableLevelWithSubtables leverages Rust's typing system to provide
/// a subtable method only for those that have sub page tables.
///
/// Kudos to Philipp Oppermann for the trick!
trait PageTableLevelWithSubtables: PageTableLevel {
type SubtableLevel;
}
/// The Page Map Level 4 (PML4) table, with numeric level 3 and PDPT subtables.
enum PML4 {}
impl PageTableLevel for PML4 {
const LEVEL: usize = 3;
}
impl PageTableLevelWithSubtables for PML4 {
type SubtableLevel = PDPT;
}
/// A Page Directory Pointer Table (PDPT), with numeric level 2 and PDT subtables.
enum PDPT {}
impl PageTableLevel for PDPT {
const LEVEL: usize = 2;
}
impl PageTableLevelWithSubtables for PDPT {
type SubtableLevel = PD;
}
/// A Page Directory (PD), with numeric level 1 and PT subtables.
enum PD {}
impl PageTableLevel for PD {
const LEVEL: usize = 1;
}
impl PageTableLevelWithSubtables for PD {
type SubtableLevel = PT;
}
/// A Page Table (PT), with numeric level 0 and no subtables.
enum PT {}
impl PageTableLevel for PT {
const LEVEL: usize = 0;
}
/// Representation of any page table (PML4, PDPT, PD, PT) in memory.
/// Parameter L supplies information for Rust's typing system to distinguish between the different tables.
struct PageTable<L> {
/// Each page table has 512 entries (can be calculated using PAGE_MAP_BITS).
entries: [PageTableEntry; 1 << PAGE_MAP_BITS],
/// Required by Rust to support the L parameter.
level: PhantomData<L>,
}
/// A trait defining methods every page table has to implement.
/// This additional trait is necessary to make use of Rust's specialization feature and provide a default
/// implementation of some methods.
trait PageTableMethods {
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry>;
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn drop_user_space(&mut self);
}
impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
/// Maps a single page in this table to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// Must only be called if a page of this size is mapped at this page table level!
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL == S::MAP_LEVEL);
let index = page.table_index::<L>();
let flush = self.entries[index].is_present();
self.entries[index].set(
physical_address,
PageTableEntryFlags::DIRTY | S::MAP_EXTRA_FLAG | flags,
);
if flush {
page.flush_from_tlb();
}
flush
}
/// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
///
/// This is the default implementation called only for PT.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL == S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
Some(self.entries[index])
} else {
None
}
}
default fn drop_user_space(&mut self) {
let last = 1 << PAGE_MAP_BITS;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
let physical_address = self.entries[index].address();
debug!("Free page frame at 0x{:x}", physical_address);
physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
/// Maps a single page to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// This is the default implementation that just calls the map_page_in_this_table method.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
impl<L: PageTableLevelWithSubtables> PageTableMethods for PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
/// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
///
/// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
/// It overrides the default implementation above.
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL >= S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
if L::LEVEL > S::MAP_LEVEL {
let subtable = self.subtable::<S>(page);
subtable.get_page_table_entry::<S>(page)
} else {
Some(self.entries[index])
}
} else {
None
}
}
fn drop_user_space(&mut self) {
let last = 1 << PAGE_MAP_BITS;
let table_address = self as *const PageTable<L> as usize;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
// currently, the user space uses only 4KB pages
if L::LEVEL > BasePageSize::MAP_LEVEL {
// Calculate the address of the subtable.
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
let subtable =
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
subtable.drop_user_space();
//let physical_address = self.entries[index].address();
//debug!("Free page table at 0x{:x}", physical_address);
//physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
}
/// Maps a single page to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
/// It overrides the default implementation above.
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL >= S::MAP_LEVEL);
if L::LEVEL > S::MAP_LEVEL {
let index = page.table_index::<L>();
// Does the table exist yet?
if !self.entries[index].is_present() {
// Allocate a single 4 KiB page for the new entry and mark it as a valid, writable subtable.
let pt_addr = physicalmem::allocate(BasePageSize::SIZE);
if flags.contains(PageTableEntryFlags::USER_ACCESSIBLE) {
self.entries[index].set(
pt_addr,
PageTableEntryFlags::WRITABLE | PageTableEntryFlags::USER_ACCESSIBLE,
);
} else {
self.entries[index].set(pt_addr, PageTableEntryFlags::WRITABLE);
}
// Mark all entries as unused in the newly created table.
let subtable = self.subtable::<S>(page);
for entry in subtable.entries.iter_mut() {
entry.physical_address_and_flags = 0;
}
subtable.map_page::<S>(page, physical_address, flags)
} else {
let subtable = self.subtable::<S>(page);
subtable.map_page::<S>(page, physical_address, flags)
}
} else {
// Calling the default implementation from a specialized one is not supported (yet),
// so we have to resort to an extra function.
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
}
impl<L: PageTableLevelWithSubtables> PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
/// Returns the next subtable for the given page in the page table hierarchy.
///
/// Must only be called if a page of this size is mapped in a subtable!
fn subtable<S: PageSize>(&self, page: Page<S>) -> &mut PageTable<L::SubtableLevel> {
assert!(L::LEVEL > S::MAP_LEVEL);
// Calculate the address of the subtable.
let index = page.table_index::<L>();
let table_address = self as *const PageTable<L> as usize;
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) }
}
/// Maps a continuous range of pages.
///
/// # Arguments
///
/// * `range` - The range of pages of size S
/// * `physical_address` - First physical address to map these pages to
/// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or EXECUTE_DISABLE).
/// The PRESENT, ACCESSED, and DIRTY flags are already set automatically.
fn map_pages<S: PageSize>(
&mut self,
range: PageIter<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) {
let mut current_physical_address = physical_address;
for page in range {
self.map_page(page, current_physical_address, flags);
current_physical_address += S::SIZE;
}
}
fn drop_user_space(&mut self) {
assert!(L::LEVEL == PML4::LEVEL);
// the last entry is required to get access to the page tables
let last = (1 << PAGE_MAP_BITS) - 1;
let table_address = self as *const PageTable<L> as usize;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
// Calculate the address of the subtable.
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
let subtable =
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
subtable.drop_user_space();
let physical_address = self.entries[index].address();
debug!("Free page table at 0x{:x}", physical_address);
physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
}
pub extern "x86-interrupt" fn page_fault_handler(
stack_frame: irq::ExceptionStackFrame,
error_code: u64,
) {
let mut virtual_address = unsafe { controlregs::cr2() };
// do we have to create the user-space stack?
if virtual_address > USER_SPACE_START {
virtual_address = align_down!(virtual_address, BasePageSize::SIZE);
// Ok, user space want to have memory (for the stack / heap)
let physical_address =
physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
debug!(
"Map 0x{:x} into the user space at 0x{:x}",
physical_address, virtual_address
);
map::<BasePageSize>(
virtual_address,
physical_address,
1,
PageTableEntryFlags::WRITABLE
| PageTableEntryFlags::USER_ACCESSIBLE
| PageTableEntryFlags::EXECUTE_DISABLE,
);
unsafe {
// clear new page
write_bytes(virtual_address as *mut u8, 0x00, BasePageSize::SIZE);
// clear cr2 to signalize that the pagefault is solved by the pagefault handler
controlregs::cr2_write(0);
}
} else {
// Anything else is an error!
let pferror = PageFaultError::from_bits_truncate(error_code as u32);
error!("Page Fault (#PF) Exception: {:#?}", stack_frame);
error!(
"virtual_address = {:#X}, page fault error = {}",
virtual_address, pferror
);
// clear cr2 to signalize that the pagefault is solved by the pagefault handler
unsafe {
controlregs::cr2_write(0);
}
scheduler::abort();
}
}
fn get_page_range<S: PageSize>(virtual_address: usize, count: usize) -> PageIter<S> {
let first_page = Page::<S>::including_address(virtual_address);
let last_page = Page::<S>::including_address(virtual_address + (count - 1) * S::SIZE);
Page::range(first_page, last_page)
}
pub fn get_page_table_entry<S: PageSize>(virtual_address: usize) -> Option<PageTableEntry> {
debug!("Looking up Page Table Entry for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.get_page_table_entry(page)
}
pub fn get_physical_address<S: PageSize>(virtual_address: usize) -> usize {
debug!("Getting physical address for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
let address = root_pagetable
.get_page_table_entry(page)
.expect("Entry not present")
.address();
let offset = virtual_address & (S::SIZE - 1);
address | offset
}
/// Translate a virtual memory address to a physical one.
/// Just like get_physical_address, but automatically uses the correct page size for the respective memory address.
pub fn virtual_to_physical(virtual_address: usize) -> usize {
get_physical_address::<BasePageSize>(virtual_address)
}
pub fn unmap<S: PageSize>(virtual_address: usize, count: usize) {
debug!(
"Unmapping virtual address {:#X} ({} pages)",
virtual_address, count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, 0, PageTableEntryFlags::BLANK);
}
pub fn map<S: PageSize>(
virtual_address: usize,
physical_address: usize,
count: usize,
flags: PageTableEntryFlags,
) {
debug!(
"Mapping virtual address {:#X} to physical address {:#X} ({} pages)",
virtual_address, physical_address, count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, physical_address, flags);
}
static mut ROOT_PAGE_TABLE: usize = 0;
#[inline(always)]
pub fn get_kernel_root_page_table() -> usize {
unsafe { ROOT_PAGE_TABLE }
}
pub fn drop_user_space() {
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.drop_user_space();
}
// Just a workaround to explain the difference between
// kernel and user space.
/// Create a fresh PML4 for a user-level task and switch the scheduler to it.
/// Returns the physical address of the new PML4.
pub fn create_usr_pgd() -> usize {
    debug!("Create 1st level page table for the user-level task");
    unsafe {
        // Physical frame that will hold the new PML4.
        let physical_address =
            physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
        // Scratch virtual page so we can write the frame before it is active.
        let user_page_table: usize =
            virtualmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
        debug!(
            "Map page frame 0x{:x} at virtual address 0x{:x}",
            physical_address, user_page_table
        );
        map::<BasePageSize>(
            user_page_table,
            physical_address,
            1,
            PageTableEntryFlags::WRITABLE | PageTableEntryFlags::EXECUTE_DISABLE,
        );
        // Start from an all-zero (non-present) table.
        write_bytes(user_page_table as *mut u8, 0x00, BasePageSize::SIZE);
        let recursive_pgt = BOOT_INFO.unwrap().recursive_page_table_addr as *const u64;
        let recursive_pgt_idx = BOOT_INFO.unwrap().recursive_index();
        let pml4 = user_page_table as *mut u64;
        // Copy the kernel's lower PML4 entries into the new table so kernel
        // mappings stay visible. NOTE(review): the `+ 2` presumably also
        // copies the recursive slot and one adjacent slot — confirm against
        // the bootloader's layout.
        for i in 0..recursive_pgt_idx + 2 {
            *pml4.offset(i.try_into().unwrap()) = *recursive_pgt.offset(i.try_into().unwrap());
        }
        // Install the recursive self-mapping: the last entry (index 511)
        // points back at the new PML4 itself, which is what makes the fixed
        // PML4_ADDRESS alias work for this address space.
        let pml4 =
            (user_page_table + BasePageSize::SIZE - size_of::<usize>()) as *mut PageTableEntry;
        (*pml4).set(physical_address, PageTableEntryFlags::WRITABLE);
        // unmap page table
        unmap::<BasePageSize>(user_page_table, 1);
        virtualmem::deallocate(user_page_table, BasePageSize::SIZE);
        // Hand the new root to the scheduler; it becomes CR3 on task switch.
        scheduler::set_root_page_table(physical_address);
        physical_address
    }
}
/// Initialize paging: adopt the bootloader's page table, move its recursive
/// entry to the canonical slot 511, and clear stale bootloader mappings.
pub fn init() {
    let recursive_pgt = unsafe { BOOT_INFO.unwrap().recursive_page_table_addr } as *mut u64;
    let recursive_pgt_idx = unsafe { BOOT_INFO.unwrap().recursive_index() };
    debug!(
        "Found recursive_page_table_addr at 0x{:x}",
        recursive_pgt as u64
    );
    debug!("Recursive index: {}", recursive_pgt_idx);
    unsafe {
        // The recursive PML4 entry holds the PML4's own physical address;
        // mask off the flag bits to recover it.
        ROOT_PAGE_TABLE = *recursive_pgt.offset(recursive_pgt_idx.try_into().unwrap()) as usize
            & !(BasePageSize::SIZE - 1);
        // Duplicate the recursive entry into slot 511 so the fixed
        // PML4_ADDRESS alias (0xFFFF_FFFF_FFFF_F000) resolves to this PML4.
        *recursive_pgt.offset(511) = *recursive_pgt.offset(recursive_pgt_idx.try_into().unwrap());
        // Zero the remaining upper entries left over from the bootloader.
        // NOTE(review): skips idx..idx+2, mirroring the copy in
        // create_usr_pgd — confirm which slots those two entries cover.
        for i in recursive_pgt_idx + 2..511 {
            *recursive_pgt.offset(i.try_into().unwrap()) = 0;
        }
        //flush TLB by reloading CR3
        controlregs::cr3_write(controlregs::cr3());
    }
}
| BasePageSize | identifier_name |
paging.rs | // Copyright (c) 2017 Colin Finck, RWTH Aachen University
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![allow(dead_code)]
use crate::arch::x86_64::kernel::irq;
use crate::arch::x86_64::kernel::processor;
use crate::arch::x86_64::kernel::BOOT_INFO;
use crate::arch::x86_64::mm::{physicalmem, virtualmem};
use crate::consts::*;
use crate::logging::*;
use crate::scheduler;
use core::arch::asm;
use core::convert::TryInto;
use core::marker::PhantomData;
use core::mem::size_of;
use core::ptr::write_bytes;
use num_traits::CheckedShr;
use x86::controlregs;
use x86::irq::*;
/// Pointer to the root page table (PML4)
const PML4_ADDRESS: *mut PageTable<PML4> = 0xFFFF_FFFF_FFFF_F000 as *mut PageTable<PML4>;
/// Number of Offset bits of a virtual address for a 4 KiB page, which are shifted away to get its Page Frame Number (PFN).
const PAGE_BITS: usize = 12;
/// Number of bits of the index in each table (PML4, PDPT, PD, PT).
const PAGE_MAP_BITS: usize = 9;
/// A mask where PAGE_MAP_BITS are set to calculate a table index.
const PAGE_MAP_MASK: usize = 0x1FF;
bitflags! {
    /// Possible flags for an entry in either table (PML4, PDPT, PD, PT)
    ///
    /// See Intel Vol. 3A, Tables 4-14 through 4-19
    pub struct PageTableEntryFlags: usize {
        /// Set if this entry is valid and points to a page or table.
        const PRESENT = 1 << 0;
        /// Set if memory referenced by this entry shall be writable.
        const WRITABLE = 1 << 1;
        /// Set if memory referenced by this entry shall be accessible from user-mode (Ring 3).
        const USER_ACCESSIBLE = 1 << 2;
        /// Set if Write-Through caching shall be enabled for memory referenced by this entry.
        /// Otherwise, Write-Back caching is used.
        const WRITE_THROUGH = 1 << 3;
        /// Set if caching shall be disabled for memory referenced by this entry.
        const CACHE_DISABLE = 1 << 4;
        /// Set if software has accessed this entry (for memory access or address translation).
        const ACCESSED = 1 << 5;
        /// Only for page entries: Set if software has written to the memory referenced by this entry.
        const DIRTY = 1 << 6;
        /// Only for page entries in PDPT or PDT: Set if this entry references a 1 GiB (PDPT) or 2 MiB (PDT) page.
        const HUGE_PAGE = 1 << 7;
        /// Only for page entries: Set if this address translation is global for all tasks and does not need to
        /// be flushed from the TLB when CR3 is reset.
        const GLOBAL = 1 << 8;
        /// Set if code execution shall be disabled for memory referenced by this entry.
        /// NOTE: bit 63 (NX) only takes effect when EFER.NXE is enabled on the CPU.
        const EXECUTE_DISABLE = 1 << 63;
    }
}
impl PageTableEntryFlags {
    /// An empty set of flags for unused/zeroed table entries.
    /// Needed as long as empty() is no const function.
    const BLANK: PageTableEntryFlags = PageTableEntryFlags { bits: 0 };
    /// Builder: mark the mapping as device memory (uncached).
    pub fn device(&mut self) -> &mut Self {
        self.insert(PageTableEntryFlags::CACHE_DISABLE);
        self
    }
    /// Builder: mark the mapping as normal (cached) memory.
    pub fn normal(&mut self) -> &mut Self {
        self.remove(PageTableEntryFlags::CACHE_DISABLE);
        self
    }
    /// Builder: make the mapping read-only.
    pub fn read_only(&mut self) -> &mut Self {
        self.remove(PageTableEntryFlags::WRITABLE);
        self
    }
    /// Builder: make the mapping writable.
    pub fn writable(&mut self) -> &mut Self {
        self.insert(PageTableEntryFlags::WRITABLE);
        self
    }
    /// Builder: forbid instruction fetches from the mapping (NX).
    pub fn execute_disable(&mut self) -> &mut Self {
        self.insert(PageTableEntryFlags::EXECUTE_DISABLE);
        self
    }
}
/// An entry in either table (PML4, PDPT, PD, PT)
#[derive(Clone, Copy)]
pub struct PageTableEntry {
    /// Physical memory address this entry refers, combined with flags from PageTableEntryFlags.
    physical_address_and_flags: usize,
}
impl PageTableEntry {
    /// Return the stored physical address.
    pub fn address(&self) -> usize {
        // Strip the low flag bits (offset within a 4 KiB frame) and the
        // NX bit (bit 63); everything in between is the frame address.
        self.physical_address_and_flags
            & !(BasePageSize::SIZE - 1)
            & !(PageTableEntryFlags::EXECUTE_DISABLE).bits()
    }
    /// Returns whether this entry is valid (present).
    fn is_present(&self) -> bool {
        (self.physical_address_and_flags & PageTableEntryFlags::PRESENT.bits()) != 0
    }
    /// Returns whether this entry maps a large/huge page (HUGE_PAGE bit set).
    fn is_huge(&self) -> bool {
        (self.physical_address_and_flags & PageTableEntryFlags::HUGE_PAGE.bits()) != 0
    }
    /// Returns whether this entry is accessible from user mode (Ring 3).
    fn is_user(&self) -> bool {
        (self.physical_address_and_flags & PageTableEntryFlags::USER_ACCESSIBLE.bits()) != 0
    }
    /// Mark this as a valid (present) entry and set address translation and flags.
    ///
    /// # Arguments
    ///
    /// * `physical_address` - The physical memory address this entry shall translate to
    /// * `flags` - Flags from PageTableEntryFlags (note that the PRESENT and ACCESSED flags are set automatically)
    fn set(&mut self, physical_address: usize, flags: PageTableEntryFlags) {
        if flags.contains(PageTableEntryFlags::HUGE_PAGE) {
            // HUGE_PAGE may indicate a 2 MiB or 1 GiB page.
            // We don't know this here, so we can only verify that at least the offset bits for a 2 MiB page are zero.
            assert!(
                (physical_address % LargePageSize::SIZE) == 0,
                "Physical address is not on a 2 MiB page boundary (physical_address = {:#X})",
                physical_address
            );
        } else {
            // Verify that the offset bits for a 4 KiB page are zero.
            assert!(
                (physical_address % BasePageSize::SIZE) == 0,
                "Physical address is not on a 4 KiB page boundary (physical_address = {:#X})",
                physical_address
            );
        }
        // Verify that the physical address does not exceed the CPU's physical address width.
        // CheckedShr returns None if the shift count >= bit width, hence the Some(0) check.
        assert!(
            CheckedShr::checked_shr(
                &physical_address,
                processor::get_physical_address_bits() as u32
            ) == Some(0),
            "Physical address exceeds CPU's physical address width (physical_address = {:#X})",
            physical_address
        );
        let mut flags_to_set = flags;
        flags_to_set.insert(PageTableEntryFlags::PRESENT);
        flags_to_set.insert(PageTableEntryFlags::ACCESSED);
        self.physical_address_and_flags = physical_address | flags_to_set.bits();
    }
}
/// A generic interface to support all possible page sizes.
///
/// This is defined as a subtrait of Copy to enable #[derive(Clone, Copy)] for Page.
/// Currently, deriving implementations for these traits only works if all dependent types implement it as well.
pub trait PageSize: Copy {
    /// The page size in bytes.
    const SIZE: usize;
    /// The page table level at which a page of this size is mapped (from 0 for PT through 3 for PML4).
    /// Implemented as a numeric value to enable numeric comparisons.
    const MAP_LEVEL: usize;
    /// Any extra flag that needs to be set to map a page of this size.
    /// For example: PageTableEntryFlags::HUGE_PAGE
    const MAP_EXTRA_FLAG: PageTableEntryFlags;
}
/// A 4 KiB page mapped in the PT.
/// (Uninhabited enum: used purely as a type-level marker.)
#[derive(Clone, Copy)]
pub enum BasePageSize {}
impl PageSize for BasePageSize {
    const SIZE: usize = 0x1000; // 4 KiB
    const MAP_LEVEL: usize = 0;
    const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::BLANK;
}
/// A 2 MiB page mapped in the PD.
#[derive(Clone, Copy)]
pub enum LargePageSize {}
impl PageSize for LargePageSize {
    const SIZE: usize = 0x200000; // 2 MiB
    const MAP_LEVEL: usize = 1;
    const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A 1 GiB page mapped in the PDPT.
#[derive(Clone, Copy)]
pub enum HugePageSize {}
impl PageSize for HugePageSize {
    const SIZE: usize = 0x40000000; // 1 GiB
    const MAP_LEVEL: usize = 2;
    const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A memory page of the size given by S.
#[derive(Clone, Copy)]
struct Page<S: PageSize> {
/// Virtual memory address of this page.
/// This is rounded to a page size boundary on creation.
virtual_address: usize,
/// Required by Rust to support the S parameter.
size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
/// Return the stored virtual address.
fn address(&self) -> usize {
self.virtual_address
}
/// Flushes this page from the TLB of this CPU.
#[inline(always)]
fn flush_from_tlb(&self) {
unsafe {
asm!("invlpg [{}]", in(reg) self.virtual_address, options(preserves_flags, nostack));
}
}
/// Returns whether the given virtual address is a valid one in the x86-64 memory model.
///
/// Current x86-64 supports only 48-bit for virtual memory addresses.
/// This is enforced by requiring bits 63 through 48 to replicate bit 47 (cf. Intel Vol. 1, 3.3.7.1).
/// As a consequence, the address space is divided into the two valid regions 0x8000_0000_0000
/// and 0xFFFF_8000_0000_0000.
///
/// Although we could make this check depend on the actual linear address width from the CPU,
/// any extension above 48-bit would require a new page table level, which we don't implement.
fn is_valid_address(virtual_address: usize) -> bool {
virtual_address < 0x8000_0000_0000 || virtual_address >= 0xFFFF_8000_0000_0000
}
/// Returns a Page including the given virtual address.
/// That means, the address is rounded down to a page size boundary.
fn including_address(virtual_address: usize) -> Self {
assert!(
Self::is_valid_address(virtual_address),
"Virtual address {:#X} is invalid",
virtual_address
);
if S::SIZE == 1024 * 1024 * 1024 {
assert!(processor::supports_1gib_pages());
}
Self {
virtual_address: align_down!(virtual_address, S::SIZE),
size: PhantomData,
}
}
/// Returns a PageIter to iterate from the given first Page to the given last Page (inclusive).
fn range(first: Self, last: Self) -> PageIter<S> {
assert!(first.virtual_address <= last.virtual_address);
PageIter {
current: first,
last: last,
}
}
/// Returns the index of this page in the table given by L.
fn table_index<L: PageTableLevel>(&self) -> usize {
assert!(L::LEVEL >= S::MAP_LEVEL);
self.virtual_address >> PAGE_BITS >> L::LEVEL * PAGE_MAP_BITS & PAGE_MAP_MASK
}
}
/// An iterator to walk through a range of pages of size S.
struct PageIter<S: PageSize> {
    current: Page<S>,
    last: Page<S>,
}
impl<S: PageSize> Iterator for PageIter<S> {
    type Item = Page<S>;
    /// Yield the current page and advance by one page size; the range is
    /// inclusive of `last`.
    fn next(&mut self) -> Option<Page<S>> {
        // Guard clause: once we have walked past the inclusive upper bound,
        // the iteration is finished.
        if self.current.virtual_address > self.last.virtual_address {
            return None;
        }
        let page = self.current;
        self.current.virtual_address += S::SIZE;
        Some(page)
    }
}
/// An interface to allow for a generic implementation of struct PageTable for all 4 page tables.
/// Must be implemented by all page tables.
trait PageTableLevel {
    /// Numeric page table level (from 0 for PT through 3 for PML4) to enable numeric comparisons.
    const LEVEL: usize;
}
/// An interface for page tables with sub page tables (all except PT).
/// Having both PageTableLevel and PageTableLevelWithSubtables leverages Rust's typing system to provide
/// a subtable method only for those that have sub page tables.
///
/// Kudos to Philipp Oppermann for the trick!
trait PageTableLevelWithSubtables: PageTableLevel {
    /// The marker type of the next-lower table level.
    type SubtableLevel;
}
/// The Page Map Level 4 (PML4) table, with numeric level 3 and PDPT subtables.
/// (Uninhabited enum: type-level marker only.)
enum PML4 {}
impl PageTableLevel for PML4 {
    const LEVEL: usize = 3;
}
impl PageTableLevelWithSubtables for PML4 {
    type SubtableLevel = PDPT;
}
/// A Page Directory Pointer Table (PDPT), with numeric level 2 and PDT subtables.
enum PDPT {}
impl PageTableLevel for PDPT {
    const LEVEL: usize = 2;
}
impl PageTableLevelWithSubtables for PDPT {
    type SubtableLevel = PD;
}
/// A Page Directory (PD), with numeric level 1 and PT subtables.
enum PD {}
impl PageTableLevel for PD {
    const LEVEL: usize = 1;
}
impl PageTableLevelWithSubtables for PD {
    type SubtableLevel = PT;
}
/// A Page Table (PT), with numeric level 0 and no subtables.
enum PT {}
impl PageTableLevel for PT {
    const LEVEL: usize = 0;
}
/// Representation of any page table (PML4, PDPT, PD, PT) in memory.
/// Parameter L supplies information for Rust's typing system to distinguish between the different tables.
struct PageTable<L> {
    /// Each page table has 512 entries (can be calculated using PAGE_MAP_BITS).
    entries: [PageTableEntry; 1 << PAGE_MAP_BITS],
    /// Required by Rust to support the L parameter.
    level: PhantomData<L>,
}
/// A trait defining methods every page table has to implement.
/// This additional trait is necessary to make use of Rust's specialization feature and provide a default
/// implementation of some methods.
trait PageTableMethods {
    /// Look up the entry for `page`, recursing into subtables as needed.
    fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry>;
    /// Write the leaf entry for `page` in *this* table (no recursion).
    fn map_page_in_this_table<S: PageSize>(
        &mut self,
        page: Page<S>,
        physical_address: usize,
        flags: PageTableEntryFlags,
    ) -> bool;
    /// Map `page`, creating intermediate tables as needed.
    fn map_page<S: PageSize>(
        &mut self,
        page: Page<S>,
        physical_address: usize,
        flags: PageTableEntryFlags,
    ) -> bool;
    /// Recursively free user-space frames referenced from this table.
    fn drop_user_space(&mut self);
}
impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
    /// Maps a single page in this table to the given physical address.
    /// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
    ///
    /// Must only be called if a page of this size is mapped at this page table level!
    fn map_page_in_this_table<S: PageSize>(
        &mut self,
        page: Page<S>,
        physical_address: usize,
        flags: PageTableEntryFlags,
    ) -> bool {
        assert!(L::LEVEL == S::MAP_LEVEL);
        let index = page.table_index::<L>();
        // If the slot was already present we must flush the stale TLB entry.
        let flush = self.entries[index].is_present();
        // DIRTY is pre-set to avoid a write-back fault on first write.
        self.entries[index].set(
            physical_address,
            PageTableEntryFlags::DIRTY | S::MAP_EXTRA_FLAG | flags,
        );
        if flush {
            page.flush_from_tlb();
        }
        flush
    }
    /// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
    ///
    /// This is the default implementation called only for PT.
    /// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
    default fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
        assert!(L::LEVEL == S::MAP_LEVEL);
        let index = page.table_index::<L>();
        if self.entries[index].is_present() {
            Some(self.entries[index])
        } else {
            None
        }
    }
    /// Default (PT-level) implementation: free every user-accessible frame
    /// referenced by this table.
    default fn drop_user_space(&mut self) {
        let last = 1 << PAGE_MAP_BITS;
        for index in 0..last {
            if self.entries[index].is_present() && self.entries[index].is_user() {
                let physical_address = self.entries[index].address();
                debug!("Free page frame at 0x{:x}", physical_address);
                // Frees one 4 KiB frame per entry — user space only uses
                // BasePageSize pages (see the comment in the subtable impl).
                physicalmem::deallocate(physical_address, BasePageSize::SIZE);
            }
        }
    }
    /// Maps a single page to the given physical address.
    /// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
    ///
    /// This is the default implementation that just calls the map_page_in_this_table method.
    /// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
    default fn map_page<S: PageSize>(
        &mut self,
        page: Page<S>,
        physical_address: usize,
        flags: PageTableEntryFlags,
    ) -> bool {
        self.map_page_in_this_table::<S>(page, physical_address, flags)
    }
}
impl<L: PageTableLevelWithSubtables> PageTableMethods for PageTable<L>
where
    L::SubtableLevel: PageTableLevel,
{
    /// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
    ///
    /// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
    /// It overrides the default implementation above.
    fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
        assert!(L::LEVEL >= S::MAP_LEVEL);
        let index = page.table_index::<L>();
        if self.entries[index].is_present() {
            if L::LEVEL > S::MAP_LEVEL {
                // Not yet at the mapping level: recurse into the subtable.
                let subtable = self.subtable::<S>(page);
                subtable.get_page_table_entry::<S>(page)
            } else {
                Some(self.entries[index])
            }
        } else {
            None
        }
    }
    /// Recursively free user-space frames below this table (PDPT/PD levels).
    fn drop_user_space(&mut self) {
        let last = 1 << PAGE_MAP_BITS;
        let table_address = self as *const PageTable<L> as usize;
        for index in 0..last {
            if self.entries[index].is_present() && self.entries[index].is_user() {
                // currently, the user space uses only 4KB pages
                if L::LEVEL > BasePageSize::MAP_LEVEL {
                    // Calculate the address of the subtable.
                    // With the recursive mapping, shifting the table's own
                    // virtual address left by one level and inserting the
                    // entry index yields the subtable's virtual address.
                    let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
                    let subtable =
                        unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
                    subtable.drop_user_space();
                    //let physical_address = self.entries[index].address();
                    //debug!("Free page table at 0x{:x}", physical_address);
                    //physicalmem::deallocate(physical_address, BasePageSize::SIZE);
                }
            }
        }
    }
    /// Maps a single page to the given physical address.
    /// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
    ///
    /// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
    /// It overrides the default implementation above.
    fn map_page<S: PageSize>(
        &mut self,
        page: Page<S>,
        physical_address: usize,
        flags: PageTableEntryFlags,
    ) -> bool {
        assert!(L::LEVEL >= S::MAP_LEVEL);
        if L::LEVEL > S::MAP_LEVEL {
            let index = page.table_index::<L>();
            // Does the table exist yet?
            if !self.entries[index].is_present() {
                // Allocate a single 4 KiB page for the new entry and mark it as a valid, writable subtable.
                let pt_addr = physicalmem::allocate(BasePageSize::SIZE);
                // Propagate USER_ACCESSIBLE to the intermediate entry, since
                // user-mode access requires it on every level of the walk.
                if flags.contains(PageTableEntryFlags::USER_ACCESSIBLE) {
                    self.entries[index].set(
                        pt_addr,
                        PageTableEntryFlags::WRITABLE | PageTableEntryFlags::USER_ACCESSIBLE,
                    );
                } else {
                    self.entries[index].set(pt_addr, PageTableEntryFlags::WRITABLE);
                }
                // Mark all entries as unused in the newly created table.
                // (Written through the recursive mapping, which is valid now
                // that the entry above is present.)
                let subtable = self.subtable::<S>(page);
                for entry in subtable.entries.iter_mut() {
                    entry.physical_address_and_flags = 0;
                }
                subtable.map_page::<S>(page, physical_address, flags)
            } else {
                let subtable = self.subtable::<S>(page);
                subtable.map_page::<S>(page, physical_address, flags)
            }
        } else {
            // Calling the default implementation from a specialized one is not supported (yet),
            // so we have to resort to an extra function.
            self.map_page_in_this_table::<S>(page, physical_address, flags)
        }
    }
}
impl<L: PageTableLevelWithSubtables> PageTable<L>
where
    L::SubtableLevel: PageTableLevel,
{
    /// Returns the next subtable for the given page in the page table hierarchy.
    ///
    /// Must only be called if a page of this size is mapped in a subtable!
    fn subtable<S: PageSize>(&self, page: Page<S>) -> &mut PageTable<L::SubtableLevel> {
        assert!(L::LEVEL > S::MAP_LEVEL);
        // Calculate the address of the subtable.
        // Recursive-mapping arithmetic: shifting this table's virtual address
        // left by PAGE_MAP_BITS descends one level, and the entry index
        // selects which subtable within that level.
        let index = page.table_index::<L>();
        let table_address = self as *const PageTable<L> as usize;
        let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
        unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) }
    }
    /// Maps a continuous range of pages.
    ///
    /// # Arguments
    ///
    /// * `range` - The range of pages of size S
    /// * `physical_address` - First physical address to map these pages to
    /// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or EXECUTE_DISABLE).
    /// The PRESENT, ACCESSED, and DIRTY flags are already set automatically.
    fn map_pages<S: PageSize>(
        &mut self,
        range: PageIter<S>,
        physical_address: usize,
        flags: PageTableEntryFlags,
    ) {
        let mut current_physical_address = physical_address;
        for page in range {
            self.map_page(page, current_physical_address, flags);
            current_physical_address += S::SIZE;
        }
    }
    /// PML4-level entry point: free all user-space frames of this address space.
    fn drop_user_space(&mut self) {
        assert!(L::LEVEL == PML4::LEVEL);
        // the last entry is required to get access to the page tables
        // (the recursive self-mapping in slot 511), so it is skipped here.
        let last = (1 << PAGE_MAP_BITS) - 1;
        let table_address = self as *const PageTable<L> as usize;
        for index in 0..last {
            if self.entries[index].is_present() && self.entries[index].is_user() {
                // Calculate the address of the subtable.
                let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
                let subtable =
                    unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
                subtable.drop_user_space();
                // Free the PDPT frame itself after its contents are dropped.
                let physical_address = self.entries[index].address();
                debug!("Free page table at 0x{:x}", physical_address);
                physicalmem::deallocate(physical_address, BasePageSize::SIZE);
            }
        }
    }
}
/// #PF handler: demand-allocates zeroed pages for user-space addresses
/// (stack/heap growth); any other fault is fatal for the current task.
pub extern "x86-interrupt" fn page_fault_handler(
    stack_frame: irq::ExceptionStackFrame,
    error_code: u64,
) {
    // CR2 holds the faulting linear address.
    let mut virtual_address = unsafe { controlregs::cr2() };
    // do we have to create the user-space stack?
    // NOTE(review): strict `>` excludes a fault at exactly USER_SPACE_START —
    // confirm whether `>=` is intended.
    if virtual_address > USER_SPACE_START {
        virtual_address = align_down!(virtual_address, BasePageSize::SIZE);
        // Ok, user space want to have memory (for the stack / heap)
        let physical_address =
            physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
        debug!(
            "Map 0x{:x} into the user space at 0x{:x}",
            physical_address, virtual_address
        );
        map::<BasePageSize>(
            virtual_address,
            physical_address,
            1,
            PageTableEntryFlags::WRITABLE
                | PageTableEntryFlags::USER_ACCESSIBLE
                | PageTableEntryFlags::EXECUTE_DISABLE,
        );
        unsafe {
            // clear new page
            write_bytes(virtual_address as *mut u8, 0x00, BasePageSize::SIZE);
            // clear cr2 to signalize that the pagefault is solved by the pagefault handler
            controlregs::cr2_write(0);
        }
    } else {
        // Anything else is an error!
        let pferror = PageFaultError::from_bits_truncate(error_code as u32);
        error!("Page Fault (#PF) Exception: {:#?}", stack_frame);
        error!(
            "virtual_address = {:#X}, page fault error = {}",
            virtual_address, pferror
        );
        // clear cr2 to signalize that the pagefault is solved by the pagefault handler
        unsafe {
            controlregs::cr2_write(0);
        }
        scheduler::abort();
    }
}
/// Build an inclusive PageIter over the `count` pages of size `S` that start
/// at `virtual_address`.
fn get_page_range<S: PageSize>(virtual_address: usize, count: usize) -> PageIter<S> {
    let start = Page::<S>::including_address(virtual_address);
    let end = Page::<S>::including_address(virtual_address + (count - 1) * S::SIZE);
    Page::range(start, end)
}
pub fn get_page_table_entry<S: PageSize>(virtual_address: usize) -> Option<PageTableEntry> |
/// Resolve `virtual_address` to its physical address, assuming it is mapped
/// with page size `S`. Panics if the address is not mapped.
pub fn get_physical_address<S: PageSize>(virtual_address: usize) -> usize {
    debug!("Getting physical address for {:#X}", virtual_address);
    let page = Page::<S>::including_address(virtual_address);
    let root_pagetable = unsafe { &mut *PML4_ADDRESS };
    let address = root_pagetable
        .get_page_table_entry(page)
        .expect("Entry not present")
        .address();
    // Re-attach the in-page offset to the frame address.
    let offset = virtual_address & (S::SIZE - 1);
    address | offset
}
/// Translate a virtual memory address to a physical one.
/// Just like get_physical_address, but automatically uses the correct page size for the respective memory address.
pub fn virtual_to_physical(virtual_address: usize) -> usize {
    // Always resolves through the 4 KiB path (BasePageSize).
    get_physical_address::<BasePageSize>(virtual_address)
}
/// Invalidate the mappings for `count` pages of size `S` starting at `virtual_address`.
pub fn unmap<S: PageSize>(virtual_address: usize, count: usize) {
    debug!(
        "Unmapping virtual address {:#X} ({} pages)",
        virtual_address, count
    );
    let range = get_page_range::<S>(virtual_address, count);
    let root_pagetable = unsafe { &mut *PML4_ADDRESS };
    // NOTE(review): PageTableEntry::set unconditionally inserts PRESENT, so
    // this re-maps the range to frame 0 rather than clearing PRESENT —
    // confirm this invalidation scheme is intended.
    root_pagetable.map_pages(range, 0, PageTableEntryFlags::BLANK);
}
/// Map `count` consecutive pages of size `S` starting at `virtual_address`
/// to the physically contiguous range starting at `physical_address`.
pub fn map<S: PageSize>(
    virtual_address: usize,
    physical_address: usize,
    count: usize,
    flags: PageTableEntryFlags,
) {
    debug!(
        "Mapping virtual address {:#X} to physical address {:#X} ({} pages)",
        virtual_address, physical_address, count
    );
    let range = get_page_range::<S>(virtual_address, count);
    let root_pagetable = unsafe { &mut *PML4_ADDRESS };
    root_pagetable.map_pages(range, physical_address, flags);
}
/// Physical address of the kernel's root page table (PML4), captured once in init().
static mut ROOT_PAGE_TABLE: usize = 0;
/// Return the physical address of the kernel's PML4 (as recorded by init()).
#[inline(always)]
pub fn get_kernel_root_page_table() -> usize {
    unsafe { ROOT_PAGE_TABLE }
}
/// Free all user-space page frames and page tables of the current address space.
pub fn drop_user_space() {
    let root_pagetable = unsafe { &mut *PML4_ADDRESS };
    root_pagetable.drop_user_space();
}
// Just a workaround to explain the difference between
// kernel and user space.
/// Create a fresh PML4 for a user-level task and switch the scheduler to it.
/// Returns the physical address of the new PML4.
pub fn create_usr_pgd() -> usize {
    debug!("Create 1st level page table for the user-level task");
    unsafe {
        // Physical frame that will hold the new PML4.
        let physical_address =
            physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
        // Scratch virtual page so we can write the frame before it is active.
        let user_page_table: usize =
            virtualmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
        debug!(
            "Map page frame 0x{:x} at virtual address 0x{:x}",
            physical_address, user_page_table
        );
        map::<BasePageSize>(
            user_page_table,
            physical_address,
            1,
            PageTableEntryFlags::WRITABLE | PageTableEntryFlags::EXECUTE_DISABLE,
        );
        // Start from an all-zero (non-present) table.
        write_bytes(user_page_table as *mut u8, 0x00, BasePageSize::SIZE);
        let recursive_pgt = BOOT_INFO.unwrap().recursive_page_table_addr as *const u64;
        let recursive_pgt_idx = BOOT_INFO.unwrap().recursive_index();
        let pml4 = user_page_table as *mut u64;
        // Copy the kernel's lower PML4 entries so kernel mappings stay
        // visible. NOTE(review): the `+ 2` presumably also copies the
        // recursive slot and one adjacent slot — confirm against the
        // bootloader's layout.
        for i in 0..recursive_pgt_idx + 2 {
            *pml4.offset(i.try_into().unwrap()) = *recursive_pgt.offset(i.try_into().unwrap());
        }
        // Install the recursive self-mapping: the last entry (index 511)
        // points back at the new PML4 itself, making PML4_ADDRESS work for
        // this address space.
        let pml4 =
            (user_page_table + BasePageSize::SIZE - size_of::<usize>()) as *mut PageTableEntry;
        (*pml4).set(physical_address, PageTableEntryFlags::WRITABLE);
        // unmap page table
        unmap::<BasePageSize>(user_page_table, 1);
        virtualmem::deallocate(user_page_table, BasePageSize::SIZE);
        // Hand the new root to the scheduler; it becomes CR3 on task switch.
        scheduler::set_root_page_table(physical_address);
        physical_address
    }
}
/// Initialize paging: adopt the bootloader's page table, move its recursive
/// entry to the canonical slot 511, and clear stale bootloader mappings.
pub fn init() {
    let recursive_pgt = unsafe { BOOT_INFO.unwrap().recursive_page_table_addr } as *mut u64;
    let recursive_pgt_idx = unsafe { BOOT_INFO.unwrap().recursive_index() };
    debug!(
        "Found recursive_page_table_addr at 0x{:x}",
        recursive_pgt as u64
    );
    debug!("Recursive index: {}", recursive_pgt_idx);
    unsafe {
        // The recursive PML4 entry holds the PML4's own physical address;
        // mask off the flag bits to recover it.
        ROOT_PAGE_TABLE = *recursive_pgt.offset(recursive_pgt_idx.try_into().unwrap()) as usize
            & !(BasePageSize::SIZE - 1);
        // Duplicate the recursive entry into slot 511 so the fixed
        // PML4_ADDRESS alias (0xFFFF_FFFF_FFFF_F000) resolves to this PML4.
        *recursive_pgt.offset(511) = *recursive_pgt.offset(recursive_pgt_idx.try_into().unwrap());
        // Zero the remaining upper entries left over from the bootloader.
        for i in recursive_pgt_idx + 2..511 {
            *recursive_pgt.offset(i.try_into().unwrap()) = 0;
        }
        //flush TLB by reloading CR3
        controlregs::cr3_write(controlregs::cr3());
    }
}
| {
debug!("Looking up Page Table Entry for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.get_page_table_entry(page)
} | identifier_body |
paging.rs | // Copyright (c) 2017 Colin Finck, RWTH Aachen University
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![allow(dead_code)]
use crate::arch::x86_64::kernel::irq;
use crate::arch::x86_64::kernel::processor;
use crate::arch::x86_64::kernel::BOOT_INFO;
use crate::arch::x86_64::mm::{physicalmem, virtualmem};
use crate::consts::*;
use crate::logging::*;
use crate::scheduler;
use core::arch::asm;
use core::convert::TryInto;
use core::marker::PhantomData;
use core::mem::size_of;
use core::ptr::write_bytes;
use num_traits::CheckedShr;
use x86::controlregs;
use x86::irq::*;
/// Pointer to the root page table (PML4)
const PML4_ADDRESS: *mut PageTable<PML4> = 0xFFFF_FFFF_FFFF_F000 as *mut PageTable<PML4>;
/// Number of Offset bits of a virtual address for a 4 KiB page, which are shifted away to get its Page Frame Number (PFN).
const PAGE_BITS: usize = 12;
/// Number of bits of the index in each table (PML4, PDPT, PD, PT).
const PAGE_MAP_BITS: usize = 9;
/// A mask where PAGE_MAP_BITS are set to calculate a table index.
const PAGE_MAP_MASK: usize = 0x1FF;
bitflags! {
    /// Possible flags for an entry in either table (PML4, PDPT, PD, PT)
    ///
    /// See Intel Vol. 3A, Tables 4-14 through 4-19
    pub struct PageTableEntryFlags: usize {
        /// Set if this entry is valid and points to a page or table.
        const PRESENT = 1 << 0;
        /// Set if memory referenced by this entry shall be writable.
        const WRITABLE = 1 << 1;
        /// Set if memory referenced by this entry shall be accessible from user-mode (Ring 3).
        const USER_ACCESSIBLE = 1 << 2;
        /// Set if Write-Through caching shall be enabled for memory referenced by this entry.
        /// Otherwise, Write-Back caching is used.
        const WRITE_THROUGH = 1 << 3;
        /// Set if caching shall be disabled for memory referenced by this entry.
        const CACHE_DISABLE = 1 << 4;
        /// Set if software has accessed this entry (for memory access or address translation).
        const ACCESSED = 1 << 5;
        /// Only for page entries: Set if software has written to the memory referenced by this entry.
        const DIRTY = 1 << 6;
        /// Only for page entries in PDPT or PDT: Set if this entry references a 1 GiB (PDPT) or 2 MiB (PDT) page.
        const HUGE_PAGE = 1 << 7;
        /// Only for page entries: Set if this address translation is global for all tasks and does not need to
        /// be flushed from the TLB when CR3 is reset.
        const GLOBAL = 1 << 8;
        /// Set if code execution shall be disabled for memory referenced by this entry.
        /// NOTE: bit 63 (NX) only takes effect when EFER.NXE is enabled on the CPU.
        const EXECUTE_DISABLE = 1 << 63;
    }
}
impl PageTableEntryFlags {
    /// An empty set of flags for unused/zeroed table entries.
    /// Needed as long as empty() is no const function.
    const BLANK: PageTableEntryFlags = PageTableEntryFlags { bits: 0 };
    /// Builder: mark the mapping as device memory (uncached).
    pub fn device(&mut self) -> &mut Self {
        self.insert(PageTableEntryFlags::CACHE_DISABLE);
        self
    }
    /// Builder: mark the mapping as normal (cached) memory.
    pub fn normal(&mut self) -> &mut Self {
        self.remove(PageTableEntryFlags::CACHE_DISABLE);
        self
    }
    /// Builder: make the mapping read-only.
    pub fn read_only(&mut self) -> &mut Self {
        self.remove(PageTableEntryFlags::WRITABLE);
        self
    }
    /// Builder: make the mapping writable.
    pub fn writable(&mut self) -> &mut Self {
        self.insert(PageTableEntryFlags::WRITABLE);
        self
    }
    /// Builder: forbid instruction fetches from the mapping (NX).
    pub fn execute_disable(&mut self) -> &mut Self {
        self.insert(PageTableEntryFlags::EXECUTE_DISABLE);
        self
    }
}
/// An entry in either table (PML4, PDPT, PD, PT)
#[derive(Clone, Copy)]
pub struct PageTableEntry {
    /// Physical memory address this entry refers, combined with flags from PageTableEntryFlags.
    physical_address_and_flags: usize,
}
impl PageTableEntry {
    /// Return the stored physical address.
    pub fn address(&self) -> usize {
        // Strip the low flag bits (offset within a 4 KiB frame) and the
        // NX bit (bit 63); everything in between is the frame address.
        self.physical_address_and_flags
            & !(BasePageSize::SIZE - 1)
            & !(PageTableEntryFlags::EXECUTE_DISABLE).bits()
    }
    /// Returns whether this entry is valid (present).
    fn is_present(&self) -> bool {
        (self.physical_address_and_flags & PageTableEntryFlags::PRESENT.bits()) != 0
    }
    /// Returns whether this entry maps a large/huge page (HUGE_PAGE bit set).
    fn is_huge(&self) -> bool {
        (self.physical_address_and_flags & PageTableEntryFlags::HUGE_PAGE.bits()) != 0
    }
    /// Returns whether this entry is accessible from user mode (Ring 3).
    fn is_user(&self) -> bool {
        (self.physical_address_and_flags & PageTableEntryFlags::USER_ACCESSIBLE.bits()) != 0
    }
    /// Mark this as a valid (present) entry and set address translation and flags.
    ///
    /// # Arguments
    ///
    /// * `physical_address` - The physical memory address this entry shall translate to
    /// * `flags` - Flags from PageTableEntryFlags (note that the PRESENT and ACCESSED flags are set automatically)
    fn set(&mut self, physical_address: usize, flags: PageTableEntryFlags) {
        if flags.contains(PageTableEntryFlags::HUGE_PAGE) {
            // HUGE_PAGE may indicate a 2 MiB or 1 GiB page.
            // We don't know this here, so we can only verify that at least the offset bits for a 2 MiB page are zero.
            assert!(
                (physical_address % LargePageSize::SIZE) == 0,
                "Physical address is not on a 2 MiB page boundary (physical_address = {:#X})",
                physical_address
            );
        } else {
            // Verify that the offset bits for a 4 KiB page are zero.
            assert!(
                (physical_address % BasePageSize::SIZE) == 0,
                "Physical address is not on a 4 KiB page boundary (physical_address = {:#X})",
                physical_address
            );
        }
        // Verify that the physical address does not exceed the CPU's physical address width.
        // CheckedShr returns None if the shift count >= bit width, hence the Some(0) check.
        assert!(
            CheckedShr::checked_shr(
                &physical_address,
                processor::get_physical_address_bits() as u32
            ) == Some(0),
            "Physical address exceeds CPU's physical address width (physical_address = {:#X})",
            physical_address
        );
        let mut flags_to_set = flags;
        flags_to_set.insert(PageTableEntryFlags::PRESENT);
        flags_to_set.insert(PageTableEntryFlags::ACCESSED);
        self.physical_address_and_flags = physical_address | flags_to_set.bits();
    }
}
/// A generic interface to support all possible page sizes.
///
/// This is defined as a subtrait of Copy to enable #[derive(Clone, Copy)] for Page.
/// Currently, deriving implementations for these traits only works if all dependent types implement it as well.
///
/// Implementors are uninhabited enums used purely at the type level.
pub trait PageSize: Copy {
    /// The page size in bytes.
    const SIZE: usize;

    /// The page table level at which a page of this size is mapped (from 0 for PT through 3 for PML4).
    /// Implemented as a numeric value to enable numeric comparisons.
    const MAP_LEVEL: usize;

    /// Any extra flag that needs to be set to map a page of this size.
    /// For example: PageTableEntryFlags::HUGE_PAGE
    const MAP_EXTRA_FLAG: PageTableEntryFlags;
}
/// A 4 KiB page mapped in the PT.
#[derive(Clone, Copy)]
pub enum BasePageSize {}
impl PageSize for BasePageSize {
    /// 0x1000 = 4096 bytes.
    const SIZE: usize = 0x1000;
    const MAP_LEVEL: usize = 0;
    /// 4 KiB pages need no extra page-table flag.
    const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::BLANK;
}

/// A 2 MiB page mapped in the PD.
#[derive(Clone, Copy)]
pub enum LargePageSize {}
impl PageSize for LargePageSize {
    /// 0x200000 = 2 MiB.
    const SIZE: usize = 0x200000;
    const MAP_LEVEL: usize = 1;
    const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}

/// A 1 GiB page mapped in the PDPT.
#[derive(Clone, Copy)]
pub enum HugePageSize {}
impl PageSize for HugePageSize {
    /// 0x40000000 = 1 GiB.
    const SIZE: usize = 0x40000000;
    const MAP_LEVEL: usize = 2;
    const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A memory page of the size given by S.
#[derive(Clone, Copy)]
struct Page<S: PageSize> {
    /// Virtual memory address of this page.
    /// This is rounded to a page size boundary on creation.
    virtual_address: usize,

    /// Required by Rust to support the S parameter (zero-sized marker).
    size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
/// Return the stored virtual address.
fn address(&self) -> usize {
self.virtual_address
}
/// Flushes this page from the TLB of this CPU.
#[inline(always)]
fn flush_from_tlb(&self) {
unsafe {
asm!("invlpg [{}]", in(reg) self.virtual_address, options(preserves_flags, nostack));
}
}
/// Returns whether the given virtual address is a valid one in the x86-64 memory model.
///
/// Current x86-64 supports only 48-bit for virtual memory addresses.
/// This is enforced by requiring bits 63 through 48 to replicate bit 47 (cf. Intel Vol. 1, 3.3.7.1).
/// As a consequence, the address space is divided into the two valid regions 0x8000_0000_0000
/// and 0xFFFF_8000_0000_0000.
///
/// Although we could make this check depend on the actual linear address width from the CPU,
/// any extension above 48-bit would require a new page table level, which we don't implement.
fn is_valid_address(virtual_address: usize) -> bool {
virtual_address < 0x8000_0000_0000 || virtual_address >= 0xFFFF_8000_0000_0000
}
/// Returns a Page including the given virtual address.
/// That means, the address is rounded down to a page size boundary.
fn including_address(virtual_address: usize) -> Self {
assert!(
Self::is_valid_address(virtual_address),
"Virtual address {:#X} is invalid",
virtual_address
);
if S::SIZE == 1024 * 1024 * 1024 {
assert!(processor::supports_1gib_pages());
}
Self {
virtual_address: align_down!(virtual_address, S::SIZE),
size: PhantomData,
}
}
/// Returns a PageIter to iterate from the given first Page to the given last Page (inclusive).
fn range(first: Self, last: Self) -> PageIter<S> {
assert!(first.virtual_address <= last.virtual_address);
PageIter {
current: first,
last: last,
}
}
/// Returns the index of this page in the table given by L.
fn table_index<L: PageTableLevel>(&self) -> usize {
assert!(L::LEVEL >= S::MAP_LEVEL);
self.virtual_address >> PAGE_BITS >> L::LEVEL * PAGE_MAP_BITS & PAGE_MAP_MASK
}
}
/// An iterator to walk through a range of pages of size S.
/// Produced by Page::range(); iteration includes the last page.
struct PageIter<S: PageSize> {
    /// Next page to yield.
    current: Page<S>,
    /// Last page of the range (inclusive).
    last: Page<S>,
}
impl<S: PageSize> Iterator for PageIter<S> {
    type Item = Page<S>;

    /// Yields pages from `current` up to and including `last`.
    fn next(&mut self) -> Option<Page<S>> {
        // Guard clause: the range is exhausted once `current` moved past `last`.
        if self.current.virtual_address > self.last.virtual_address {
            return None;
        }
        let page = self.current;
        self.current.virtual_address += S::SIZE;
        Some(page)
    }
}
/// An interface to allow for a generic implementation of struct PageTable for all 4 page tables.
/// Must be implemented by all page tables.
trait PageTableLevel {
    /// Numeric page table level (from 0 for PT through 3 for PML4) to enable numeric comparisons.
    const LEVEL: usize;
}

/// An interface for page tables with sub page tables (all except PT).
/// Having both PageTableLevel and PageTableLevelWithSubtables leverages Rust's typing system to provide
/// a subtable method only for those that have sub page tables.
///
/// Kudos to Philipp Oppermann for the trick!
trait PageTableLevelWithSubtables: PageTableLevel {
    /// The level type of the next-lower table in the hierarchy.
    type SubtableLevel;
}
/// The Page Map Level 4 (PML4) table, with numeric level 3 and PDPT subtables.
enum PML4 {}
impl PageTableLevel for PML4 {
    const LEVEL: usize = 3;
}
impl PageTableLevelWithSubtables for PML4 {
    // The table one level below a PML4 entry is a PDPT.
    type SubtableLevel = PDPT;
}

/// A Page Directory Pointer Table (PDPT), with numeric level 2 and PDT subtables.
enum PDPT {}
impl PageTableLevel for PDPT {
    const LEVEL: usize = 2;
}
impl PageTableLevelWithSubtables for PDPT {
type SubtableLevel = PD; |
/// A Page Directory (PD), with numeric level 1 and PT subtables.
enum PD {}
impl PageTableLevel for PD {
    const LEVEL: usize = 1;
}
impl PageTableLevelWithSubtables for PD {
    // The table one level below a PD entry is a PT.
    type SubtableLevel = PT;
}

/// A Page Table (PT), with numeric level 0 and no subtables.
/// PT deliberately does not implement PageTableLevelWithSubtables.
enum PT {}
impl PageTableLevel for PT {
    const LEVEL: usize = 0;
}
/// Representation of any page table (PML4, PDPT, PD, PT) in memory.
/// Parameter L supplies information for Rust's typing system to distinguish between the different tables.
struct PageTable<L> {
    /// Each page table has 512 entries (can be calculated using PAGE_MAP_BITS).
    entries: [PageTableEntry; 1 << PAGE_MAP_BITS],

    /// Required by Rust to support the L parameter (zero-sized marker).
    level: PhantomData<L>,
}
/// A trait defining methods every page table has to implement.
/// This additional trait is necessary to make use of Rust's specialization feature and provide a default
/// implementation of some methods.
trait PageTableMethods {
    /// Looks up the entry mapping `page`, descending through subtables as needed.
    fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry>;

    /// Writes the mapping for `page` directly into this table (no descent).
    /// Returns whether an existing entry was overwritten.
    fn map_page_in_this_table<S: PageSize>(
        &mut self,
        page: Page<S>,
        physical_address: usize,
        flags: PageTableEntryFlags,
    ) -> bool;

    /// Maps `page`, descending to (and creating) subtables as needed.
    /// Returns whether an existing entry was overwritten.
    fn map_page<S: PageSize>(
        &mut self,
        page: Page<S>,
        physical_address: usize,
        flags: PageTableEntryFlags,
    ) -> bool;

    /// Releases user-accessible mappings reachable from this table.
    fn drop_user_space(&mut self);
}
impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
    /// Maps a single page in this table to the given physical address.
    /// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
    ///
    /// Must only be called if a page of this size is mapped at this page table level!
    fn map_page_in_this_table<S: PageSize>(
        &mut self,
        page: Page<S>,
        physical_address: usize,
        flags: PageTableEntryFlags,
    ) -> bool {
        assert!(L::LEVEL == S::MAP_LEVEL);
        let index = page.table_index::<L>();
        // Remember whether a live translation is being overwritten; if so the
        // stale TLB entry is invalidated below.
        let flush = self.entries[index].is_present();

        self.entries[index].set(
            physical_address,
            PageTableEntryFlags::DIRTY | S::MAP_EXTRA_FLAG | flags,
        );

        if flush {
            page.flush_from_tlb();
        }

        flush
    }

    /// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
    ///
    /// This is the default implementation called only for PT.
    /// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
    default fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
        assert!(L::LEVEL == S::MAP_LEVEL);
        let index = page.table_index::<L>();

        if self.entries[index].is_present() {
            Some(self.entries[index])
        } else {
            None
        }
    }

    // Default reached only at PT level (levels with subtables override it).
    // Frees the page frame behind every present, user-accessible entry.
    default fn drop_user_space(&mut self) {
        let last = 1 << PAGE_MAP_BITS;
        for index in 0..last {
            if self.entries[index].is_present() && self.entries[index].is_user() {
                // NOTE(review): assumes every user mapping at this level is a
                // 4 KiB frame (see "user space uses only 4KB pages" below).
                let physical_address = self.entries[index].address();
                debug!("Free page frame at 0x{:x}", physical_address);
                physicalmem::deallocate(physical_address, BasePageSize::SIZE);
            }
        }
    }

    /// Maps a single page to the given physical address.
    /// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
    ///
    /// This is the default implementation that just calls the map_page_in_this_table method.
    /// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
    default fn map_page<S: PageSize>(
        &mut self,
        page: Page<S>,
        physical_address: usize,
        flags: PageTableEntryFlags,
    ) -> bool {
        self.map_page_in_this_table::<S>(page, physical_address, flags)
    }
}
impl<L: PageTableLevelWithSubtables> PageTableMethods for PageTable<L>
where
    L::SubtableLevel: PageTableLevel,
{
    /// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
    ///
    /// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
    /// It overrides the default implementation above.
    fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
        assert!(L::LEVEL >= S::MAP_LEVEL);
        let index = page.table_index::<L>();

        if self.entries[index].is_present() {
            if L::LEVEL > S::MAP_LEVEL {
                // The page is mapped further down; recurse into the subtable.
                let subtable = self.subtable::<S>(page);
                subtable.get_page_table_entry::<S>(page)
            } else {
                Some(self.entries[index])
            }
        } else {
            None
        }
    }

    // Recursively frees user-space frames below this table.
    fn drop_user_space(&mut self) {
        let last = 1 << PAGE_MAP_BITS;
        // NOTE(review): subtable addresses are computed via the recursive
        // page-table mapping (shift this table's own virtual address up by one
        // level, splice in the entry index) — only valid while the recursive
        // PML4 slot installed in init() is in place; confirm.
        let table_address = self as *const PageTable<L> as usize;

        for index in 0..last {
            if self.entries[index].is_present() && self.entries[index].is_user() {
                // currently, the user space uses only 4KB pages
                if L::LEVEL > BasePageSize::MAP_LEVEL {
                    // Calculate the address of the subtable.
                    let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
                    let subtable =
                        unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
                    subtable.drop_user_space();

                    //let physical_address = self.entries[index].address();
                    //debug!("Free page table at 0x{:x}", physical_address);
                    //physicalmem::deallocate(physical_address, BasePageSize::SIZE);
                }
            }
        }
    }

    /// Maps a single page to the given physical address.
    /// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
    ///
    /// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
    /// It overrides the default implementation above.
    fn map_page<S: PageSize>(
        &mut self,
        page: Page<S>,
        physical_address: usize,
        flags: PageTableEntryFlags,
    ) -> bool {
        assert!(L::LEVEL >= S::MAP_LEVEL);

        if L::LEVEL > S::MAP_LEVEL {
            let index = page.table_index::<L>();

            // Does the table exist yet?
            if !self.entries[index].is_present() {
                // Allocate a single 4 KiB page for the new entry and mark it as a valid, writable subtable.
                let pt_addr = physicalmem::allocate(BasePageSize::SIZE);
                if flags.contains(PageTableEntryFlags::USER_ACCESSIBLE) {
                    // Intermediate tables on the path to a user page must be
                    // user-accessible as well.
                    self.entries[index].set(
                        pt_addr,
                        PageTableEntryFlags::WRITABLE | PageTableEntryFlags::USER_ACCESSIBLE,
                    );
                } else {
                    self.entries[index].set(pt_addr, PageTableEntryFlags::WRITABLE);
                }

                // Mark all entries as unused in the newly created table.
                let subtable = self.subtable::<S>(page);
                for entry in subtable.entries.iter_mut() {
                    entry.physical_address_and_flags = 0;
                }

                subtable.map_page::<S>(page, physical_address, flags)
            } else {
                let subtable = self.subtable::<S>(page);
                subtable.map_page::<S>(page, physical_address, flags)
            }
        } else {
            // Calling the default implementation from a specialized one is not supported (yet),
            // so we have to resort to an extra function.
            self.map_page_in_this_table::<S>(page, physical_address, flags)
        }
    }
}
impl<L: PageTableLevelWithSubtables> PageTable<L>
where
    L::SubtableLevel: PageTableLevel,
{
    /// Returns the next subtable for the given page in the page table hierarchy.
    ///
    /// Must only be called if a page of this size is mapped in a subtable!
    ///
    /// NOTE(review): hands out a `&mut` derived from `&self` through a raw
    /// pointer — confirm the aliasing here is acceptable for this kernel.
    fn subtable<S: PageSize>(&self, page: Page<S>) -> &mut PageTable<L::SubtableLevel> {
        assert!(L::LEVEL > S::MAP_LEVEL);

        // Calculate the address of the subtable.
        // Relies on the recursive page-table mapping: shifting this table's own
        // virtual address up by PAGE_MAP_BITS and splicing in the entry index
        // yields the virtual address of the next-level table.
        let index = page.table_index::<L>();
        let table_address = self as *const PageTable<L> as usize;
        let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
        unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) }
    }

    /// Maps a continuous range of pages.
    ///
    /// # Arguments
    ///
    /// * `range` - The range of pages of size S
    /// * `physical_address` - First physical address to map these pages to
    /// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or EXECUTE_DISABLE).
    /// The PRESENT, ACCESSED, and DIRTY flags are already set automatically.
    fn map_pages<S: PageSize>(
        &mut self,
        range: PageIter<S>,
        physical_address: usize,
        flags: PageTableEntryFlags,
    ) {
        let mut current_physical_address = physical_address;

        for page in range {
            self.map_page(page, current_physical_address, flags);
            current_physical_address += S::SIZE;
        }
    }

    // Inherent method; intended only for the root table (asserted below).
    // Frees user subtables and their frames, then the subtable pages themselves.
    fn drop_user_space(&mut self) {
        assert!(L::LEVEL == PML4::LEVEL);

        // the last entry is required to get access to the page tables
        // (it is the recursive self-reference and must not be freed).
        let last = (1 << PAGE_MAP_BITS) - 1;
        let table_address = self as *const PageTable<L> as usize;

        for index in 0..last {
            if self.entries[index].is_present() && self.entries[index].is_user() {
                // Calculate the address of the subtable.
                let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
                let subtable =
                    unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
                subtable.drop_user_space();

                let physical_address = self.entries[index].address();
                debug!("Free page table at 0x{:x}", physical_address);
                physicalmem::deallocate(physical_address, BasePageSize::SIZE);
            }
        }
    }
}
/// x86-64 page-fault (#PF) exception handler.
///
/// Faults above USER_SPACE_START are treated as demand paging: a fresh zeroed
/// 4 KiB frame is mapped at the faulting address. Any other fault is fatal for
/// the current task.
pub extern "x86-interrupt" fn page_fault_handler(
    stack_frame: irq::ExceptionStackFrame,
    error_code: u64,
) {
    // CR2 holds the faulting virtual address.
    let mut virtual_address = unsafe { controlregs::cr2() };

    // do we have to create the user-space stack?
    if virtual_address > USER_SPACE_START {
        virtual_address = align_down!(virtual_address, BasePageSize::SIZE);

        // Ok, user space want to have memory (for the stack / heap)
        let physical_address =
            physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);

        debug!(
            "Map 0x{:x} into the user space at 0x{:x}",
            physical_address, virtual_address
        );

        // User data pages are writable but never executable.
        map::<BasePageSize>(
            virtual_address,
            physical_address,
            1,
            PageTableEntryFlags::WRITABLE
                | PageTableEntryFlags::USER_ACCESSIBLE
                | PageTableEntryFlags::EXECUTE_DISABLE,
        );

        unsafe {
            // clear new page
            write_bytes(virtual_address as *mut u8, 0x00, BasePageSize::SIZE);

            // clear cr2 to signalize that the pagefault is solved by the pagefault handler
            controlregs::cr2_write(0);
        }
    } else {
        // Anything else is an error!
        let pferror = PageFaultError::from_bits_truncate(error_code as u32);

        error!("Page Fault (#PF) Exception: {:#?}", stack_frame);
        error!(
            "virtual_address = {:#X}, page fault error = {}",
            virtual_address, pferror
        );

        // clear cr2 to signalize that the pagefault is solved by the pagefault handler
        unsafe {
            controlregs::cr2_write(0);
        }

        // Terminate the offending task.
        scheduler::abort();
    }
}
/// Builds an inclusive iterator over the `count` pages of size S that cover
/// the region starting at `virtual_address`.
fn get_page_range<S: PageSize>(virtual_address: usize, count: usize) -> PageIter<S> {
    let start = Page::<S>::including_address(virtual_address);
    let end = Page::<S>::including_address(virtual_address + (count - 1) * S::SIZE);
    Page::range(start, end)
}
/// Looks up the page table entry covering `virtual_address`, if one is present.
pub fn get_page_table_entry<S: PageSize>(virtual_address: usize) -> Option<PageTableEntry> {
    debug!("Looking up Page Table Entry for {:#X}", virtual_address);

    let root_pagetable = unsafe { &mut *PML4_ADDRESS };
    root_pagetable.get_page_table_entry(Page::<S>::including_address(virtual_address))
}
/// Resolves `virtual_address` to its physical address via the page tables.
/// Panics if the address is not mapped at page size S.
pub fn get_physical_address<S: PageSize>(virtual_address: usize) -> usize {
    debug!("Getting physical address for {:#X}", virtual_address);

    let root_pagetable = unsafe { &mut *PML4_ADDRESS };
    let entry = root_pagetable
        .get_page_table_entry(Page::<S>::including_address(virtual_address))
        .expect("Entry not present");

    // Combine the page frame base with the offset inside the page.
    entry.address() | (virtual_address & (S::SIZE - 1))
}
/// Translate a virtual memory address to a physical one.
///
/// NOTE(review): the original doc claimed the correct page size is chosen
/// automatically, but this always resolves through BasePageSize (4 KiB)
/// mappings — confirm callers never pass huge-page-backed addresses.
pub fn virtual_to_physical(virtual_address: usize) -> usize {
    get_physical_address::<BasePageSize>(virtual_address)
}
/// "Unmaps" `count` pages of size S starting at `virtual_address`.
pub fn unmap<S: PageSize>(virtual_address: usize, count: usize) {
    debug!(
        "Unmapping virtual address {:#X} ({} pages)",
        virtual_address, count
    );

    let range = get_page_range::<S>(virtual_address, count);
    let root_pagetable = unsafe { &mut *PML4_ADDRESS };
    // NOTE(review): this remaps the range to physical address 0 with BLANK
    // flags, but PageTableEntry::set() still inserts PRESENT | ACCESSED, so the
    // entries remain present and point at frame 0 — confirm this is the
    // intended "unmap" semantics.
    root_pagetable.map_pages(range, 0, PageTableEntryFlags::BLANK);
}
/// Maps `count` pages of size S, starting at `virtual_address`, to the
/// contiguous physical region beginning at `physical_address`.
pub fn map<S: PageSize>(
    virtual_address: usize,
    physical_address: usize,
    count: usize,
    flags: PageTableEntryFlags,
) {
    debug!(
        "Mapping virtual address {:#X} to physical address {:#X} ({} pages)",
        virtual_address, physical_address, count
    );

    let root = unsafe { &mut *PML4_ADDRESS };
    root.map_pages(
        get_page_range::<S>(virtual_address, count),
        physical_address,
        flags,
    );
}
// Physical address of the kernel's root page table (PML4); written once in init().
static mut ROOT_PAGE_TABLE: usize = 0;

/// Returns the physical address of the kernel's root page table.
#[inline(always)]
pub fn get_kernel_root_page_table() -> usize {
    unsafe { ROOT_PAGE_TABLE }
}
/// Tears down all user-space mappings, starting from the root page table.
pub fn drop_user_space() {
    let pml4 = unsafe { &mut *PML4_ADDRESS };
    pml4.drop_user_space();
}
// Just a workaround to explain the difference between
// kernel and user space.
/// Creates the first-level page table (PML4) for a user-level task and
/// returns its physical address.
pub fn create_usr_pgd() -> usize {
    debug!("Create 1st level page table for the user-level task");

    unsafe {
        // One frame for the new PML4 plus a temporary virtual mapping so it can
        // be initialized from kernel space.
        let physical_address =
            physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
        let user_page_table: usize =
            virtualmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);

        debug!(
            "Map page frame 0x{:x} at virtual address 0x{:x}",
            physical_address, user_page_table
        );
        map::<BasePageSize>(
            user_page_table,
            physical_address,
            1,
            PageTableEntryFlags::WRITABLE | PageTableEntryFlags::EXECUTE_DISABLE,
        );

        // Start from an all-zero (non-present) table.
        write_bytes(user_page_table as *mut u8, 0x00, BasePageSize::SIZE);

        // Copy the low PML4 entries from the current (recursive) page table —
        // presumably so the kernel mappings stay visible in the user address
        // space; confirm against the bootloader's layout.
        let recursive_pgt = BOOT_INFO.unwrap().recursive_page_table_addr as *const u64;
        let recursive_pgt_idx = BOOT_INFO.unwrap().recursive_index();
        let pml4 = user_page_table as *mut u64;
        for i in 0..recursive_pgt_idx + 2 {
            *pml4.offset(i.try_into().unwrap()) = *recursive_pgt.offset(i.try_into().unwrap());
        }

        // Install the recursive self-reference in the last slot so lookups
        // through this table keep working.
        let pml4 =
            (user_page_table + BasePageSize::SIZE - size_of::<usize>()) as *mut PageTableEntry;
        (*pml4).set(physical_address, PageTableEntryFlags::WRITABLE);

        // unmap page table
        unmap::<BasePageSize>(user_page_table, 1);
        virtualmem::deallocate(user_page_table, BasePageSize::SIZE);

        scheduler::set_root_page_table(physical_address);

        physical_address
    }
}
pub fn init() {
let recursive_pgt = unsafe { BOOT_INFO.unwrap().recursive_page_table_addr } as *mut u64;
let recursive_pgt_idx = unsafe { BOOT_INFO.unwrap().recursive_index() };
debug!(
"Found recursive_page_table_addr at 0x{:x}",
recursive_pgt as u64
);
debug!("Recursive index: {}", recursive_pgt_idx);
unsafe {
ROOT_PAGE_TABLE = *recursive_pgt.offset(recursive_pgt_idx.try_into().unwrap()) as usize
& !(BasePageSize::SIZE - 1);
*recursive_pgt.offset(511) = *recursive_pgt.offset(recursive_pgt_idx.try_into().unwrap());
for i in recursive_pgt_idx + 2..511 {
*recursive_pgt.offset(i.try_into().unwrap()) = 0;
}
//flush TLB
controlregs::cr3_write(controlregs::cr3());
}
} | } | random_line_split |
inherents.rs | use nimiq_account::StakingContract;
use nimiq_block::{ForkProof, MacroBlock, MacroHeader, SkipBlockInfo};
use nimiq_blockchain_interface::AbstractBlockchain;
use nimiq_database as db;
use nimiq_keys::Address;
use nimiq_primitives::{
account::AccountType,
coin::Coin,
policy::Policy,
slots_allocation::{JailedValidator, PenalizedSlot},
};
use nimiq_transaction::{inherent::Inherent, reward::RewardTransaction};
use nimiq_vrf::{AliasMethod, VrfUseCase};
use crate::{blockchain_state::BlockchainState, reward::block_reward_for_batch, Blockchain};
/// Implements methods that create inherents.
impl Blockchain {
/// Collects all inherents that belong in the given macro block: batch
/// finalization (always) plus epoch finalization on election blocks.
pub fn create_macro_block_inherents(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
    // Every macro block is the end of a batch, so the batch is always finalized.
    let mut inherents = self.finalize_previous_batch(macro_block);

    // Election blocks additionally end an epoch.
    // We can rely on `state` here, since we cannot revert macro blocks.
    if Policy::is_election_block_at(macro_block.block_number()) {
        inherents.push(self.finalize_previous_epoch());
    }

    inherents
}
/// Given fork proofs and (or) a skip block, it returns the respective punishment inherents. It expects
/// verified fork proofs and (or) skip block.
pub fn create_punishment_inherents(
    &self,
    block_number: u32,
    fork_proofs: &[ForkProof],
    skip_block_info: Option<SkipBlockInfo>,
    txn_option: Option<&db::TransactionProxy>,
) -> Vec<Inherent> {
    // One jail inherent per reported fork, in report order.
    let mut punishments: Vec<Inherent> = fork_proofs
        .iter()
        .map(|fork_proof| {
            trace!("Creating inherent from fork proof: {:?}", fork_proof);
            self.inherent_from_fork_proof(block_number, fork_proof, txn_option)
        })
        .collect();

    // Followed by at most one penalize inherent for a skipped block.
    if let Some(skip_block_info) = skip_block_info {
        trace!("Creating inherent from skip block: {:?}", skip_block_info);
        punishments.push(self.inherent_from_skip_block_info(&skip_block_info, txn_option));
    }

    punishments
}
/// It creates a jail inherent from a fork proof. It expects a *verified* fork proof!
pub fn inherent_from_fork_proof(
    &self,
    reporting_block: u32, // PITODO: we can get it from the blockchain, should be head block number + 1
    fork_proof: &ForkProof,
    txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
    // Get the slot owner and slot number for this block number.
    // The proposer is derived from the forked block's number and the VRF seed
    // that preceded it.
    let proposer_slot = self
        .get_proposer_at(
            fork_proof.header1.block_number,
            fork_proof.header1.block_number,
            fork_proof.prev_vrf_seed.entropy(),
            txn_option,
        )
        .expect("Couldn't calculate slot owner!");

    // If the reporting block is in a new epoch, we check if the proposer is still a validator in this epoch
    // and retrieve its new slots.
    // `None` means either "same epoch" or "no longer a validator".
    let new_epoch_slot_range = if Policy::epoch_at(reporting_block)
        > Policy::epoch_at(fork_proof.header1.block_number)
    {
        self.current_validators()
            .expect("We need to have validators")
            .get_validator_by_address(&proposer_slot.validator.address)
            .map(|validator| validator.slots.clone())
    } else {
        None
    };

    // Create the JailedValidator struct.
    let jailed_validator = JailedValidator {
        slots: proposer_slot.validator.slots,
        validator_address: proposer_slot.validator.address,
        offense_event_block: fork_proof.header1.block_number,
    };

    // Create the corresponding jail inherent.
    Inherent::Jail {
        jailed_validator,
        new_epoch_slot_range,
    }
}
/// It creates a penalize inherent from a skip block. It expects a *verified* skip block!
pub fn inherent_from_skip_block_info(
    &self,
    skip_block_info: &SkipBlockInfo,
    txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
    let block_number = skip_block_info.block_number;

    // Resolve which validator slot should have produced the skipped block.
    let proposer = self
        .get_proposer_at(
            block_number,
            block_number,
            skip_block_info.vrf_entropy.clone(),
            txn_option,
        )
        .expect("Couldn't calculate slot owner!");

    debug!(
        address = %proposer.validator.address,
        "Penalize inherent from skip block"
    );

    // Penalize exactly the slot that failed to produce the block.
    Inherent::Penalize {
        slot: PenalizedSlot {
            slot: proposer.number,
            validator_address: proposer.validator.address,
            offense_event_block: block_number,
        },
    }
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn finalize_previous_batch(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
    // Special case for first batch: Batch 0 is finalized by definition.
    if Policy::batch_at(macro_block.block_number()) - 1 == 0 {
        return vec![];
    }

    // Reward inherents come from the macro body when present; otherwise the
    // reward transactions are recomputed from the current chain state.
    let mut inherents: Vec<Inherent> = match macro_block.body.as_ref() {
        Some(body) => body.transactions.iter().map(Inherent::from).collect(),
        None => self
            .create_reward_transactions(
                self.state(),
                &macro_block.header,
                &self.get_staking_contract(),
            )
            .iter()
            .map(Inherent::from)
            .collect(),
    };

    // Push FinalizeBatch inherent to update StakingContract.
    inherents.push(Inherent::FinalizeBatch);

    inherents
}
/// Creates the reward transactions that distribute the block reward and
/// transaction fees of the previous batch among the validators' reward
/// addresses (plus one burn transaction for unclaimable rewards).
pub fn create_reward_transactions(
    &self,
    state: &BlockchainState,
    macro_header: &MacroHeader,
    staking_contract: &StakingContract,
) -> Vec<RewardTransaction> {
    let prev_macro_info = &state.macro_info;

    // Special case for first batch: Batch 0 is finalized by definition.
    if Policy::batch_at(macro_header.block_number) - 1 == 0 {
        return vec![];
    }

    // Get validator slots
    // NOTE: Fields `current_slots` and `previous_slots` are expected to always be set.
    let validator_slots = if Policy::first_batch_of_epoch(macro_header.block_number) {
        state
            .previous_slots
            .as_ref()
            .expect("Slots for last batch are missing")
    } else {
        state
            .current_slots
            .as_ref()
            .expect("Slots for current batch are missing")
    };

    // Calculate the slots that will receive rewards.
    // Rewards are for the previous batch (to give validators time to report misbehavior)
    let penalized_set = staking_contract
        .punished_slots
        .previous_batch_punished_slots();

    // Total reward for the previous batch
    let block_reward = block_reward_for_batch(
        macro_header,
        &prev_macro_info.head.unwrap_macro_ref().header,
        self.genesis_supply,
        self.genesis_timestamp,
    );

    let tx_fees = prev_macro_info.cum_tx_fees;

    let reward_pot = block_reward + tx_fees;

    // Distribute reward between all slots and calculate the remainder
    let slot_reward = reward_pot / Policy::SLOTS as u64;
    let remainder = reward_pot % Policy::SLOTS as u64;

    // The first slot number of the current validator
    let mut first_slot_number = 0;

    // Peekable iterator to collect penalized slots for validator
    let mut penalized_set_iter = penalized_set.iter().peekable();

    // All accepted inherents.
    let mut transactions = Vec::new();

    // Remember the number of eligible slots that a validator had (that was able to accept the inherent)
    let mut num_eligible_slots_for_accepted_tx = Vec::new();

    // Remember that the total amount of reward must be burned. The reward for a slot is burned
    // either because the slot was penalized or because the corresponding validator was unable to
    // accept the inherent.
    let mut burned_reward = Coin::ZERO;

    // Compute inherents
    for validator_slot in validator_slots.iter() {
        // The interval of slot numbers for the current slot band is
        // [first_slot_number, last_slot_number). So it actually doesn't include
        // `last_slot_number`.
        let last_slot_number = first_slot_number + validator_slot.num_slots();

        // Compute the number of punishments for this validator slot band.
        // The penalized set is consumed in ascending slot order, so peeking
        // lets each band claim exactly its own penalized slots.
        let mut num_eligible_slots = validator_slot.num_slots();
        let mut num_penalized_slots = 0;

        while let Some(next_penalized_slot) = penalized_set_iter.peek() {
            let next_penalized_slot = *next_penalized_slot as u16;
            assert!(next_penalized_slot >= first_slot_number);
            if next_penalized_slot < last_slot_number {
                assert!(num_eligible_slots > 0);
                penalized_set_iter.next();
                num_eligible_slots -= 1;
                num_penalized_slots += 1;
            } else {
                break;
            }
        }

        // Compute reward from slot reward and number of eligible slots. Also update the burned
        // reward from the number of penalized slots.
        let reward = slot_reward
            .checked_mul(num_eligible_slots as u64)
            .expect("Overflow in reward");
        burned_reward += slot_reward
            .checked_mul(num_penalized_slots as u64)
            .expect("Overflow in reward");

        // Do not create reward transactions for zero rewards
        if !reward.is_zero() {
            // Create inherent for the reward.
            // NOTE(review): `self.get_staking_contract()` and
            // `self.read_transaction()` are re-fetched on every loop iteration
            // even though a `staking_contract` parameter is already available —
            // looks hoistable out of the loop; confirm.
            let staking_contract = self.get_staking_contract();
            let data_store = self.get_staking_contract_store();
            let txn = self.read_transaction();
            let validator = staking_contract
                .get_validator(&data_store.read(&txn), &validator_slot.address)
                .expect("Couldn't find validator in the accounts trie when paying rewards!");

            let tx: RewardTransaction = RewardTransaction {
                recipient: validator.reward_address.clone(),
                value: reward,
            };

            // Test whether account will accept inherent. If it can't then the reward will be
            // burned.
            // TODO Improve this check: it assumes that only BasicAccounts can receive transactions.
            let account = state.accounts.get_complete(&tx.recipient, Some(&txn));
            if account.account_type() == AccountType::Basic {
                num_eligible_slots_for_accepted_tx.push(num_eligible_slots);
                transactions.push(tx);
            } else {
                debug!(
                    target_address = %tx.recipient,
                    reward = %tx.value,
                    "Can't accept batch reward"
                );
                burned_reward += reward;
            }
        }

        // Update first_slot_number for next iteration
        first_slot_number = last_slot_number;
    }

    // Check that number of accepted inherents is equal to length of the map that gives us the
    // corresponding number of slots for that staker (which should be equal to the number of
    // validators that will receive rewards).
    assert_eq!(transactions.len(), num_eligible_slots_for_accepted_tx.len());

    // Get RNG from last block's seed and build lookup table based on number of eligible slots.
    let mut rng = macro_header.seed.rng(VrfUseCase::RewardDistribution);
    let lookup = AliasMethod::new(num_eligible_slots_for_accepted_tx);

    // Randomly give remainder to one accepting slot. We don't bother to distribute it over all
    // accepting slots because the remainder is always at most SLOTS - 1 Lunas.
    // NOTE(review): if every reward was burned, `transactions` is empty here
    // and the indexing below would panic — confirm this state is unreachable.
    let index = lookup.sample(&mut rng);
    transactions[index].value += remainder;

    // Create the inherent for the burned reward.
    if burned_reward > Coin::ZERO {
        let tx = RewardTransaction {
            recipient: Address::burn_address(),
            value: burned_reward,
        };
        transactions.push(tx);
    }

    transactions
}
/// Creates the inherent to finalize an epoch. The inherent is for updating the StakingContract.
pub fn | (&self) -> Inherent {
// Create the FinalizeEpoch inherent.
Inherent::FinalizeEpoch
}
}
| finalize_previous_epoch | identifier_name |
inherents.rs | use nimiq_account::StakingContract;
use nimiq_block::{ForkProof, MacroBlock, MacroHeader, SkipBlockInfo};
use nimiq_blockchain_interface::AbstractBlockchain;
use nimiq_database as db;
use nimiq_keys::Address;
use nimiq_primitives::{
account::AccountType,
coin::Coin,
policy::Policy,
slots_allocation::{JailedValidator, PenalizedSlot},
};
use nimiq_transaction::{inherent::Inherent, reward::RewardTransaction};
use nimiq_vrf::{AliasMethod, VrfUseCase};
use crate::{blockchain_state::BlockchainState, reward::block_reward_for_batch, Blockchain};
/// Implements methods that create inherents.
impl Blockchain {
/// Collects all inherents for the given macro block: batch finalization
/// (always) plus epoch finalization on election blocks.
pub fn create_macro_block_inherents(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
    let mut inherents: Vec<Inherent> = vec![];

    // Every macro block is the end of a batch, so we need to finalize the batch.
    inherents.append(&mut self.finalize_previous_batch(macro_block));

    // If this block is an election block, we also need to finalize the epoch.
    if Policy::is_election_block_at(macro_block.block_number()) {
        // On election the previous epoch needs to be finalized.
        // We can rely on `state` here, since we cannot revert macro blocks.
        inherents.push(self.finalize_previous_epoch());
    }

    inherents
}
/// Given fork proofs and (or) a skip block, it returns the respective punishment inherents. It expects
/// verified fork proofs and (or) skip block.
///
/// Returns one `Jail` inherent per fork proof followed by at most one
/// `Penalize` inherent for the skipped block.
pub fn create_punishment_inherents(
    &self,
    block_number: u32,
    fork_proofs: &[ForkProof],
    skip_block_info: Option<SkipBlockInfo>,
    txn_option: Option<&db::TransactionProxy>,
) -> Vec<Inherent> {
    let mut inherents = vec![];

    for fork_proof in fork_proofs {
        trace!("Creating inherent from fork proof: {:?}", fork_proof);
        inherents.push(self.inherent_from_fork_proof(block_number, fork_proof, txn_option));
    }

    if let Some(skip_block_info) = skip_block_info {
        trace!("Creating inherent from skip block: {:?}", skip_block_info);
        inherents.push(self.inherent_from_skip_block_info(&skip_block_info, txn_option));
    }

    inherents
}
/// It creates a jail inherent from a fork proof. It expects a *verified* fork proof!
///
/// (Restored: the `} else {` between the epoch-check arms was destroyed by
/// dataset extraction; reconstructed from the intact duplicate of this method.)
pub fn inherent_from_fork_proof(
    &self,
    reporting_block: u32, // PITODO: we can get it from the blockchain, should be head block number + 1
    fork_proof: &ForkProof,
    txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
    // Get the slot owner and slot number for this block number.
    let proposer_slot = self
        .get_proposer_at(
            fork_proof.header1.block_number,
            fork_proof.header1.block_number,
            fork_proof.prev_vrf_seed.entropy(),
            txn_option,
        )
        .expect("Couldn't calculate slot owner!");

    // If the reporting block is in a new epoch, we check if the proposer is still a validator in this epoch
    // and retrieve its new slots.
    let new_epoch_slot_range = if Policy::epoch_at(reporting_block)
        > Policy::epoch_at(fork_proof.header1.block_number)
    {
        self.current_validators()
            .expect("We need to have validators")
            .get_validator_by_address(&proposer_slot.validator.address)
            .map(|validator| validator.slots.clone())
    } else {
        None
    };

    // Create the JailedValidator struct.
    let jailed_validator = JailedValidator {
        slots: proposer_slot.validator.slots,
        validator_address: proposer_slot.validator.address,
        offense_event_block: fork_proof.header1.block_number,
    };

    // Create the corresponding jail inherent.
    Inherent::Jail {
        jailed_validator,
        new_epoch_slot_range,
    }
}
/// It creates a penalize inherent from a skip block. It expects a *verified* skip block!
pub fn inherent_from_skip_block_info(
&self,
skip_block_info: &SkipBlockInfo,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
skip_block_info.block_number,
skip_block_info.block_number,
skip_block_info.vrf_entropy.clone(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
debug!(
address = %proposer_slot.validator.address,
"Penalize inherent from skip block"
);
// Create the PenalizedSlot struct.
let slot = PenalizedSlot {
slot: proposer_slot.number,
validator_address: proposer_slot.validator.address,
offense_event_block: skip_block_info.block_number,
};
// Create the corresponding penalize inherent.
Inherent::Penalize { slot }
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn finalize_previous_batch(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_block.block_number()) - 1 == 0 {
return vec![];
}
// To get the inherents we either fetch the reward transactions from the macro body;
// or we create the transactions when there is no macro body.
let mut inherents: Vec<Inherent> = if let Some(body) = macro_block.body.as_ref() {
body.transactions.iter().map(Inherent::from).collect()
} else {
self.create_reward_transactions(
self.state(),
¯o_block.header,
&self.get_staking_contract(),
)
.iter()
.map(Inherent::from)
.collect()
};
// Push FinalizeBatch inherent to update StakingContract.
inherents.push(Inherent::FinalizeBatch);
inherents
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn create_reward_transactions(
&self,
state: &BlockchainState,
macro_header: &MacroHeader,
staking_contract: &StakingContract,
) -> Vec<RewardTransaction> {
let prev_macro_info = &state.macro_info;
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_header.block_number) - 1 == 0 {
return vec![];
}
// Get validator slots
// NOTE: Fields `current_slots` and `previous_slots` are expected to always be set.
let validator_slots = if Policy::first_batch_of_epoch(macro_header.block_number) {
state
.previous_slots
.as_ref()
.expect("Slots for last batch are missing")
} else {
state
.current_slots
.as_ref()
.expect("Slots for current batch are missing")
};
// Calculate the slots that will receive rewards.
// Rewards are for the previous batch (to give validators time to report misbehavior)
let penalized_set = staking_contract
.punished_slots
.previous_batch_punished_slots();
// Total reward for the previous batch
let block_reward = block_reward_for_batch(
macro_header,
&prev_macro_info.head.unwrap_macro_ref().header,
self.genesis_supply,
self.genesis_timestamp,
);
let tx_fees = prev_macro_info.cum_tx_fees;
let reward_pot = block_reward + tx_fees;
// Distribute reward between all slots and calculate the remainder
let slot_reward = reward_pot / Policy::SLOTS as u64;
let remainder = reward_pot % Policy::SLOTS as u64;
// The first slot number of the current validator
let mut first_slot_number = 0;
// Peekable iterator to collect penalized slots for validator
let mut penalized_set_iter = penalized_set.iter().peekable();
// All accepted inherents.
let mut transactions = Vec::new();
// Remember the number of eligible slots that a validator had (that was able to accept the inherent)
let mut num_eligible_slots_for_accepted_tx = Vec::new();
// Remember that the total amount of reward must be burned. The reward for a slot is burned
// either because the slot was penalized or because the corresponding validator was unable to
// accept the inherent.
let mut burned_reward = Coin::ZERO;
// Compute inherents
for validator_slot in validator_slots.iter() {
// The interval of slot numbers for the current slot band is
// [first_slot_number, last_slot_number). So it actually doesn't include
// `last_slot_number`.
let last_slot_number = first_slot_number + validator_slot.num_slots();
// Compute the number of punishments for this validator slot band.
let mut num_eligible_slots = validator_slot.num_slots();
let mut num_penalized_slots = 0;
while let Some(next_penalized_slot) = penalized_set_iter.peek() {
let next_penalized_slot = *next_penalized_slot as u16;
assert!(next_penalized_slot >= first_slot_number);
if next_penalized_slot < last_slot_number {
assert!(num_eligible_slots > 0);
penalized_set_iter.next();
num_eligible_slots -= 1;
num_penalized_slots += 1;
} else {
break;
}
}
// Compute reward from slot reward and number of eligible slots. Also update the burned
// reward from the number of penalized slots.
let reward = slot_reward
.checked_mul(num_eligible_slots as u64)
.expect("Overflow in reward");
burned_reward += slot_reward
.checked_mul(num_penalized_slots as u64)
.expect("Overflow in reward");
// Do not create reward transactions for zero rewards
if !reward.is_zero() {
// Create inherent for the reward.
let staking_contract = self.get_staking_contract();
let data_store = self.get_staking_contract_store();
let txn = self.read_transaction();
let validator = staking_contract
.get_validator(&data_store.read(&txn), &validator_slot.address)
.expect("Couldn't find validator in the accounts trie when paying rewards!");
let tx: RewardTransaction = RewardTransaction {
recipient: validator.reward_address.clone(),
value: reward,
};
// Test whether account will accept inherent. If it can't then the reward will be
// burned.
// TODO Improve this check: it assumes that only BasicAccounts can receive transactions.
let account = state.accounts.get_complete(&tx.recipient, Some(&txn));
if account.account_type() == AccountType::Basic {
num_eligible_slots_for_accepted_tx.push(num_eligible_slots);
transactions.push(tx);
} else {
debug!(
target_address = %tx.recipient,
reward = %tx.value,
"Can't accept batch reward"
);
burned_reward += reward;
}
}
// Update first_slot_number for next iteration
first_slot_number = last_slot_number;
}
// Check that number of accepted inherents is equal to length of the map that gives us the
// corresponding number of slots for that staker (which should be equal to the number of
// validators that will receive rewards).
assert_eq!(transactions.len(), num_eligible_slots_for_accepted_tx.len());
// Get RNG from last block's seed and build lookup table based on number of eligible slots.
let mut rng = macro_header.seed.rng(VrfUseCase::RewardDistribution);
let lookup = AliasMethod::new(num_eligible_slots_for_accepted_tx);
// Randomly give remainder to one accepting slot. We don't bother to distribute it over all
// accepting slots because the remainder is always at most SLOTS - 1 Lunas.
let index = lookup.sample(&mut rng);
transactions[index].value += remainder;
// Create the inherent for the burned reward.
if burned_reward > Coin::ZERO {
let tx = RewardTransaction {
recipient: Address::burn_address(),
value: burned_reward,
};
transactions.push(tx);
}
transactions
}
/// Creates the inherent to finalize an epoch. The inherent is for updating the StakingContract.
pub fn finalize_previous_epoch(&self) -> Inherent {
// Create the FinalizeEpoch inherent.
Inherent::FinalizeEpoch
}
} | } else { | random_line_split |
inherents.rs | use nimiq_account::StakingContract;
use nimiq_block::{ForkProof, MacroBlock, MacroHeader, SkipBlockInfo};
use nimiq_blockchain_interface::AbstractBlockchain;
use nimiq_database as db;
use nimiq_keys::Address;
use nimiq_primitives::{
account::AccountType,
coin::Coin,
policy::Policy,
slots_allocation::{JailedValidator, PenalizedSlot},
};
use nimiq_transaction::{inherent::Inherent, reward::RewardTransaction};
use nimiq_vrf::{AliasMethod, VrfUseCase};
use crate::{blockchain_state::BlockchainState, reward::block_reward_for_batch, Blockchain};
/// Implements methods that create inherents.
impl Blockchain {
pub fn create_macro_block_inherents(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
let mut inherents: Vec<Inherent> = vec![];
// Every macro block is the end of a batch, so we need to finalize the batch.
inherents.append(&mut self.finalize_previous_batch(macro_block));
// If this block is an election block, we also need to finalize the epoch.
if Policy::is_election_block_at(macro_block.block_number()) {
// On election the previous epoch needs to be finalized.
// We can rely on `state` here, since we cannot revert macro blocks.
inherents.push(self.finalize_previous_epoch());
}
inherents
}
/// Given fork proofs and (or) a skip block, it returns the respective punishment inherents. It expects
/// verified fork proofs and (or) skip block.
pub fn create_punishment_inherents(
&self,
block_number: u32,
fork_proofs: &[ForkProof],
skip_block_info: Option<SkipBlockInfo>,
txn_option: Option<&db::TransactionProxy>,
) -> Vec<Inherent> {
let mut inherents = vec![];
for fork_proof in fork_proofs {
trace!("Creating inherent from fork proof: {:?}", fork_proof);
inherents.push(self.inherent_from_fork_proof(block_number, fork_proof, txn_option));
}
if let Some(skip_block_info) = skip_block_info {
trace!("Creating inherent from skip block: {:?}", skip_block_info);
inherents.push(self.inherent_from_skip_block_info(&skip_block_info, txn_option));
}
inherents
}
/// It creates a jail inherent from a fork proof. It expects a *verified* fork proof!
pub fn inherent_from_fork_proof(
&self,
reporting_block: u32, // PITODO: we can get it from the blockchain, should be head block number + 1
fork_proof: &ForkProof,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
fork_proof.header1.block_number,
fork_proof.header1.block_number,
fork_proof.prev_vrf_seed.entropy(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
// If the reporting block is in a new epoch, we check if the proposer is still a validator in this epoch
// and retrieve its new slots.
let new_epoch_slot_range = if Policy::epoch_at(reporting_block)
> Policy::epoch_at(fork_proof.header1.block_number)
{
self.current_validators()
.expect("We need to have validators")
.get_validator_by_address(&proposer_slot.validator.address)
.map(|validator| validator.slots.clone())
} else {
None
};
// Create the JailedValidator struct.
let jailed_validator = JailedValidator {
slots: proposer_slot.validator.slots,
validator_address: proposer_slot.validator.address,
offense_event_block: fork_proof.header1.block_number,
};
// Create the corresponding jail inherent.
Inherent::Jail {
jailed_validator,
new_epoch_slot_range,
}
}
/// It creates a penalize inherent from a skip block. It expects a *verified* skip block!
pub fn inherent_from_skip_block_info(
&self,
skip_block_info: &SkipBlockInfo,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
skip_block_info.block_number,
skip_block_info.block_number,
skip_block_info.vrf_entropy.clone(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
debug!(
address = %proposer_slot.validator.address,
"Penalize inherent from skip block"
);
// Create the PenalizedSlot struct.
let slot = PenalizedSlot {
slot: proposer_slot.number,
validator_address: proposer_slot.validator.address,
offense_event_block: skip_block_info.block_number,
};
// Create the corresponding penalize inherent.
Inherent::Penalize { slot }
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn finalize_previous_batch(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_block.block_number()) - 1 == 0 {
return vec![];
}
// To get the inherents we either fetch the reward transactions from the macro body;
// or we create the transactions when there is no macro body.
let mut inherents: Vec<Inherent> = if let Some(body) = macro_block.body.as_ref() {
body.transactions.iter().map(Inherent::from).collect()
} else {
self.create_reward_transactions(
self.state(),
¯o_block.header,
&self.get_staking_contract(),
)
.iter()
.map(Inherent::from)
.collect()
};
// Push FinalizeBatch inherent to update StakingContract.
inherents.push(Inherent::FinalizeBatch);
inherents
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn create_reward_transactions(
&self,
state: &BlockchainState,
macro_header: &MacroHeader,
staking_contract: &StakingContract,
) -> Vec<RewardTransaction> |
/// Creates the inherent to finalize an epoch. The inherent is for updating the StakingContract.
pub fn finalize_previous_epoch(&self) -> Inherent {
// Create the FinalizeEpoch inherent.
Inherent::FinalizeEpoch
}
}
| {
let prev_macro_info = &state.macro_info;
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_header.block_number) - 1 == 0 {
return vec![];
}
// Get validator slots
// NOTE: Fields `current_slots` and `previous_slots` are expected to always be set.
let validator_slots = if Policy::first_batch_of_epoch(macro_header.block_number) {
state
.previous_slots
.as_ref()
.expect("Slots for last batch are missing")
} else {
state
.current_slots
.as_ref()
.expect("Slots for current batch are missing")
};
// Calculate the slots that will receive rewards.
// Rewards are for the previous batch (to give validators time to report misbehavior)
let penalized_set = staking_contract
.punished_slots
.previous_batch_punished_slots();
// Total reward for the previous batch
let block_reward = block_reward_for_batch(
macro_header,
&prev_macro_info.head.unwrap_macro_ref().header,
self.genesis_supply,
self.genesis_timestamp,
);
let tx_fees = prev_macro_info.cum_tx_fees;
let reward_pot = block_reward + tx_fees;
// Distribute reward between all slots and calculate the remainder
let slot_reward = reward_pot / Policy::SLOTS as u64;
let remainder = reward_pot % Policy::SLOTS as u64;
// The first slot number of the current validator
let mut first_slot_number = 0;
// Peekable iterator to collect penalized slots for validator
let mut penalized_set_iter = penalized_set.iter().peekable();
// All accepted inherents.
let mut transactions = Vec::new();
// Remember the number of eligible slots that a validator had (that was able to accept the inherent)
let mut num_eligible_slots_for_accepted_tx = Vec::new();
// Remember that the total amount of reward must be burned. The reward for a slot is burned
// either because the slot was penalized or because the corresponding validator was unable to
// accept the inherent.
let mut burned_reward = Coin::ZERO;
// Compute inherents
for validator_slot in validator_slots.iter() {
// The interval of slot numbers for the current slot band is
// [first_slot_number, last_slot_number). So it actually doesn't include
// `last_slot_number`.
let last_slot_number = first_slot_number + validator_slot.num_slots();
// Compute the number of punishments for this validator slot band.
let mut num_eligible_slots = validator_slot.num_slots();
let mut num_penalized_slots = 0;
while let Some(next_penalized_slot) = penalized_set_iter.peek() {
let next_penalized_slot = *next_penalized_slot as u16;
assert!(next_penalized_slot >= first_slot_number);
if next_penalized_slot < last_slot_number {
assert!(num_eligible_slots > 0);
penalized_set_iter.next();
num_eligible_slots -= 1;
num_penalized_slots += 1;
} else {
break;
}
}
// Compute reward from slot reward and number of eligible slots. Also update the burned
// reward from the number of penalized slots.
let reward = slot_reward
.checked_mul(num_eligible_slots as u64)
.expect("Overflow in reward");
burned_reward += slot_reward
.checked_mul(num_penalized_slots as u64)
.expect("Overflow in reward");
// Do not create reward transactions for zero rewards
if !reward.is_zero() {
// Create inherent for the reward.
let staking_contract = self.get_staking_contract();
let data_store = self.get_staking_contract_store();
let txn = self.read_transaction();
let validator = staking_contract
.get_validator(&data_store.read(&txn), &validator_slot.address)
.expect("Couldn't find validator in the accounts trie when paying rewards!");
let tx: RewardTransaction = RewardTransaction {
recipient: validator.reward_address.clone(),
value: reward,
};
// Test whether account will accept inherent. If it can't then the reward will be
// burned.
// TODO Improve this check: it assumes that only BasicAccounts can receive transactions.
let account = state.accounts.get_complete(&tx.recipient, Some(&txn));
if account.account_type() == AccountType::Basic {
num_eligible_slots_for_accepted_tx.push(num_eligible_slots);
transactions.push(tx);
} else {
debug!(
target_address = %tx.recipient,
reward = %tx.value,
"Can't accept batch reward"
);
burned_reward += reward;
}
}
// Update first_slot_number for next iteration
first_slot_number = last_slot_number;
}
// Check that number of accepted inherents is equal to length of the map that gives us the
// corresponding number of slots for that staker (which should be equal to the number of
// validators that will receive rewards).
assert_eq!(transactions.len(), num_eligible_slots_for_accepted_tx.len());
// Get RNG from last block's seed and build lookup table based on number of eligible slots.
let mut rng = macro_header.seed.rng(VrfUseCase::RewardDistribution);
let lookup = AliasMethod::new(num_eligible_slots_for_accepted_tx);
// Randomly give remainder to one accepting slot. We don't bother to distribute it over all
// accepting slots because the remainder is always at most SLOTS - 1 Lunas.
let index = lookup.sample(&mut rng);
transactions[index].value += remainder;
// Create the inherent for the burned reward.
if burned_reward > Coin::ZERO {
let tx = RewardTransaction {
recipient: Address::burn_address(),
value: burned_reward,
};
transactions.push(tx);
}
transactions
} | identifier_body |
inherents.rs | use nimiq_account::StakingContract;
use nimiq_block::{ForkProof, MacroBlock, MacroHeader, SkipBlockInfo};
use nimiq_blockchain_interface::AbstractBlockchain;
use nimiq_database as db;
use nimiq_keys::Address;
use nimiq_primitives::{
account::AccountType,
coin::Coin,
policy::Policy,
slots_allocation::{JailedValidator, PenalizedSlot},
};
use nimiq_transaction::{inherent::Inherent, reward::RewardTransaction};
use nimiq_vrf::{AliasMethod, VrfUseCase};
use crate::{blockchain_state::BlockchainState, reward::block_reward_for_batch, Blockchain};
/// Implements methods that create inherents.
impl Blockchain {
pub fn create_macro_block_inherents(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
let mut inherents: Vec<Inherent> = vec![];
// Every macro block is the end of a batch, so we need to finalize the batch.
inherents.append(&mut self.finalize_previous_batch(macro_block));
// If this block is an election block, we also need to finalize the epoch.
if Policy::is_election_block_at(macro_block.block_number()) {
// On election the previous epoch needs to be finalized.
// We can rely on `state` here, since we cannot revert macro blocks.
inherents.push(self.finalize_previous_epoch());
}
inherents
}
/// Given fork proofs and (or) a skip block, it returns the respective punishment inherents. It expects
/// verified fork proofs and (or) skip block.
pub fn create_punishment_inherents(
&self,
block_number: u32,
fork_proofs: &[ForkProof],
skip_block_info: Option<SkipBlockInfo>,
txn_option: Option<&db::TransactionProxy>,
) -> Vec<Inherent> {
let mut inherents = vec![];
for fork_proof in fork_proofs {
trace!("Creating inherent from fork proof: {:?}", fork_proof);
inherents.push(self.inherent_from_fork_proof(block_number, fork_proof, txn_option));
}
if let Some(skip_block_info) = skip_block_info {
trace!("Creating inherent from skip block: {:?}", skip_block_info);
inherents.push(self.inherent_from_skip_block_info(&skip_block_info, txn_option));
}
inherents
}
/// It creates a jail inherent from a fork proof. It expects a *verified* fork proof!
pub fn inherent_from_fork_proof(
&self,
reporting_block: u32, // PITODO: we can get it from the blockchain, should be head block number + 1
fork_proof: &ForkProof,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
fork_proof.header1.block_number,
fork_proof.header1.block_number,
fork_proof.prev_vrf_seed.entropy(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
// If the reporting block is in a new epoch, we check if the proposer is still a validator in this epoch
// and retrieve its new slots.
let new_epoch_slot_range = if Policy::epoch_at(reporting_block)
> Policy::epoch_at(fork_proof.header1.block_number)
{
self.current_validators()
.expect("We need to have validators")
.get_validator_by_address(&proposer_slot.validator.address)
.map(|validator| validator.slots.clone())
} else {
None
};
// Create the JailedValidator struct.
let jailed_validator = JailedValidator {
slots: proposer_slot.validator.slots,
validator_address: proposer_slot.validator.address,
offense_event_block: fork_proof.header1.block_number,
};
// Create the corresponding jail inherent.
Inherent::Jail {
jailed_validator,
new_epoch_slot_range,
}
}
/// It creates a penalize inherent from a skip block. It expects a *verified* skip block!
pub fn inherent_from_skip_block_info(
&self,
skip_block_info: &SkipBlockInfo,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
skip_block_info.block_number,
skip_block_info.block_number,
skip_block_info.vrf_entropy.clone(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
debug!(
address = %proposer_slot.validator.address,
"Penalize inherent from skip block"
);
// Create the PenalizedSlot struct.
let slot = PenalizedSlot {
slot: proposer_slot.number,
validator_address: proposer_slot.validator.address,
offense_event_block: skip_block_info.block_number,
};
// Create the corresponding penalize inherent.
Inherent::Penalize { slot }
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn finalize_previous_batch(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_block.block_number()) - 1 == 0 {
return vec![];
}
// To get the inherents we either fetch the reward transactions from the macro body;
// or we create the transactions when there is no macro body.
let mut inherents: Vec<Inherent> = if let Some(body) = macro_block.body.as_ref() {
body.transactions.iter().map(Inherent::from).collect()
} else {
self.create_reward_transactions(
self.state(),
¯o_block.header,
&self.get_staking_contract(),
)
.iter()
.map(Inherent::from)
.collect()
};
// Push FinalizeBatch inherent to update StakingContract.
inherents.push(Inherent::FinalizeBatch);
inherents
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn create_reward_transactions(
&self,
state: &BlockchainState,
macro_header: &MacroHeader,
staking_contract: &StakingContract,
) -> Vec<RewardTransaction> {
let prev_macro_info = &state.macro_info;
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_header.block_number) - 1 == 0 {
return vec![];
}
// Get validator slots
// NOTE: Fields `current_slots` and `previous_slots` are expected to always be set.
let validator_slots = if Policy::first_batch_of_epoch(macro_header.block_number) {
state
.previous_slots
.as_ref()
.expect("Slots for last batch are missing")
} else {
state
.current_slots
.as_ref()
.expect("Slots for current batch are missing")
};
// Calculate the slots that will receive rewards.
// Rewards are for the previous batch (to give validators time to report misbehavior)
let penalized_set = staking_contract
.punished_slots
.previous_batch_punished_slots();
// Total reward for the previous batch
let block_reward = block_reward_for_batch(
macro_header,
&prev_macro_info.head.unwrap_macro_ref().header,
self.genesis_supply,
self.genesis_timestamp,
);
let tx_fees = prev_macro_info.cum_tx_fees;
let reward_pot = block_reward + tx_fees;
// Distribute reward between all slots and calculate the remainder
let slot_reward = reward_pot / Policy::SLOTS as u64;
let remainder = reward_pot % Policy::SLOTS as u64;
// The first slot number of the current validator
let mut first_slot_number = 0;
// Peekable iterator to collect penalized slots for validator
let mut penalized_set_iter = penalized_set.iter().peekable();
// All accepted inherents.
let mut transactions = Vec::new();
// Remember the number of eligible slots that a validator had (that was able to accept the inherent)
let mut num_eligible_slots_for_accepted_tx = Vec::new();
// Remember that the total amount of reward must be burned. The reward for a slot is burned
// either because the slot was penalized or because the corresponding validator was unable to
// accept the inherent.
let mut burned_reward = Coin::ZERO;
// Compute inherents
for validator_slot in validator_slots.iter() {
// The interval of slot numbers for the current slot band is
// [first_slot_number, last_slot_number). So it actually doesn't include
// `last_slot_number`.
let last_slot_number = first_slot_number + validator_slot.num_slots();
// Compute the number of punishments for this validator slot band.
let mut num_eligible_slots = validator_slot.num_slots();
let mut num_penalized_slots = 0;
while let Some(next_penalized_slot) = penalized_set_iter.peek() {
let next_penalized_slot = *next_penalized_slot as u16;
assert!(next_penalized_slot >= first_slot_number);
if next_penalized_slot < last_slot_number | else {
break;
}
}
// Compute reward from slot reward and number of eligible slots. Also update the burned
// reward from the number of penalized slots.
let reward = slot_reward
.checked_mul(num_eligible_slots as u64)
.expect("Overflow in reward");
burned_reward += slot_reward
.checked_mul(num_penalized_slots as u64)
.expect("Overflow in reward");
// Do not create reward transactions for zero rewards
if !reward.is_zero() {
// Create inherent for the reward.
let staking_contract = self.get_staking_contract();
let data_store = self.get_staking_contract_store();
let txn = self.read_transaction();
let validator = staking_contract
.get_validator(&data_store.read(&txn), &validator_slot.address)
.expect("Couldn't find validator in the accounts trie when paying rewards!");
let tx: RewardTransaction = RewardTransaction {
recipient: validator.reward_address.clone(),
value: reward,
};
// Test whether account will accept inherent. If it can't then the reward will be
// burned.
// TODO Improve this check: it assumes that only BasicAccounts can receive transactions.
let account = state.accounts.get_complete(&tx.recipient, Some(&txn));
if account.account_type() == AccountType::Basic {
num_eligible_slots_for_accepted_tx.push(num_eligible_slots);
transactions.push(tx);
} else {
debug!(
target_address = %tx.recipient,
reward = %tx.value,
"Can't accept batch reward"
);
burned_reward += reward;
}
}
// Update first_slot_number for next iteration
first_slot_number = last_slot_number;
}
// Check that number of accepted inherents is equal to length of the map that gives us the
// corresponding number of slots for that staker (which should be equal to the number of
// validators that will receive rewards).
assert_eq!(transactions.len(), num_eligible_slots_for_accepted_tx.len());
// Get RNG from last block's seed and build lookup table based on number of eligible slots.
let mut rng = macro_header.seed.rng(VrfUseCase::RewardDistribution);
let lookup = AliasMethod::new(num_eligible_slots_for_accepted_tx);
// Randomly give remainder to one accepting slot. We don't bother to distribute it over all
// accepting slots because the remainder is always at most SLOTS - 1 Lunas.
let index = lookup.sample(&mut rng);
transactions[index].value += remainder;
// Create the inherent for the burned reward.
if burned_reward > Coin::ZERO {
let tx = RewardTransaction {
recipient: Address::burn_address(),
value: burned_reward,
};
transactions.push(tx);
}
transactions
}
/// Creates the inherent to finalize an epoch. The inherent is for updating the StakingContract.
pub fn finalize_previous_epoch(&self) -> Inherent {
// Create the FinalizeEpoch inherent.
Inherent::FinalizeEpoch
}
}
| {
assert!(num_eligible_slots > 0);
penalized_set_iter.next();
num_eligible_slots -= 1;
num_penalized_slots += 1;
} | conditional_block |
variable_def.py | '''
variable_def.py
Copyright 2012 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import itertools
import phply.phpast as phpast
from core.nodes.node_rep import NodeRep
from core.vulnerabilities.definitions import get_vulnty_for_sec
class VariableDef(NodeRep):
'''
Representation for the AST Variable Definition.
(...)
'''
USER_VARS = ('$_GET', '$_POST', '$_COOKIES', '$_REQUEST')
def __init__(self, name, lineno, scope, ast_node=None):
NodeRep.__init__(self, name, lineno, ast_node=ast_node)
# Containing Scope.
self._scope = scope
# Parent VariableDef
self._parents = []
# Ancestors AST FunctionCall nodes
self.funccall_nodes = []
# Ancestors AST Variable nodes
self.var_nodes = []
# Is this var controlled by user?
self._controlled_by_user = None
# Vulns this variable is safe for.
self._safe_for = []
# Being 'root' means that this var doesn't depend on any other.
self._is_root = True if (name in VariableDef.USER_VARS) else None
# Request parameter name, source for a possible vuln.
self._taint_source = None
# Is object property?
self._object_property = False
# Anon var? (param var in functioncall).
self._anon_var = False
@property
def is_root(self):
'''
A variable is said to be 'root' when it has no ancestor or when
its ancestor's name is in USER_VARS
'''
if self._is_root is None:
if not self.parents:
self._is_root = True
else:
self._is_root = False
return self._is_root
@is_root.setter
def is_root(self, is_root):
self._is_root = is_root
@property
def parents(self):
'''
Get this var's parent variable
'''
if self._is_root:
return None
if not self._parents:
# Function calls - add return values of functions as parents
self.funccall_nodes = funccall_nodes = self._get_ancestor_funccalls(self._ast_node)
for n in funccall_nodes:
if hasattr(n, '_obj'):
called_obj = n._obj.get_called_obj()
if called_obj:
for var in called_obj._return_vars:
self._parents.append(var)
# Variables
self.var_nodes = varnodes = self._get_ancestor_vars(self._ast_node)
if varnodes:
for varnode in varnodes:
if getattr(varnode,'_parent_node', None) \
and type(varnode._parent_node) is phpast.ObjectProperty \
and varnode.name == '$this':
name = varnode.name + '->' + varnode._parent_node.name
parent_var = self._scope.get_root_scope()._parent_scope.get_var(name)
if self != parent_var:
self._parents.append(self._scope.get_root_scope()._parent_scope.get_var(name))
# All other vars
# We should not set ourself as parent
parent_var = self._scope.get_var(varnode.name)
if self != parent_var:
self._parents.append(parent_var)
return self._parents
@parents.setter
def parents(self, parents):
self._parents = parents
def add_parent(self, parent):
self._parents.append(parent)
@property
def controlled_by_user(self):
'''
Returns bool that indicates if this variable is tainted.
'''
#cbusr = self._controlled_by_user
#cbusr = None # no cache
#if cbusr is None:
cbusr = False #todo look at this
if self.is_root:
if self._name in VariableDef.USER_VARS:
cbusr = True
else:
cbusr = False
else:
# Look at parents
for parent in self.parents:
# todo look at this hasattr
if hasattr(parent, 'controlled_by_user') and parent.controlled_by_user == True:
cbusr = True
#self._controlled_by_user = cbusr
return cbusr
@property
def taint_source(self):
'''
Return the taint source for this Variable Definition if any; otherwise
return None.
$a = $_GET['test'];
$b = $a . $_GET['ok'];
print $b;
$b taint source is ['test', 'ok']
'''
taintsrc = self._taint_source
if taintsrc:
return taintsrc
else:
deps = list(itertools.chain((self,), self.deps()))
vars = []
for item in reversed(deps):
if not item.is_root:
for node in item.var_nodes:
vars.append(node)
sources = []
for v in vars:
if hasattr(v, '_parent_node') and type(v._parent_node) is phpast.ArrayOffset:
sources.append(v._parent_node.expr)
return sources
# todo remove below when finished
@property
def taint_source_old(self):
'''
Return the taint source for this Variable Definition if any; otherwise
return None.
'''
taintsrc = self._taint_source
if taintsrc:
return taintsrc
else:
deps = list(itertools.chain((self,), self.deps()))
v = deps[-2].var_node if len(deps) > 1 else None
if v and type(v._parent_node) is phpast.ArrayOffset:
return v._parent_node.expr
return None
def __eq__(self, ovar):
return self._scope == ovar._scope and \
self._lineno == ovar.lineno and \
self._name == ovar.name
def __gt__(self, ovar):
# This basically indicates precedence. Use it to know if a
# variable should override another.
return self._scope == ovar._scope and self._name == ovar.name and \
self._lineno > ovar.lineno or self.controlled_by_user
def __hash__(self):
return hash(self._name)
def __repr__(self):
return "<Var %s definition at line %s in '%s'>" % (self.name, self.lineno, self.get_file_name())
def __str__(self):
return ("Line %(lineno)s in '%(file_name)s'. Declaration of variable '%(name)s'."
" Status: %(status)s") % \
{'name': self.name,
'file_name': self.get_file_name(),
'lineno': self.lineno,
'status': self.controlled_by_user and \
("'Tainted'. Source: '%s'" % self.taint_source) or \
"'Clean'"
}
def is_tainted_for(self, vulnty):
if vulnty in self._safe_for:
return False
if self.parents:
for parent in self.parents:
if parent.is_tainted_for(vulnty) == True:
return True
return False
return True
def get_root_var(self):
'''
Return root var of var:
$a = 'bla';
$b = $a;
$c = $b;
$a is the root of $c
'''
while self.parent:
self = self.parent
return self
def deps(self):
'''
Generator function. Yields this var's dependencies.
'''
seen = set()
parents = self.parents
while parents:
for parent in parents:
if parent not in seen:
yield parent
seen.add(parent)
parents = parent.parents
def _get_ancestor_funccalls(self, node, funcs = None, level=0):
if funcs is None:
funcs = []
for n in NodeRep.parse(node):
if type(node) is phpast.BinaryOp:
# only parse direct nodes
for item in NodeRep.parse(node, 0, 0, 1):
self._get_ancestor_funccalls(item, funcs, level + 1)
break
if type(n) is phpast.FunctionCall:
funcs.append(n)
return funcs
def _get_ancestor_vars(self, node, vars = None, level=0):
'''
Return the ancestor Variables for this var.
For next example php code:
<? $a = 'ls' . $_GET['bar'] . $_POST['foo'];
$b = somefunc($a);
?>
we got that $_GET and $_POST are both $a's ancestor as well as $a is for $b.
Also determines if this var is safe for vulns
'''
if vars is None:
vars = []
for n in NodeRep.parse(node):
if type(node) is phpast.BinaryOp:
# only parse direct nodes
for item in NodeRep.parse(node, 0, 0, 1):
self._get_ancestor_vars(item, vars, level + 1)
break
if type(n) is phpast.Variable:
vars.append(n)
if level == 0:
# Securing functions
safe_for = {}
for n in vars:
# todo look at all vars
for fc in self._get_parent_nodes(n, [phpast.FunctionCall]):
# Don't set custom function calls params as parent, this is done by
# looking at the return vars
|
for vulnty, count in safe_for.iteritems():
if count == len(vars):
self._safe_for.append(vulnty)
return vars
def set_clean(self):
self._controlled_by_user = None
self._taint_source = None
self._is_root = True
def get_file_name(self):
return self._scope.file_name | if fc in self.funccall_nodes and hasattr(fc, '_obj') and fc._obj.get_called_obj():
vars.remove(n)
continue
vulnty = get_vulnty_for_sec(fc.name)
if vulnty:
if vulnty not in safe_for:
safe_for[vulnty] = 1
else:
safe_for[vulnty] = safe_for[vulnty] + 1 | conditional_block |
variable_def.py | '''
variable_def.py
Copyright 2012 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import itertools
import phply.phpast as phpast
from core.nodes.node_rep import NodeRep
from core.vulnerabilities.definitions import get_vulnty_for_sec
class VariableDef(NodeRep):
'''
Representation for the AST Variable Definition.
(...)
'''
USER_VARS = ('$_GET', '$_POST', '$_COOKIES', '$_REQUEST')
def __init__(self, name, lineno, scope, ast_node=None):
NodeRep.__init__(self, name, lineno, ast_node=ast_node)
# Containing Scope.
self._scope = scope
# Parent VariableDef
self._parents = []
# Ancestors AST FunctionCall nodes
self.funccall_nodes = []
# Ancestors AST Variable nodes
self.var_nodes = []
# Is this var controlled by user?
self._controlled_by_user = None
# Vulns this variable is safe for.
self._safe_for = []
# Being 'root' means that this var doesn't depend on any other.
self._is_root = True if (name in VariableDef.USER_VARS) else None
# Request parameter name, source for a possible vuln.
self._taint_source = None
# Is object property?
self._object_property = False
# Anon var? (param var in functioncall).
self._anon_var = False
@property
def is_root(self):
'''
A variable is said to be 'root' when it has no ancestor or when
its ancestor's name is in USER_VARS
'''
if self._is_root is None:
if not self.parents:
self._is_root = True
else:
self._is_root = False
return self._is_root
@is_root.setter
def is_root(self, is_root):
self._is_root = is_root
@property
def parents(self):
'''
Get this var's parent variable
'''
if self._is_root:
return None
if not self._parents:
# Function calls - add return values of functions as parents
self.funccall_nodes = funccall_nodes = self._get_ancestor_funccalls(self._ast_node)
for n in funccall_nodes:
if hasattr(n, '_obj'):
called_obj = n._obj.get_called_obj()
if called_obj:
for var in called_obj._return_vars:
self._parents.append(var)
# Variables
self.var_nodes = varnodes = self._get_ancestor_vars(self._ast_node)
if varnodes:
for varnode in varnodes:
if getattr(varnode,'_parent_node', None) \
and type(varnode._parent_node) is phpast.ObjectProperty \
and varnode.name == '$this':
name = varnode.name + '->' + varnode._parent_node.name
parent_var = self._scope.get_root_scope()._parent_scope.get_var(name)
if self != parent_var:
self._parents.append(self._scope.get_root_scope()._parent_scope.get_var(name))
# All other vars
# We should not set ourself as parent
parent_var = self._scope.get_var(varnode.name)
if self != parent_var:
self._parents.append(parent_var)
return self._parents
@parents.setter
def parents(self, parents):
self._parents = parents
def add_parent(self, parent):
self._parents.append(parent)
@property
def controlled_by_user(self):
|
@property
def taint_source(self):
'''
Return the taint source for this Variable Definition if any; otherwise
return None.
$a = $_GET['test'];
$b = $a . $_GET['ok'];
print $b;
$b taint source is ['test', 'ok']
'''
taintsrc = self._taint_source
if taintsrc:
return taintsrc
else:
deps = list(itertools.chain((self,), self.deps()))
vars = []
for item in reversed(deps):
if not item.is_root:
for node in item.var_nodes:
vars.append(node)
sources = []
for v in vars:
if hasattr(v, '_parent_node') and type(v._parent_node) is phpast.ArrayOffset:
sources.append(v._parent_node.expr)
return sources
# todo remove below when finished
@property
def taint_source_old(self):
'''
Return the taint source for this Variable Definition if any; otherwise
return None.
'''
taintsrc = self._taint_source
if taintsrc:
return taintsrc
else:
deps = list(itertools.chain((self,), self.deps()))
v = deps[-2].var_node if len(deps) > 1 else None
if v and type(v._parent_node) is phpast.ArrayOffset:
return v._parent_node.expr
return None
def __eq__(self, ovar):
return self._scope == ovar._scope and \
self._lineno == ovar.lineno and \
self._name == ovar.name
def __gt__(self, ovar):
# This basically indicates precedence. Use it to know if a
# variable should override another.
return self._scope == ovar._scope and self._name == ovar.name and \
self._lineno > ovar.lineno or self.controlled_by_user
def __hash__(self):
return hash(self._name)
def __repr__(self):
return "<Var %s definition at line %s in '%s'>" % (self.name, self.lineno, self.get_file_name())
def __str__(self):
return ("Line %(lineno)s in '%(file_name)s'. Declaration of variable '%(name)s'."
" Status: %(status)s") % \
{'name': self.name,
'file_name': self.get_file_name(),
'lineno': self.lineno,
'status': self.controlled_by_user and \
("'Tainted'. Source: '%s'" % self.taint_source) or \
"'Clean'"
}
def is_tainted_for(self, vulnty):
if vulnty in self._safe_for:
return False
if self.parents:
for parent in self.parents:
if parent.is_tainted_for(vulnty) == True:
return True
return False
return True
def get_root_var(self):
'''
Return root var of var:
$a = 'bla';
$b = $a;
$c = $b;
$a is the root of $c
'''
while self.parent:
self = self.parent
return self
def deps(self):
'''
Generator function. Yields this var's dependencies.
'''
seen = set()
parents = self.parents
while parents:
for parent in parents:
if parent not in seen:
yield parent
seen.add(parent)
parents = parent.parents
def _get_ancestor_funccalls(self, node, funcs = None, level=0):
if funcs is None:
funcs = []
for n in NodeRep.parse(node):
if type(node) is phpast.BinaryOp:
# only parse direct nodes
for item in NodeRep.parse(node, 0, 0, 1):
self._get_ancestor_funccalls(item, funcs, level + 1)
break
if type(n) is phpast.FunctionCall:
funcs.append(n)
return funcs
def _get_ancestor_vars(self, node, vars = None, level=0):
'''
Return the ancestor Variables for this var.
For next example php code:
<? $a = 'ls' . $_GET['bar'] . $_POST['foo'];
$b = somefunc($a);
?>
we got that $_GET and $_POST are both $a's ancestor as well as $a is for $b.
Also determines if this var is safe for vulns
'''
if vars is None:
vars = []
for n in NodeRep.parse(node):
if type(node) is phpast.BinaryOp:
# only parse direct nodes
for item in NodeRep.parse(node, 0, 0, 1):
self._get_ancestor_vars(item, vars, level + 1)
break
if type(n) is phpast.Variable:
vars.append(n)
if level == 0:
# Securing functions
safe_for = {}
for n in vars:
# todo look at all vars
for fc in self._get_parent_nodes(n, [phpast.FunctionCall]):
# Don't set custom function calls params as parent, this is done by
# looking at the return vars
if fc in self.funccall_nodes and hasattr(fc, '_obj') and fc._obj.get_called_obj():
vars.remove(n)
continue
vulnty = get_vulnty_for_sec(fc.name)
if vulnty:
if vulnty not in safe_for:
safe_for[vulnty] = 1
else:
safe_for[vulnty] = safe_for[vulnty] + 1
for vulnty, count in safe_for.iteritems():
if count == len(vars):
self._safe_for.append(vulnty)
return vars
def set_clean(self):
self._controlled_by_user = None
self._taint_source = None
self._is_root = True
def get_file_name(self):
return self._scope.file_name | '''
Returns bool that indicates if this variable is tainted.
'''
#cbusr = self._controlled_by_user
#cbusr = None # no cache
#if cbusr is None:
cbusr = False #todo look at this
if self.is_root:
if self._name in VariableDef.USER_VARS:
cbusr = True
else:
cbusr = False
else:
# Look at parents
for parent in self.parents:
# todo look at this hasattr
if hasattr(parent, 'controlled_by_user') and parent.controlled_by_user == True:
cbusr = True
#self._controlled_by_user = cbusr
return cbusr | identifier_body |
variable_def.py | '''
variable_def.py
Copyright 2012 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import itertools
import phply.phpast as phpast
from core.nodes.node_rep import NodeRep
from core.vulnerabilities.definitions import get_vulnty_for_sec
class VariableDef(NodeRep):
'''
Representation for the AST Variable Definition.
(...)
'''
USER_VARS = ('$_GET', '$_POST', '$_COOKIES', '$_REQUEST')
def __init__(self, name, lineno, scope, ast_node=None):
NodeRep.__init__(self, name, lineno, ast_node=ast_node)
# Containing Scope.
self._scope = scope
# Parent VariableDef
self._parents = []
# Ancestors AST FunctionCall nodes
self.funccall_nodes = []
# Ancestors AST Variable nodes
self.var_nodes = []
# Is this var controlled by user?
self._controlled_by_user = None
# Vulns this variable is safe for.
self._safe_for = []
# Being 'root' means that this var doesn't depend on any other.
self._is_root = True if (name in VariableDef.USER_VARS) else None
# Request parameter name, source for a possible vuln.
self._taint_source = None
# Is object property?
self._object_property = False
# Anon var? (param var in functioncall).
self._anon_var = False
@property
def is_root(self):
'''
A variable is said to be 'root' when it has no ancestor or when
its ancestor's name is in USER_VARS
'''
if self._is_root is None:
if not self.parents:
self._is_root = True
else:
self._is_root = False
return self._is_root
@is_root.setter
def is_root(self, is_root):
self._is_root = is_root
@property
def parents(self):
'''
Get this var's parent variable
'''
if self._is_root:
return None
if not self._parents:
# Function calls - add return values of functions as parents
self.funccall_nodes = funccall_nodes = self._get_ancestor_funccalls(self._ast_node)
for n in funccall_nodes:
if hasattr(n, '_obj'):
called_obj = n._obj.get_called_obj()
if called_obj:
for var in called_obj._return_vars:
self._parents.append(var)
# Variables
self.var_nodes = varnodes = self._get_ancestor_vars(self._ast_node)
if varnodes:
for varnode in varnodes:
if getattr(varnode,'_parent_node', None) \
and type(varnode._parent_node) is phpast.ObjectProperty \
and varnode.name == '$this':
name = varnode.name + '->' + varnode._parent_node.name
parent_var = self._scope.get_root_scope()._parent_scope.get_var(name)
if self != parent_var:
self._parents.append(self._scope.get_root_scope()._parent_scope.get_var(name))
# All other vars
# We should not set ourself as parent
parent_var = self._scope.get_var(varnode.name)
if self != parent_var:
self._parents.append(parent_var)
return self._parents
@parents.setter
def parents(self, parents):
self._parents = parents
def add_parent(self, parent):
self._parents.append(parent)
@property
def controlled_by_user(self):
'''
Returns bool that indicates if this variable is tainted.
'''
#cbusr = self._controlled_by_user
#cbusr = None # no cache
#if cbusr is None:
cbusr = False #todo look at this
if self.is_root:
if self._name in VariableDef.USER_VARS:
cbusr = True
else:
cbusr = False
else:
# Look at parents
for parent in self.parents:
# todo look at this hasattr
if hasattr(parent, 'controlled_by_user') and parent.controlled_by_user == True:
cbusr = True
#self._controlled_by_user = cbusr
return cbusr
@property
def taint_source(self):
'''
Return the taint source for this Variable Definition if any; otherwise
return None.
$a = $_GET['test'];
$b = $a . $_GET['ok'];
print $b;
$b taint source is ['test', 'ok']
'''
taintsrc = self._taint_source
if taintsrc:
return taintsrc
else:
deps = list(itertools.chain((self,), self.deps()))
vars = []
for item in reversed(deps):
if not item.is_root:
for node in item.var_nodes:
vars.append(node)
sources = []
for v in vars:
if hasattr(v, '_parent_node') and type(v._parent_node) is phpast.ArrayOffset:
sources.append(v._parent_node.expr)
return sources
# todo remove below when finished
@property
def taint_source_old(self):
'''
Return the taint source for this Variable Definition if any; otherwise
return None.
'''
taintsrc = self._taint_source
if taintsrc:
return taintsrc
else:
deps = list(itertools.chain((self,), self.deps()))
v = deps[-2].var_node if len(deps) > 1 else None
if v and type(v._parent_node) is phpast.ArrayOffset:
return v._parent_node.expr
return None
def __eq__(self, ovar):
return self._scope == ovar._scope and \
self._lineno == ovar.lineno and \
self._name == ovar.name
def __gt__(self, ovar):
# This basically indicates precedence. Use it to know if a
# variable should override another.
return self._scope == ovar._scope and self._name == ovar.name and \
self._lineno > ovar.lineno or self.controlled_by_user
def __hash__(self):
return hash(self._name)
def __repr__(self):
return "<Var %s definition at line %s in '%s'>" % (self.name, self.lineno, self.get_file_name())
| 'file_name': self.get_file_name(),
'lineno': self.lineno,
'status': self.controlled_by_user and \
("'Tainted'. Source: '%s'" % self.taint_source) or \
"'Clean'"
}
def is_tainted_for(self, vulnty):
if vulnty in self._safe_for:
return False
if self.parents:
for parent in self.parents:
if parent.is_tainted_for(vulnty) == True:
return True
return False
return True
def get_root_var(self):
'''
Return root var of var:
$a = 'bla';
$b = $a;
$c = $b;
$a is the root of $c
'''
while self.parent:
self = self.parent
return self
def deps(self):
'''
Generator function. Yields this var's dependencies.
'''
seen = set()
parents = self.parents
while parents:
for parent in parents:
if parent not in seen:
yield parent
seen.add(parent)
parents = parent.parents
def _get_ancestor_funccalls(self, node, funcs = None, level=0):
if funcs is None:
funcs = []
for n in NodeRep.parse(node):
if type(node) is phpast.BinaryOp:
# only parse direct nodes
for item in NodeRep.parse(node, 0, 0, 1):
self._get_ancestor_funccalls(item, funcs, level + 1)
break
if type(n) is phpast.FunctionCall:
funcs.append(n)
return funcs
def _get_ancestor_vars(self, node, vars = None, level=0):
'''
Return the ancestor Variables for this var.
For next example php code:
<? $a = 'ls' . $_GET['bar'] . $_POST['foo'];
$b = somefunc($a);
?>
we got that $_GET and $_POST are both $a's ancestor as well as $a is for $b.
Also determines if this var is safe for vulns
'''
if vars is None:
vars = []
for n in NodeRep.parse(node):
if type(node) is phpast.BinaryOp:
# only parse direct nodes
for item in NodeRep.parse(node, 0, 0, 1):
self._get_ancestor_vars(item, vars, level + 1)
break
if type(n) is phpast.Variable:
vars.append(n)
if level == 0:
# Securing functions
safe_for = {}
for n in vars:
# todo look at all vars
for fc in self._get_parent_nodes(n, [phpast.FunctionCall]):
# Don't set custom function calls params as parent, this is done by
# looking at the return vars
if fc in self.funccall_nodes and hasattr(fc, '_obj') and fc._obj.get_called_obj():
vars.remove(n)
continue
vulnty = get_vulnty_for_sec(fc.name)
if vulnty:
if vulnty not in safe_for:
safe_for[vulnty] = 1
else:
safe_for[vulnty] = safe_for[vulnty] + 1
for vulnty, count in safe_for.iteritems():
if count == len(vars):
self._safe_for.append(vulnty)
return vars
def set_clean(self):
self._controlled_by_user = None
self._taint_source = None
self._is_root = True
def get_file_name(self):
return self._scope.file_name | def __str__(self):
return ("Line %(lineno)s in '%(file_name)s'. Declaration of variable '%(name)s'."
" Status: %(status)s") % \
{'name': self.name, | random_line_split |
variable_def.py | '''
variable_def.py
Copyright 2012 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import itertools
import phply.phpast as phpast
from core.nodes.node_rep import NodeRep
from core.vulnerabilities.definitions import get_vulnty_for_sec
class VariableDef(NodeRep):
'''
Representation for the AST Variable Definition.
(...)
'''
USER_VARS = ('$_GET', '$_POST', '$_COOKIES', '$_REQUEST')
def __init__(self, name, lineno, scope, ast_node=None):
NodeRep.__init__(self, name, lineno, ast_node=ast_node)
# Containing Scope.
self._scope = scope
# Parent VariableDef
self._parents = []
# Ancestors AST FunctionCall nodes
self.funccall_nodes = []
# Ancestors AST Variable nodes
self.var_nodes = []
# Is this var controlled by user?
self._controlled_by_user = None
# Vulns this variable is safe for.
self._safe_for = []
# Being 'root' means that this var doesn't depend on any other.
self._is_root = True if (name in VariableDef.USER_VARS) else None
# Request parameter name, source for a possible vuln.
self._taint_source = None
# Is object property?
self._object_property = False
# Anon var? (param var in functioncall).
self._anon_var = False
@property
def is_root(self):
'''
A variable is said to be 'root' when it has no ancestor or when
its ancestor's name is in USER_VARS
'''
if self._is_root is None:
if not self.parents:
self._is_root = True
else:
self._is_root = False
return self._is_root
@is_root.setter
def | (self, is_root):
self._is_root = is_root
@property
def parents(self):
'''
Get this var's parent variable
'''
if self._is_root:
return None
if not self._parents:
# Function calls - add return values of functions as parents
self.funccall_nodes = funccall_nodes = self._get_ancestor_funccalls(self._ast_node)
for n in funccall_nodes:
if hasattr(n, '_obj'):
called_obj = n._obj.get_called_obj()
if called_obj:
for var in called_obj._return_vars:
self._parents.append(var)
# Variables
self.var_nodes = varnodes = self._get_ancestor_vars(self._ast_node)
if varnodes:
for varnode in varnodes:
if getattr(varnode,'_parent_node', None) \
and type(varnode._parent_node) is phpast.ObjectProperty \
and varnode.name == '$this':
name = varnode.name + '->' + varnode._parent_node.name
parent_var = self._scope.get_root_scope()._parent_scope.get_var(name)
if self != parent_var:
self._parents.append(self._scope.get_root_scope()._parent_scope.get_var(name))
# All other vars
# We should not set ourself as parent
parent_var = self._scope.get_var(varnode.name)
if self != parent_var:
self._parents.append(parent_var)
return self._parents
@parents.setter
def parents(self, parents):
self._parents = parents
def add_parent(self, parent):
self._parents.append(parent)
@property
def controlled_by_user(self):
'''
Returns bool that indicates if this variable is tainted.
'''
#cbusr = self._controlled_by_user
#cbusr = None # no cache
#if cbusr is None:
cbusr = False #todo look at this
if self.is_root:
if self._name in VariableDef.USER_VARS:
cbusr = True
else:
cbusr = False
else:
# Look at parents
for parent in self.parents:
# todo look at this hasattr
if hasattr(parent, 'controlled_by_user') and parent.controlled_by_user == True:
cbusr = True
#self._controlled_by_user = cbusr
return cbusr
@property
def taint_source(self):
'''
Return the taint source for this Variable Definition if any; otherwise
return None.
$a = $_GET['test'];
$b = $a . $_GET['ok'];
print $b;
$b taint source is ['test', 'ok']
'''
taintsrc = self._taint_source
if taintsrc:
return taintsrc
else:
deps = list(itertools.chain((self,), self.deps()))
vars = []
for item in reversed(deps):
if not item.is_root:
for node in item.var_nodes:
vars.append(node)
sources = []
for v in vars:
if hasattr(v, '_parent_node') and type(v._parent_node) is phpast.ArrayOffset:
sources.append(v._parent_node.expr)
return sources
# todo remove below when finished
@property
def taint_source_old(self):
'''
Return the taint source for this Variable Definition if any; otherwise
return None.
'''
taintsrc = self._taint_source
if taintsrc:
return taintsrc
else:
deps = list(itertools.chain((self,), self.deps()))
v = deps[-2].var_node if len(deps) > 1 else None
if v and type(v._parent_node) is phpast.ArrayOffset:
return v._parent_node.expr
return None
def __eq__(self, ovar):
return self._scope == ovar._scope and \
self._lineno == ovar.lineno and \
self._name == ovar.name
def __gt__(self, ovar):
# This basically indicates precedence. Use it to know if a
# variable should override another.
return self._scope == ovar._scope and self._name == ovar.name and \
self._lineno > ovar.lineno or self.controlled_by_user
def __hash__(self):
return hash(self._name)
def __repr__(self):
return "<Var %s definition at line %s in '%s'>" % (self.name, self.lineno, self.get_file_name())
def __str__(self):
return ("Line %(lineno)s in '%(file_name)s'. Declaration of variable '%(name)s'."
" Status: %(status)s") % \
{'name': self.name,
'file_name': self.get_file_name(),
'lineno': self.lineno,
'status': self.controlled_by_user and \
("'Tainted'. Source: '%s'" % self.taint_source) or \
"'Clean'"
}
def is_tainted_for(self, vulnty):
if vulnty in self._safe_for:
return False
if self.parents:
for parent in self.parents:
if parent.is_tainted_for(vulnty) == True:
return True
return False
return True
def get_root_var(self):
'''
Return root var of var:
$a = 'bla';
$b = $a;
$c = $b;
$a is the root of $c
'''
while self.parent:
self = self.parent
return self
def deps(self):
'''
Generator function. Yields this var's dependencies.
'''
seen = set()
parents = self.parents
while parents:
for parent in parents:
if parent not in seen:
yield parent
seen.add(parent)
parents = parent.parents
def _get_ancestor_funccalls(self, node, funcs = None, level=0):
if funcs is None:
funcs = []
for n in NodeRep.parse(node):
if type(node) is phpast.BinaryOp:
# only parse direct nodes
for item in NodeRep.parse(node, 0, 0, 1):
self._get_ancestor_funccalls(item, funcs, level + 1)
break
if type(n) is phpast.FunctionCall:
funcs.append(n)
return funcs
def _get_ancestor_vars(self, node, vars = None, level=0):
'''
Return the ancestor Variables for this var.
For next example php code:
<? $a = 'ls' . $_GET['bar'] . $_POST['foo'];
$b = somefunc($a);
?>
we got that $_GET and $_POST are both $a's ancestor as well as $a is for $b.
Also determines if this var is safe for vulns
'''
if vars is None:
vars = []
for n in NodeRep.parse(node):
if type(node) is phpast.BinaryOp:
# only parse direct nodes
for item in NodeRep.parse(node, 0, 0, 1):
self._get_ancestor_vars(item, vars, level + 1)
break
if type(n) is phpast.Variable:
vars.append(n)
if level == 0:
# Securing functions
safe_for = {}
for n in vars:
# todo look at all vars
for fc in self._get_parent_nodes(n, [phpast.FunctionCall]):
# Don't set custom function calls params as parent, this is done by
# looking at the return vars
if fc in self.funccall_nodes and hasattr(fc, '_obj') and fc._obj.get_called_obj():
vars.remove(n)
continue
vulnty = get_vulnty_for_sec(fc.name)
if vulnty:
if vulnty not in safe_for:
safe_for[vulnty] = 1
else:
safe_for[vulnty] = safe_for[vulnty] + 1
for vulnty, count in safe_for.iteritems():
if count == len(vars):
self._safe_for.append(vulnty)
return vars
def set_clean(self):
self._controlled_by_user = None
self._taint_source = None
self._is_root = True
def get_file_name(self):
return self._scope.file_name | is_root | identifier_name |
index.js | $(function () {
//---------- Initializing plugins ----------
//------------------------------------------
var
galleryGrid = $('#gallery-grid'),
reviews = $('.reviews'),
preloader = $('.preloader'),
calendar = $('#calendarContainer'),
logo = '',
$window = $(window),
windowWidth = 0,
windowHeight = 0,
x = 0,
phoneFlag = false,
emailFlag = false,
action = '',
nonce = '',
closeMarkup = '<button title="Закрыть (Esc)" type="button" class="mfp-close">' +
'<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"' +
'viewBox="0 0 371.23 371.23" style="enable-background:new 0 0 371.23 371.23;" xml:space="preserve">' +
'<polygon points="371.23,21.213 350.018,0 185.615,164.402 21.213,0 0,21.213 164.402,185.615 0,350.018 21.213,371.23 ' +
'185.615,206.828 350.018,371.23 371.23,350.018 206.828,185.615 "/>' +
'</svg>' +
'</button>',
arrowMarkup = '<button title="%title%" type="button" class="mfp-arrow mfp-arrow-%dir%">' +
'<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" x="0px" y="0px" viewBox="0 0 407.437 407.437" style="enable-background:new 0 0 407.437 407.437;" xml:space="preserve" width="512px" height="512px">' +
'<polygon points="386.258,91.567 203.718,273.512 21.179,91.567 0,112.815 203.718,315.87 407.437,112.815 "></polygon>' +
'</svg>' +
'</button>';
//------------------------------------------
//------------------------------------------
//---------- Initializing plugins ----------
//------------------------------------------
// Page Scroll To ID
$('.menu a[href*="#"]').mPageScroll2id({
highlightClass: 'active',
highlightSelector: '.menu a',
scrollEasing: 'linear',
scrollSpeed: 300
});
// ImagesLoad and Isotope
galleryGrid.justifiedGallery({
'rowHeight': 180,
'lastRow': 'justify',
'margins': 10
})
.on('jg.complete', function () {
$(this).magnificPopup({
delegate: 'a',
type: 'image',
closeBtnInside: false,
closeMarkup: closeMarkup,
mainClass: 'mfp-fade',
removalDelay: 300,
gallery: {
enabled: true,
arrowMarkup: arrowMarkup,
tPrev: 'Назад (стрелка влево)',
tNext: 'Вперед (стрелка вправо)',
tCounter: '<span class="mfp-counter">%curr% из %total%</span>'
}
});
});
$('.reviews-gallery').justifiedGallery({
'rowHeight': 180,
'lastRow': 'justify',
'margins': 10,
'target': '_blank'
});
// Magnific Popup YouTube
$('.popup-youtube').magnificPopup({
type: 'iframe',
closeBtnInside: false,
closeMarkup: closeMarkup,
mainClass: 'mfp-fade',
removalDelay: 300
});
// Magnific Popup Reviews
$('.reviews__link').magnificPopup({
type: 'inline',
closeBtnInside: false,
closeMarkup: closeMarkup,
mainClass: 'mfp-fade',
removalDelay: 300
});
$(":input").inputmask(undefined, {
oncomplete: function () {
if (this.id === "phone") phoneFlag = true;
if (this.id === "email") emailFlag = true;
},
onincomplete: function () {
if (this.id === "phone") phoneFlag = false;
if (this.id === "email") emailFlag = false;
},
onKeyValidation: function (result, opts) {
if (this.id === "phone" && $(this).inputmask("getmetadata")) {
console.log($(this).inputmask("getmetadata")["cd"]);
}
}
});
//------------------------------------------
//------------------------------------------
//---------------- Methods -----------------
//------------------------------------------
function onResize() {
windowWidth = $(window).width();
windowHeight = $(window).height();
reviews.find('.reviews__content').css('height', 'auto').equivalent();
}onResize();
function headerActive() {
if ($window.scrollTop() > 0)
$('.header').addClass('active');
else
$('.header').removeClass('active');
}headerActive();
function arrowRotate() {
if ($window.scrollTop() > windowHe | eading, text, icon) {
return {
heading: heading,
text : text,
showHideTransition : 'fade',
textColor : '#fff',
icon: icon,
hideAfter : 5000,
stack : 5,
textAlign : 'left',
position : 'bottom-right',
loader: false
}
}
function calendarDataAjax() {
var responsive = '';
action = 'kolesnichenko_events_data';
nonce = calendar.data('nonce');
$.ajax({
type: 'post',
url: admin_ajax.ajaxurl,
data: 'action=' + action + '&nonce=' + nonce
}).done(function (data) {
responsive = $.parseJSON(data);
console.log(responsive);
calendarInit(responsive['dataSource']);
listCategoriesAddItem(responsive['dataCategory']);
}).fail(function (error) {
console.log(error);
});
}calendarDataAjax();
function calendarTransformData(calendarData) {
var
startDateArray = '',
endDateArray = '';
$.each(calendarData, function (i, value) {
startDateArray = value['startDate'].split('-');
endDateArray = value['endDate'].split('-');
value['startDate'] = new Date(startDateArray[0], startDateArray[1] - 1, startDateArray[2]);
value['endDate'] = new Date(endDateArray[0], endDateArray[1] - 1, endDateArray[2]);
});
return calendarData;
}
function listCategoriesAddItem(dataCategories) {
$.each(dataCategories, function (i, value) {
$('.calendar-legend__list')
.append('<li class="calendar-legend__item"><span style="background-color: ' + value['color'] + ';" class="calendar-legend__color"></span>' + value['name'] + '</li>')
})
}
function ggpopopverCreateContent(events) {
var
title = '',
text = '',
content = '',
time = '',
startTime = '',
endTime = '',
location = '',
address = '',
country = '',
city = '',
fullAddress = '',
phone = '',
meta = '',
thumbImg = '',
thumbURL = '',
arrowcolor = '',
corporateFlag = '',
eventsFlag = '',
weddingsFlag = '',
ggpopoverTitleClass = '';
ggpopoverTitleClass = 'ggpopover__title';
$.each(events, function (i, value) {
// category flags
corporateFlag = value['eventCategorySlug'] !== 'corporategraduationanniversaries';
eventsFlag = value['eventCategorySlug'] !== 'events';
weddingsFlag = value['eventCategorySlug'] !== 'weddings';
// thumbnail image
thumbImg = (value['thumbURL'] !== false) ? '<div class="ggpopover__img" style="background-image: url('+ value['thumbURL'] +');"></div>' : '';
// text
text = value['text'] !== '' ? '<p class="ggpopover__text">' + value['text'] + '</p>' : '';
ggpopoverTitleClass += text === '' ? ' ' + ggpopoverTitleClass+'_only' : '';
// title
title = value['title'] !== '' ? '<h3 class="' + ggpopoverTitleClass + '">' + value['title'] + '</h3>' : '';
// location
location = (value['location'] !== false) ? '<span><i class="ggpopover__icon icon-building"></i>' + value['location'] + '</span>' : '';
// fullAddress
address = (value['address'] !== false) ? value['address'] + ', ' : '';
city = (value['city'] !== false) ? value['city'] + ', ' : '';
country = (value['country'] !== false) ? value['country'] + ', ' : '';
fullAddress = address + city + country;
fullAddress = fullAddress.substr(0, fullAddress.length - 2); // отсекаем ', ' в конце
fullAddress = fullAddress !== '' ? '<span><i class="ggpopover__icon icon-location"></i>' + fullAddress + '</span>' : '';
// phone
phone = value['phone'] !== '' ? '<span><i class="ggpopover__icon icon-phone"></i>' + value['phone'] + '</span>' : '';
// time
startTime = (value['startTime'] !== '' && weddingsFlag && corporateFlag) ? value['startTime'] : '';
endTime = (value['endTime'] !== '' && weddingsFlag && corporateFlag && eventsFlag) ? ' - ' + value['endTime'] : '';
time = startTime + endTime;
time = (time !== '') ? '<span><i class="ggpopover__icon icon-clock"></i>' + time + '</span>' : '';
// meta data
meta = location + fullAddress + phone + time;
meta = (meta !== '') ? '<p class="ggpopover__meta">' + meta +'</p>' : '';
content = thumbImg + title + text + meta;
arrowcolor = $(meta).is(':empty') ? '#fff' : '#f7f7f7';
});
return {
content: content,
arrowcolor: arrowcolor
};
}
function ggpopoverInit(objElement, objData) {
var args = {
trigger: 'focus',
container: 'body',
html: true,
placement: 'top'
};
$(objElement).ggpopover(Object.assign(args, objData));
}
function calendarInit(dataSource) {
calendar.calendar({
language: 'ru',
dataSource: calendarTransformData(dataSource),
mouseOnDay: function (e) {
var
ggpopoverContentData = {},
element = '';
if (e.events.length > 0) {
ggpopoverContentData = ggpopopverCreateContent(e.events);
element = $(e.element).find('a.ggpopover__link');
ggpopoverInit(element, ggpopoverContentData);
}
},
renderEnd: function () {
var calendarDate = '';
$.each($('td.day:not(.old) .day-content'), function (i, value) {
calendarDate = $(value)['text']();
$(value)
.empty()
.html('<a class="ggpopover__link" href="#events">' + calendarDate + '</a>');
});
}
});
}
//------------------------------------------
//------------------------------------------
//---------------- Events ------------------
//------------------------------------------
$(".toggle-mnu").click(function () {
$(this).toggleClass("on");
$(".nav-menu-mobile").slideToggle();
return false;
});
$window.on('scroll', function () {
headerActive();
arrowRotate();
});
$window.on('resize', function () {
onResize();
});
$('.content__section').mousemove(function () {
$('.content__section--home, .content__section--about-me').css({
'filter': 'grayscale(50%)'
});
});
$('.content__section--home').mousemove(function (e) {
x = e.pageX;
if (windowWidth < 768) {
$(this).css({
'filter': 'grayscale(50%)'
});
return false;
}
if (x < windowWidth / 2) {
$(this).css({
'filter': 'grayscale(50%)'
});
}
else {
$(this).css({
'filter': 'grayscale(0)'
});
}
});
$('.content__section--about-me').mousemove(function (e) {
x = e.pageX;
if (windowWidth < 768) {
$(this).css({
'filter': 'grayscale(50%)'
});
return false;
}
if (x > windowWidth / 2) {
$(this).css({
'filter': 'grayscale(50%)'
});
}
else {
$(this).css({
'filter': 'grayscale(0)'
});
}
});
//E-mail Ajax Send
$('.contact-form__form').submit(function () { //Change
var
th = $(this),
confident = $('#confident');
action = 'send_mail';
nonce = th.data("nonce");
if (!phoneFlag) {
$.toast(toast_create(
'Внимание...',
'Введите корректный номер телефона.',
'warning'
));
return false;
}
if (!emailFlag) {
$.toast(toast_create(
'Внимание...',
'Введите корректный E-mail аддресс.',
'warning'
));
return false;
}
if (!confident.prop('checked')){
$.toast(toast_create(
'Внимание...',
'Отправляя сообщение, Вы должны принять пользовательское соглашение и подтвердить, что ознакомлены и согласны с политикой конфиденциальности данного сайта.',
'warning'
));
return false;
}
$.ajax({
type: 'post',
url: admin_ajax.ajaxurl,
data: th.serialize() + '&action=' + action + '&nonce=' + nonce
}).done(function (data) {
data = $.parseJSON(data);
if (data.status === 'success')
$.toast(toast_create('Успех...', data.msg, 'success'));
if (data.status === 'error')
$.toast(toast_create('Упс...', data.msg, 'error'));
setTimeout(function () {
// Done Functions
th.trigger("reset");
}, 1000);
}).fail(function (data) {
data = $.parseJSON(data);
$.toast(toast_create('Упс...', data.msg, 'error'));
});
return false;
});
//------------------------------------------
//------------------------------------------
logo = preloader.find('.preloader__logo, .preloader__load');
setTimeout(function () {
logo.fadeOut();
preloader.delay(350).fadeOut('slow');
}, 1000);
});
$.fn.equivalent = function () {
var $blocks = $(this),
maxH = $blocks.eq(0).height();
$blocks.each(function () {
maxH = ( $(this).height() > maxH ) ? $(this).height() : maxH;
});
$blocks.height(maxH);
};
| ight / 3) {
$('.arrow-down svg').css({
'transform': 'rotate(180deg)'
});
$('.arrow-down a').attr('href', '#home');
}
else {
$('.arrow-down svg').css({
'transform': 'rotate(0)'
});
$('.arrow-down a').attr('href', '#about-me');
}
}arrowRotate();
function toast_create(h | identifier_body |
index.js | $(function () {
//---------- Initializing plugins ----------
//------------------------------------------
var
galleryGrid = $('#gallery-grid'),
reviews = $('.reviews'),
preloader = $('.preloader'),
calendar = $('#calendarContainer'),
logo = '',
$window = $(window),
windowWidth = 0,
windowHeight = 0,
x = 0,
phoneFlag = false,
emailFlag = false,
action = '',
nonce = '',
closeMarkup = '<button title="Закрыть (Esc)" type="button" class="mfp-close">' +
'<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"' +
'viewBox="0 0 371.23 371.23" style="enable-background:new 0 0 371.23 371.23;" xml:space="preserve">' +
'<polygon points="371.23,21.213 350.018,0 185.615,164.402 21.213,0 0,21.213 164.402,185.615 0,350.018 21.213,371.23 ' +
'185.615,206.828 350.018,371.23 371.23,350.018 206.828,185.615 "/>' +
'</svg>' +
'</button>',
arrowMarkup = '<button title="%title%" type="button" class="mfp-arrow mfp-arrow-%dir%">' +
'<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" x="0px" y="0px" viewBox="0 0 407.437 407.437" style="enable-background:new 0 0 407.437 407.437;" xml:space="preserve" width="512px" height="512px">' +
'<polygon points="386.258,91.567 203.718,273.512 21.179,91.567 0,112.815 203.718,315.87 407.437,112.815 "></polygon>' +
'</svg>' +
'</button>';
//------------------------------------------
//------------------------------------------
//---------- Initializing plugins ----------
//------------------------------------------
// Page Scroll To ID
$('.menu a[href*="#"]').mPageScroll2id({
highlightClass: 'active',
highlightSelector: '.menu a',
scrollEasing: 'linear',
scrollSpeed: 300
});
// ImagesLoad and Isotope
galleryGrid.justifiedGallery({
'rowHeight': 180,
'lastRow': 'justify',
'margins': 10
})
.on('jg.complete', function () {
$(this).magnificPopup({
delegate: 'a',
type: 'image',
closeBtnInside: false,
closeMarkup: closeMarkup,
mainClass: 'mfp-fade',
removalDelay: 300,
gallery: {
enabled: true,
arrowMarkup: arrowMarkup,
tPrev: 'Назад (стрелка влево)',
tNext: 'Вперед (стрелка вправо)',
tCounter: '<span class="mfp-counter">%curr% из %total%</span>'
}
});
});
$('.reviews-gallery').justifiedGallery({
'rowHeight': 180,
| // Magnific Popup YouTube
$('.popup-youtube').magnificPopup({
type: 'iframe',
closeBtnInside: false,
closeMarkup: closeMarkup,
mainClass: 'mfp-fade',
removalDelay: 300
});
// Magnific Popup Reviews
$('.reviews__link').magnificPopup({
type: 'inline',
closeBtnInside: false,
closeMarkup: closeMarkup,
mainClass: 'mfp-fade',
removalDelay: 300
});
$(":input").inputmask(undefined, {
oncomplete: function () {
if (this.id === "phone") phoneFlag = true;
if (this.id === "email") emailFlag = true;
},
onincomplete: function () {
if (this.id === "phone") phoneFlag = false;
if (this.id === "email") emailFlag = false;
},
onKeyValidation: function (result, opts) {
if (this.id === "phone" && $(this).inputmask("getmetadata")) {
console.log($(this).inputmask("getmetadata")["cd"]);
}
}
});
//------------------------------------------
//------------------------------------------
//---------------- Methods -----------------
//------------------------------------------
function onResize() {
windowWidth = $(window).width();
windowHeight = $(window).height();
reviews.find('.reviews__content').css('height', 'auto').equivalent();
}onResize();
function headerActive() {
if ($window.scrollTop() > 0)
$('.header').addClass('active');
else
$('.header').removeClass('active');
}headerActive();
function arrowRotate() {
if ($window.scrollTop() > windowHeight / 3) {
$('.arrow-down svg').css({
'transform': 'rotate(180deg)'
});
$('.arrow-down a').attr('href', '#home');
}
else {
$('.arrow-down svg').css({
'transform': 'rotate(0)'
});
$('.arrow-down a').attr('href', '#about-me');
}
}arrowRotate();
function toast_create(heading, text, icon) {
return {
heading: heading,
text : text,
showHideTransition : 'fade',
textColor : '#fff',
icon: icon,
hideAfter : 5000,
stack : 5,
textAlign : 'left',
position : 'bottom-right',
loader: false
}
}
function calendarDataAjax() {
var responsive = '';
action = 'kolesnichenko_events_data';
nonce = calendar.data('nonce');
$.ajax({
type: 'post',
url: admin_ajax.ajaxurl,
data: 'action=' + action + '&nonce=' + nonce
}).done(function (data) {
responsive = $.parseJSON(data);
console.log(responsive);
calendarInit(responsive['dataSource']);
listCategoriesAddItem(responsive['dataCategory']);
}).fail(function (error) {
console.log(error);
});
}calendarDataAjax();
function calendarTransformData(calendarData) {
var
startDateArray = '',
endDateArray = '';
$.each(calendarData, function (i, value) {
startDateArray = value['startDate'].split('-');
endDateArray = value['endDate'].split('-');
value['startDate'] = new Date(startDateArray[0], startDateArray[1] - 1, startDateArray[2]);
value['endDate'] = new Date(endDateArray[0], endDateArray[1] - 1, endDateArray[2]);
});
return calendarData;
}
function listCategoriesAddItem(dataCategories) {
$.each(dataCategories, function (i, value) {
$('.calendar-legend__list')
.append('<li class="calendar-legend__item"><span style="background-color: ' + value['color'] + ';" class="calendar-legend__color"></span>' + value['name'] + '</li>')
})
}
function ggpopopverCreateContent(events) {
var
title = '',
text = '',
content = '',
time = '',
startTime = '',
endTime = '',
location = '',
address = '',
country = '',
city = '',
fullAddress = '',
phone = '',
meta = '',
thumbImg = '',
thumbURL = '',
arrowcolor = '',
corporateFlag = '',
eventsFlag = '',
weddingsFlag = '',
ggpopoverTitleClass = '';
ggpopoverTitleClass = 'ggpopover__title';
$.each(events, function (i, value) {
// category flags
corporateFlag = value['eventCategorySlug'] !== 'corporategraduationanniversaries';
eventsFlag = value['eventCategorySlug'] !== 'events';
weddingsFlag = value['eventCategorySlug'] !== 'weddings';
// thumbnail image
thumbImg = (value['thumbURL'] !== false) ? '<div class="ggpopover__img" style="background-image: url('+ value['thumbURL'] +');"></div>' : '';
// text
text = value['text'] !== '' ? '<p class="ggpopover__text">' + value['text'] + '</p>' : '';
ggpopoverTitleClass += text === '' ? ' ' + ggpopoverTitleClass+'_only' : '';
// title
title = value['title'] !== '' ? '<h3 class="' + ggpopoverTitleClass + '">' + value['title'] + '</h3>' : '';
// location
location = (value['location'] !== false) ? '<span><i class="ggpopover__icon icon-building"></i>' + value['location'] + '</span>' : '';
// fullAddress
address = (value['address'] !== false) ? value['address'] + ', ' : '';
city = (value['city'] !== false) ? value['city'] + ', ' : '';
country = (value['country'] !== false) ? value['country'] + ', ' : '';
fullAddress = address + city + country;
fullAddress = fullAddress.substr(0, fullAddress.length - 2); // отсекаем ', ' в конце
fullAddress = fullAddress !== '' ? '<span><i class="ggpopover__icon icon-location"></i>' + fullAddress + '</span>' : '';
// phone
phone = value['phone'] !== '' ? '<span><i class="ggpopover__icon icon-phone"></i>' + value['phone'] + '</span>' : '';
// time
startTime = (value['startTime'] !== '' && weddingsFlag && corporateFlag) ? value['startTime'] : '';
endTime = (value['endTime'] !== '' && weddingsFlag && corporateFlag && eventsFlag) ? ' - ' + value['endTime'] : '';
time = startTime + endTime;
time = (time !== '') ? '<span><i class="ggpopover__icon icon-clock"></i>' + time + '</span>' : '';
// meta data
meta = location + fullAddress + phone + time;
meta = (meta !== '') ? '<p class="ggpopover__meta">' + meta +'</p>' : '';
content = thumbImg + title + text + meta;
arrowcolor = $(meta).is(':empty') ? '#fff' : '#f7f7f7';
});
return {
content: content,
arrowcolor: arrowcolor
};
}
function ggpopoverInit(objElement, objData) {
var args = {
trigger: 'focus',
container: 'body',
html: true,
placement: 'top'
};
$(objElement).ggpopover(Object.assign(args, objData));
}
function calendarInit(dataSource) {
calendar.calendar({
language: 'ru',
dataSource: calendarTransformData(dataSource),
mouseOnDay: function (e) {
var
ggpopoverContentData = {},
element = '';
if (e.events.length > 0) {
ggpopoverContentData = ggpopopverCreateContent(e.events);
element = $(e.element).find('a.ggpopover__link');
ggpopoverInit(element, ggpopoverContentData);
}
},
renderEnd: function () {
var calendarDate = '';
$.each($('td.day:not(.old) .day-content'), function (i, value) {
calendarDate = $(value)['text']();
$(value)
.empty()
.html('<a class="ggpopover__link" href="#events">' + calendarDate + '</a>');
});
}
});
}
//------------------------------------------
//------------------------------------------
//---------------- Events ------------------
//------------------------------------------
$(".toggle-mnu").click(function () {
$(this).toggleClass("on");
$(".nav-menu-mobile").slideToggle();
return false;
});
$window.on('scroll', function () {
headerActive();
arrowRotate();
});
$window.on('resize', function () {
onResize();
});
$('.content__section').mousemove(function () {
$('.content__section--home, .content__section--about-me').css({
'filter': 'grayscale(50%)'
});
});
$('.content__section--home').mousemove(function (e) {
x = e.pageX;
if (windowWidth < 768) {
$(this).css({
'filter': 'grayscale(50%)'
});
return false;
}
if (x < windowWidth / 2) {
$(this).css({
'filter': 'grayscale(50%)'
});
}
else {
$(this).css({
'filter': 'grayscale(0)'
});
}
});
$('.content__section--about-me').mousemove(function (e) {
x = e.pageX;
if (windowWidth < 768) {
$(this).css({
'filter': 'grayscale(50%)'
});
return false;
}
if (x > windowWidth / 2) {
$(this).css({
'filter': 'grayscale(50%)'
});
}
else {
$(this).css({
'filter': 'grayscale(0)'
});
}
});
//E-mail Ajax Send
$('.contact-form__form').submit(function () { //Change
var
th = $(this),
confident = $('#confident');
action = 'send_mail';
nonce = th.data("nonce");
if (!phoneFlag) {
$.toast(toast_create(
'Внимание...',
'Введите корректный номер телефона.',
'warning'
));
return false;
}
if (!emailFlag) {
$.toast(toast_create(
'Внимание...',
'Введите корректный E-mail аддресс.',
'warning'
));
return false;
}
if (!confident.prop('checked')){
$.toast(toast_create(
'Внимание...',
'Отправляя сообщение, Вы должны принять пользовательское соглашение и подтвердить, что ознакомлены и согласны с политикой конфиденциальности данного сайта.',
'warning'
));
return false;
}
$.ajax({
type: 'post',
url: admin_ajax.ajaxurl,
data: th.serialize() + '&action=' + action + '&nonce=' + nonce
}).done(function (data) {
data = $.parseJSON(data);
if (data.status === 'success')
$.toast(toast_create('Успех...', data.msg, 'success'));
if (data.status === 'error')
$.toast(toast_create('Упс...', data.msg, 'error'));
setTimeout(function () {
// Done Functions
th.trigger("reset");
}, 1000);
}).fail(function (data) {
data = $.parseJSON(data);
$.toast(toast_create('Упс...', data.msg, 'error'));
});
return false;
});
//------------------------------------------
//------------------------------------------
logo = preloader.find('.preloader__logo, .preloader__load');
setTimeout(function () {
logo.fadeOut();
preloader.delay(350).fadeOut('slow');
}, 1000);
});
$.fn.equivalent = function () {
var $blocks = $(this),
maxH = $blocks.eq(0).height();
$blocks.each(function () {
maxH = ( $(this).height() > maxH ) ? $(this).height() : maxH;
});
$blocks.height(maxH);
}; | 'lastRow': 'justify',
'margins': 10,
'target': '_blank'
});
| random_line_split |
index.js | $(function () {
//---------- Initializing plugins ----------
//------------------------------------------
var
galleryGrid = $('#gallery-grid'),
reviews = $('.reviews'),
preloader = $('.preloader'),
calendar = $('#calendarContainer'),
logo = '',
$window = $(window),
windowWidth = 0,
windowHeight = 0,
x = 0,
phoneFlag = false,
emailFlag = false,
action = '',
nonce = '',
closeMarkup = '<button title="Закрыть (Esc)" type="button" class="mfp-close">' +
'<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"' +
'viewBox="0 0 371.23 371.23" style="enable-background:new 0 0 371.23 371.23;" xml:space="preserve">' +
'<polygon points="371.23,21.213 350.018,0 185.615,164.402 21.213,0 0,21.213 164.402,185.615 0,350.018 21.213,371.23 ' +
'185.615,206.828 350.018,371.23 371.23,350.018 206.828,185.615 "/>' +
'</svg>' +
'</button>',
arrowMarkup = '<button title="%title%" type="button" class="mfp-arrow mfp-arrow-%dir%">' +
'<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" x="0px" y="0px" viewBox="0 0 407.437 407.437" style="enable-background:new 0 0 407.437 407.437;" xml:space="preserve" width="512px" height="512px">' +
'<polygon points="386.258,91.567 203.718,273.512 21.179,91.567 0,112.815 203.718,315.87 407.437,112.815 "></polygon>' +
'</svg>' +
'</button>';
//------------------------------------------
//------------------------------------------
//---------- Initializing plugins ----------
//------------------------------------------
// Page Scroll To ID
$('.menu a[href*="#"]').mPageScroll2id({
highlightClass: 'active',
highlightSelector: '.menu a',
scrollEasing: 'linear',
scrollSpeed: 300
});
// ImagesLoad and Isotope
galleryGrid.justifiedGallery({
'rowHeight': 180,
'lastRow': 'justify',
'margins': 10
})
.on('jg.complete', function () {
$(this).magnificPopup({
delegate: 'a',
type: 'image',
closeBtnInside: false,
closeMarkup: closeMarkup,
mainClass: 'mfp-fade',
removalDelay: 300,
gallery: {
enabled: true,
arrowMarkup: arrowMarkup,
tPrev: 'Назад (стрелка влево)',
tNext: 'Вперед (стрелка вправо)',
tCounter: '<span class="mfp-counter">%curr% из %total%</span>'
}
});
});
$('.reviews-gallery').justifiedGallery({
'rowHeight': 180,
'lastRow': 'justify',
'margins': 10,
'target': '_blank'
});
// Magnific Popup YouTube
$('.popup-youtube').magnificPopup({
type: 'iframe',
closeBtnInside: false,
closeMarkup: closeMarkup,
mainClass: 'mfp-fade',
removalDelay: 300
});
// Magnific Popup Reviews
$('.reviews__link').magnificPopup({
type: 'inline',
closeBtnInside: false,
closeMarkup: closeMarkup,
mainClass: 'mfp-fade',
removalDelay: 300
});
$(":input").inputmask(undefined, {
oncomplete: function () {
if (this.id === "phone") phoneFlag = true;
if (this.id === "email") emailFlag = true;
},
onincomplete: function () {
if (this.id === "phone") phoneFlag = false;
if (this.id === "email") emailFlag = false;
},
onKeyValidation: function (result, opts) {
if (this.id === "phone" && $(this).inputmask("getmetadata")) {
console.log($(this).inputmask("getmetadata")["cd"]);
}
}
});
//------------------------------------------
//------------------------------------------
//---------------- Methods -----------------
//------------------------------------------
function onResize() {
windowWidth = $(window).width();
windowHeight = $(window).height();
reviews.find('.reviews__content').css('height', 'auto').equivalent();
}onResize();
function headerActive() {
if ($window.scrollTop() > 0)
$('.header').addClass('active');
else
$('.header').removeClass('active');
}headerActive();
function arrowRotate() {
if ($window.scrollTop() > windowHeight / 3) {
$('.arrow-down svg').css({
'transform': 'rotate(180deg)'
});
$('.arrow-down a').attr('href', '#home');
}
else {
$('.arrow-down svg').css({
'transform': 'rotate(0)'
});
$('.arrow-down a').attr('href', '#about-me');
}
}arrowRotate();
function toast_create(heading, text, icon) {
return {
heading: heading,
text : text,
showHideTransition : 'fade',
textColor : '#fff',
icon: icon,
hideAfter : 5000,
stack : 5,
textAlign : 'left',
position : 'bottom-right',
loader: false
}
}
function calendarDataAjax() {
var responsive = '';
action = 'kolesnichenko_events_data';
nonce = calendar.data('nonce');
$.ajax({
type: 'post',
url: admin_ajax.ajaxurl,
data: 'action=' + action + '&nonce=' + nonce
}).done(function (data) {
responsive = $.parseJSON(data);
console.log(responsive);
calendarInit(responsive['dataSource']);
listCategoriesAddItem(responsive['dataCategory']);
}).fail(function (error) {
console.log(error);
});
}calendarDataAjax();
function calendarTransformData(calendarData) {
var
startDateArray = '',
endDateArray = '';
$.each(calendarData, function (i, value) {
startDateArray = value['startDate'].split('-');
endDateArray = value['endDate'].split('-');
value['startDate'] = new Date(startDateArray[0], startDateArray[1] - 1, startDateArray[2]);
value['endDate'] = new Date(endDateArray[0], endDateArray[1] - 1, endDateArray[2]);
});
return calendarData;
}
function listCategoriesAddItem(dataCategories) {
$.each(dataCategories, function (i, value) {
$('.calendar-legend__list')
.append('<li class="calendar-legend__item"><span style="background-color: ' + value['color'] + ';" class="calendar-legend__color"></span>' + value['name'] + '</li>')
})
}
function ggpopopverCreateContent(events) {
var
title = '',
text = '',
content = '',
time = '',
startTime = '',
endTime = '',
location = '',
address = '',
country = '',
city = '',
fullAddress = '',
phone = '',
meta = '',
thumbImg = '',
thumbURL = '',
arrowcolor = '',
corporateFlag = '',
eventsFlag = '',
weddingsFlag = '',
ggpopoverTitleClass = '';
ggpopoverTitleClass = 'ggpopover__title';
$.each(events, function (i, value) {
// category flags
corporateFlag = value['eventCategorySlug'] !== 'corporategraduationanniversaries';
eventsFlag = value['eventCategorySlug'] !== 'events';
weddingsFlag = value['eventCategorySlug'] !== 'weddings';
// thumbnail image
thumbImg = (value['thumbURL'] !== false) ? '<div class="ggpopover__img" style="background-image: url('+ value['thumbURL'] +');"></div>' : '';
// text
text = value['text'] !== '' ? '<p class="ggpopover__text">' + value['text'] + '</p>' : '';
ggpopoverTitleClass += text === '' ? ' ' + ggpopoverTitleClass+'_only' : '';
// title
title = value['title'] !== '' ? '<h3 class="' + ggpopoverTitleClass + '">' + value['title'] + '</h3>' : '';
// location
location = (value['location'] !== false) ? '<span><i class="ggpopover__icon icon-building"></i>' + value['location'] + '</span>' : '';
// fullAddress
address = (value['address'] !== false) ? value['address'] + ', ' : '';
city = (value['city'] !== false) ? value['city'] + ', ' : '';
country = (value['country'] !== false) ? value['country'] + ', ' : '';
fullAddress = address + city + country;
fullAddress = fullAddress.substr(0, fullAddress.length - 2); // отсекаем ', ' в конце
fullAddress = fullAddress !== '' ? '<span><i class="ggpopover__icon icon-location"></i>' + fullAddress + '</span>' : '';
// phone
phone = value['phone'] !== '' ? '<span><i class="ggpopover__icon icon-phone"></i>' + value['phone'] + '</span>' : '';
// time
startTime = (value['startTime'] !== '' && weddingsFlag && corporateFlag) ? value['startTime'] : '';
endTime = (value['endTime'] !== '' && weddingsFlag && corporateFlag && eventsFlag) ? ' - ' + value['endTime'] : '';
time = startTime + endTime;
time = (time !== '') ? '<span><i class="ggpopover__icon icon-clock"></i>' + time + '</span>' : '';
// meta data
meta = location + fullAddress + phone + time;
meta = (meta !== '') ? '<p class="ggpopover__meta">' + meta +'</p>' : '';
content = thumbImg + title + text + meta;
arrowcolor = $(meta).is(':empty') ? '#fff' : '#f7f7f7';
});
return {
content: content,
arrowcolor: arrowcolor
};
}
function ggpopoverInit(objElement, objData) {
var args = {
trigger: 'focus',
container: 'body',
html: true,
placement: 'top'
};
$(objElement).ggpopover(Object.assign(args, objData));
}
function calendarInit(dataSource) {
calendar.calendar({
language: 'ru',
dataSource: calendarTransformData(dataSource),
mouseOnDay: function (e) {
var
ggpopoverContentData = {},
element = '';
if (e.events.length > 0) {
ggpopoverContentData = ggpopopverCreateContent(e.events);
element = $(e.element).find('a.ggpopover__link');
ggpopoverInit(element, ggpopoverContentData);
}
},
renderEnd: function () {
var calendarDate = '';
$.each($('td.day:not(.old) .day-content'), function (i, value) {
calendarDate = $(value)['text']();
$(value)
.empty()
.html('<a class="ggpopover__link" href="#events">' + calendarDate + '</a>');
});
}
});
}
//------------------------------------------
//------------------------------------------
//---------------- Events ------------------
//------------------------------------------
$(".toggle-mnu").click(function () {
$(this).toggleClass("on");
$(".nav-menu-mobile").slideToggle();
return false;
});
$window.on('scroll', function () {
headerActive();
arrowRotate();
});
$window.on('resize', function () {
onResize();
});
$('.content__section').mousemove(function () {
$('.content__section--home, .content__section--about-me').css({
'filter': 'grayscale(50%)'
});
});
$('.content__section--home').mousemove(function (e) {
x = e.pageX;
if (windowWidth < 768) {
$(this).css({
'filter': 'grayscale(50%)'
});
return false;
}
if (x < windowWidth / 2) {
$(this).css({
'filter': 'grayscale(50%)'
});
}
else {
$(this).css({
'filter': 'grayscale(0)'
});
}
});
$('.content__section--about-me').mousemove(function (e) {
x = e.pageX;
if (windowWidth < 768) {
$(this).css({
'filter': 'grayscale(50%)'
});
return false;
}
if (x > windowWidth / 2) {
$(this).css({
'filter': 'grayscale(50%)'
});
}
else {
$(this).css({
'filter': 'gr | form').submit(function () { //Change
var
th = $(this),
confident = $('#confident');
action = 'send_mail';
nonce = th.data("nonce");
if (!phoneFlag) {
$.toast(toast_create(
'Внимание...',
'Введите корректный номер телефона.',
'warning'
));
return false;
}
if (!emailFlag) {
$.toast(toast_create(
'Внимание...',
'Введите корректный E-mail аддресс.',
'warning'
));
return false;
}
if (!confident.prop('checked')){
$.toast(toast_create(
'Внимание...',
'Отправляя сообщение, Вы должны принять пользовательское соглашение и подтвердить, что ознакомлены и согласны с политикой конфиденциальности данного сайта.',
'warning'
));
return false;
}
$.ajax({
type: 'post',
url: admin_ajax.ajaxurl,
data: th.serialize() + '&action=' + action + '&nonce=' + nonce
}).done(function (data) {
data = $.parseJSON(data);
if (data.status === 'success')
$.toast(toast_create('Успех...', data.msg, 'success'));
if (data.status === 'error')
$.toast(toast_create('Упс...', data.msg, 'error'));
setTimeout(function () {
// Done Functions
th.trigger("reset");
}, 1000);
}).fail(function (data) {
data = $.parseJSON(data);
$.toast(toast_create('Упс...', data.msg, 'error'));
});
return false;
});
//------------------------------------------
//------------------------------------------
logo = preloader.find('.preloader__logo, .preloader__load');
setTimeout(function () {
logo.fadeOut();
preloader.delay(350).fadeOut('slow');
}, 1000);
});
$.fn.equivalent = function () {
var $blocks = $(this),
maxH = $blocks.eq(0).height();
$blocks.each(function () {
maxH = ( $(this).height() > maxH ) ? $(this).height() : maxH;
});
$blocks.height(maxH);
};
| ayscale(0)'
});
}
});
//E-mail Ajax Send
$('.contact-form__ | conditional_block |
index.js | $(function () {
//---------- Initializing plugins ----------
//------------------------------------------
var
galleryGrid = $('#gallery-grid'),
reviews = $('.reviews'),
preloader = $('.preloader'),
calendar = $('#calendarContainer'),
logo = '',
$window = $(window),
windowWidth = 0,
windowHeight = 0,
x = 0,
phoneFlag = false,
emailFlag = false,
action = '',
nonce = '',
closeMarkup = '<button title="Закрыть (Esc)" type="button" class="mfp-close">' +
'<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"' +
'viewBox="0 0 371.23 371.23" style="enable-background:new 0 0 371.23 371.23;" xml:space="preserve">' +
'<polygon points="371.23,21.213 350.018,0 185.615,164.402 21.213,0 0,21.213 164.402,185.615 0,350.018 21.213,371.23 ' +
'185.615,206.828 350.018,371.23 371.23,350.018 206.828,185.615 "/>' +
'</svg>' +
'</button>',
arrowMarkup = '<button title="%title%" type="button" class="mfp-arrow mfp-arrow-%dir%">' +
'<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" x="0px" y="0px" viewBox="0 0 407.437 407.437" style="enable-background:new 0 0 407.437 407.437;" xml:space="preserve" width="512px" height="512px">' +
'<polygon points="386.258,91.567 203.718,273.512 21.179,91.567 0,112.815 203.718,315.87 407.437,112.815 "></polygon>' +
'</svg>' +
'</button>';
//------------------------------------------
//------------------------------------------
//---------- Initializing plugins ----------
//------------------------------------------
// Page Scroll To ID
$('.menu a[href*="#"]').mPageScroll2id({
highlightClass: 'active',
highlightSelector: '.menu a',
scrollEasing: 'linear',
scrollSpeed: 300
});
// ImagesLoad and Isotope
galleryGrid.justifiedGallery({
'rowHeight': 180,
'lastRow': 'justify',
'margins': 10
})
.on('jg.complete', function () {
$(this).magnificPopup({
delegate: 'a',
type: 'image',
closeBtnInside: false,
closeMarkup: closeMarkup,
mainClass: 'mfp-fade',
removalDelay: 300,
gallery: {
enabled: true,
arrowMarkup: arrowMarkup,
tPrev: 'Назад (стрелка влево)',
tNext: 'Вперед (стрелка вправо)',
tCounter: '<span class="mfp-counter">%curr% из %total%</span>'
}
});
});
$('.reviews-gallery').justifiedGallery({
'rowHeight': 180,
'lastRow': 'justify',
'margins': 10,
'target': '_blank'
});
// Magnific Popup YouTube
$('.popup-youtube').magnificPopup({
type: 'iframe',
closeBtnInside: false,
closeMarkup: closeMarkup,
mainClass: 'mfp-fade',
removalDelay: 300
});
// Magnific Popup Reviews
$('.reviews__link').magnificPopup({
type: 'inline',
closeBtnInside: false,
closeMarkup: closeMarkup,
mainClass: 'mfp-fade',
removalDelay: 300
});
$(":input").inputmask(undefined, {
oncomplete: function () {
if (this.id === "phone") phoneFlag = true;
if (this.id === "email") emailFlag = true;
},
onincomplete: function () {
if (this.id === "phone") phoneFlag = false;
if (this.id === "email") emailFlag = false;
},
onKeyValidation: function (result, opts) {
if (this.id === "phone" && $(this).inputmask("getmetadata")) {
console.log($(this).inputmask("getmetadata")["cd"]);
}
}
});
//------------------------------------------
//------------------------------------------
//---------------- Methods -----------------
//------------------------------------------
function onResize() {
windowWidth = $(window).width();
windowHeight = $(window).height();
reviews.find('.reviews__content').css('height', 'auto').equivalent();
}onResize();
function headerActive() {
if ($window.scrollTop() > 0)
$('.header').addClass('active');
else
$('.header').removeClass('active');
}headerActive();
function arrowRotate() {
if ($window.scrollTop() > windowHeight / 3) {
$('.arrow-down svg').css({
'transform': 'rotate(180deg)'
});
$('.arrow-down a').attr('href', '#home');
}
else {
$('.arrow-down svg').css({
'transform': 'rotate(0)'
});
$('.arrow-down a').attr('href', '#about-me');
}
}arrowRotate();
function toast_create(heading, text, icon) {
return {
heading: heading,
text : text,
showHideTransition : 'fade',
textColor : '#fff',
icon: icon,
hideAfter : 5000,
stack : 5,
textAlign : 'left',
position : 'bottom-right',
loader: false
}
}
function calendarDataAjax() {
var responsive = '';
action = 'kolesnichenko_events_data';
nonce = calendar.data('nonce');
$.ajax({
type: 'post',
url: admin_ajax.ajaxurl,
data: 'action=' + action + '&nonce=' + nonce
}).done(function (data) {
responsive = $.parseJSON(data);
console.log(responsive);
calendarInit(responsive['dataSource']);
listCategoriesAddItem(responsive['dataCategory']);
}).fail(function (error) {
console.log(error);
});
}calendarDataAjax();
function calendarTransformData(calendarData) {
var
startDateArray = '',
endDateArray = '';
$.each(calendarData, function (i, value) {
startDateArray = value['startDate'].split('-');
endDateArray = value['endDate'].split('-');
value['startDate'] = new Date(startDateArray[0], startDateArray[1] - 1, startDateArray[2]);
value['endDate'] = new Date(endDateArray[0], endDateArray[1] - 1, endDateArray[2]);
});
return calendarData;
}
function listCategoriesAddItem(dataCategories) {
$.each(dataCategories, function (i, value) {
$('.calendar-legend__list')
.append('<li class="calendar-legend__item"><span style="background-color: ' + value['color'] + ';" class="calendar-legend__color"></span>' + value['name'] + '</li>')
})
}
function ggpopopverCreateContent(events) {
| = '',
text = '',
content = '',
time = '',
startTime = '',
endTime = '',
location = '',
address = '',
country = '',
city = '',
fullAddress = '',
phone = '',
meta = '',
thumbImg = '',
thumbURL = '',
arrowcolor = '',
corporateFlag = '',
eventsFlag = '',
weddingsFlag = '',
ggpopoverTitleClass = '';
ggpopoverTitleClass = 'ggpopover__title';
$.each(events, function (i, value) {
// category flags
corporateFlag = value['eventCategorySlug'] !== 'corporategraduationanniversaries';
eventsFlag = value['eventCategorySlug'] !== 'events';
weddingsFlag = value['eventCategorySlug'] !== 'weddings';
// thumbnail image
thumbImg = (value['thumbURL'] !== false) ? '<div class="ggpopover__img" style="background-image: url('+ value['thumbURL'] +');"></div>' : '';
// text
text = value['text'] !== '' ? '<p class="ggpopover__text">' + value['text'] + '</p>' : '';
ggpopoverTitleClass += text === '' ? ' ' + ggpopoverTitleClass+'_only' : '';
// title
title = value['title'] !== '' ? '<h3 class="' + ggpopoverTitleClass + '">' + value['title'] + '</h3>' : '';
// location
location = (value['location'] !== false) ? '<span><i class="ggpopover__icon icon-building"></i>' + value['location'] + '</span>' : '';
// fullAddress
address = (value['address'] !== false) ? value['address'] + ', ' : '';
city = (value['city'] !== false) ? value['city'] + ', ' : '';
country = (value['country'] !== false) ? value['country'] + ', ' : '';
fullAddress = address + city + country;
fullAddress = fullAddress.substr(0, fullAddress.length - 2); // отсекаем ', ' в конце
fullAddress = fullAddress !== '' ? '<span><i class="ggpopover__icon icon-location"></i>' + fullAddress + '</span>' : '';
// phone
phone = value['phone'] !== '' ? '<span><i class="ggpopover__icon icon-phone"></i>' + value['phone'] + '</span>' : '';
// time
startTime = (value['startTime'] !== '' && weddingsFlag && corporateFlag) ? value['startTime'] : '';
endTime = (value['endTime'] !== '' && weddingsFlag && corporateFlag && eventsFlag) ? ' - ' + value['endTime'] : '';
time = startTime + endTime;
time = (time !== '') ? '<span><i class="ggpopover__icon icon-clock"></i>' + time + '</span>' : '';
// meta data
meta = location + fullAddress + phone + time;
meta = (meta !== '') ? '<p class="ggpopover__meta">' + meta +'</p>' : '';
content = thumbImg + title + text + meta;
arrowcolor = $(meta).is(':empty') ? '#fff' : '#f7f7f7';
});
return {
content: content,
arrowcolor: arrowcolor
};
}
function ggpopoverInit(objElement, objData) {
var args = {
trigger: 'focus',
container: 'body',
html: true,
placement: 'top'
};
$(objElement).ggpopover(Object.assign(args, objData));
}
function calendarInit(dataSource) {
calendar.calendar({
language: 'ru',
dataSource: calendarTransformData(dataSource),
mouseOnDay: function (e) {
var
ggpopoverContentData = {},
element = '';
if (e.events.length > 0) {
ggpopoverContentData = ggpopopverCreateContent(e.events);
element = $(e.element).find('a.ggpopover__link');
ggpopoverInit(element, ggpopoverContentData);
}
},
renderEnd: function () {
var calendarDate = '';
$.each($('td.day:not(.old) .day-content'), function (i, value) {
calendarDate = $(value)['text']();
$(value)
.empty()
.html('<a class="ggpopover__link" href="#events">' + calendarDate + '</a>');
});
}
});
}
//------------------------------------------
//------------------------------------------
//---------------- Events ------------------
//------------------------------------------
$(".toggle-mnu").click(function () {
$(this).toggleClass("on");
$(".nav-menu-mobile").slideToggle();
return false;
});
$window.on('scroll', function () {
headerActive();
arrowRotate();
});
$window.on('resize', function () {
onResize();
});
$('.content__section').mousemove(function () {
$('.content__section--home, .content__section--about-me').css({
'filter': 'grayscale(50%)'
});
});
$('.content__section--home').mousemove(function (e) {
x = e.pageX;
if (windowWidth < 768) {
$(this).css({
'filter': 'grayscale(50%)'
});
return false;
}
if (x < windowWidth / 2) {
$(this).css({
'filter': 'grayscale(50%)'
});
}
else {
$(this).css({
'filter': 'grayscale(0)'
});
}
});
$('.content__section--about-me').mousemove(function (e) {
x = e.pageX;
if (windowWidth < 768) {
$(this).css({
'filter': 'grayscale(50%)'
});
return false;
}
if (x > windowWidth / 2) {
$(this).css({
'filter': 'grayscale(50%)'
});
}
else {
$(this).css({
'filter': 'grayscale(0)'
});
}
});
//E-mail Ajax Send
$('.contact-form__form').submit(function () { //Change
var
th = $(this),
confident = $('#confident');
action = 'send_mail';
nonce = th.data("nonce");
if (!phoneFlag) {
$.toast(toast_create(
'Внимание...',
'Введите корректный номер телефона.',
'warning'
));
return false;
}
if (!emailFlag) {
$.toast(toast_create(
'Внимание...',
'Введите корректный E-mail аддресс.',
'warning'
));
return false;
}
if (!confident.prop('checked')){
$.toast(toast_create(
'Внимание...',
'Отправляя сообщение, Вы должны принять пользовательское соглашение и подтвердить, что ознакомлены и согласны с политикой конфиденциальности данного сайта.',
'warning'
));
return false;
}
$.ajax({
type: 'post',
url: admin_ajax.ajaxurl,
data: th.serialize() + '&action=' + action + '&nonce=' + nonce
}).done(function (data) {
data = $.parseJSON(data);
if (data.status === 'success')
$.toast(toast_create('Успех...', data.msg, 'success'));
if (data.status === 'error')
$.toast(toast_create('Упс...', data.msg, 'error'));
setTimeout(function () {
// Done Functions
th.trigger("reset");
}, 1000);
}).fail(function (data) {
data = $.parseJSON(data);
$.toast(toast_create('Упс...', data.msg, 'error'));
});
return false;
});
//------------------------------------------
//------------------------------------------
logo = preloader.find('.preloader__logo, .preloader__load');
setTimeout(function () {
logo.fadeOut();
preloader.delay(350).fadeOut('slow');
}, 1000);
});
$.fn.equivalent = function () {
var $blocks = $(this),
maxH = $blocks.eq(0).height();
$blocks.each(function () {
maxH = ( $(this).height() > maxH ) ? $(this).height() : maxH;
});
$blocks.height(maxH);
};
| var
title | identifier_name |
Housing.py | import os
import tarfile
from six.moves import urllib
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import numpy as np
from zlib import crc32 #For compressing data...
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, StandardScaler
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
####################################################################################################
#This block of code is because Scikit-Learn 0.20 replaced sklearn.preprocessing.Imputer class with
#sklearn.impute.SimpleImputer class
# try:
# from sklearn.impute import SimpleImputer # Scikit-Learn 0.20+
# except ImportError:
# from sklearn.preprocessing import Imputer as SimpleImputer
####################################################################################################
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
#Custom transformer to add attributes
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, add_bedrooms_per_room = True): #No *args or **kargs
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self #Nothing else to do
def transform(self, X, y=None):
room_per_household = X[:, rooms_ix] / X[:, households_ix]
population_per_household = X[:, population_ix] / X[:, households_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
return np.c_[X, room_per_household, population_per_household, bedrooms_per_room]
else:
return np.c_[X, room_per_household, population_per_household]
#This transformer has one hyperparamter, "add_bedrooms_per_room", set to True by default and can easily allow for the
#determination of whether adding this attribute helps the Machine Learning algorithm (gate the data by adding
#a hyperparamter you are not %100 sure about
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
# This is not the best method to generate test data...
def split_train_test(data, test_ratio):
shuffled_indices = np.random.permutation(len(data)) #Randomly shuffles data around
test_set_size = int(len(data) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
def test_set_check(identifier, test_ratio):
return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32
if __name__ == "__main__":
fetch_housing_data()
#"housing" is a Pandas data frame
housing = load_housing_data()
print(housing.head())
print(housing.info())
# print(housing["longitude"].value_counts())
print(housing.describe())
housing.hist(bins=50, figsize=(20,15))
#plt.show()
#Split dataframe into random training and test sets
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
print(train_set)
print(test_set)
#Bin data into discrete intervals
housing["income_cat"] = pd.cut(housing["median_income"], bins=[0, 1.5, 3.0, 4.5, 6., np.inf], labels=[1, 2, 3, 4, 5])
plt.show(housing["income_cat"].hist()) #Now I can do Stratified Sampling (See Book)
#Prepare data for stratified sampling
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]): #This Function Performs Stratified Sampling Based on the Income Category (Recall Male and Female Example...)
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index] #.loc can access a group of rows or columns by label(s)
print(strat_test_set["income_cat"].value_counts()/len(strat_test_set)) #Compare to Histogram to See if Ratio of Test Set Data Matches the Height of the Bars
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
compare_props = pd.DataFrame({
"Overall": housing["income_cat"].value_counts()/len(housing),
"Stratified": strat_test_set["income_cat"].value_counts()/len(strat_test_set),
"Random": test_set["income_cat"].value_counts()/len(test_set)
}).sort_index()
compare_props["Rand. %Error"] = 100 * compare_props["Random"] / compare_props["Overall"] - 100
compare_props["Strat. %Error"] = 100 * compare_props["Stratified"] / compare_props["Overall"] - 100
print(compare_props)
for set_ in(strat_train_set, strat_test_set): #Removing the "Income Category (income_cat) Attribute...
set_.drop("income_cat", axis=1, inplace=True)
#Create a Copy of the Training Set so the Original is not Harmed
housing = strat_train_set.copy()
#Visualize the Data in a Scatterplot
plt.show(housing.plot(kind='scatter', x="longitude", y="latitude", alpha=0.1)) #alpha Helps Highlight High Density Areas
housing.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4,
s=housing["population"]/100, label="population", figsize=(10,7),
c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,)
#plt.show()
#Look at how each Attribute Correlates with Median House Value (Median House Value is the "Target" Attribute)
corr_matrix = housing.corr()
print(corr_matrix["median_house_value"].sort_values(ascending=False))
attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12,8))
#plt.show()
housing.plot(kind="scatter", x="median_income", y="median_house_value",
alpha=0.1)
#plt.show()
#Try Different Combinations of Attributes Before Feeding Data to Machine Learning Algorithm
housing["rooms_per_household"] = housing["total_rooms"]/housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"]/housing["total_rooms"]
housing["population_per_household"] = housing["population"]/housing["households"]
#See How Many Attributes There Are
# print(housing.info())
# print(housing.describe())
#Look at Correlation Matrix Again with Median House Value as the Target Value
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
#The Result: "bedrooms_per_room" is more correlated than "total_room" or "total_bedrooms" with Median Housing Value
#Next, the data will be prepared for machine learning algorithms
#First, we will revert to a clean training set. The predictors and labels will be separated since we
#don't want to apply the same transformation to the predictors and the target values
#Creates a Copy of the Data "strat_train_set"
#The predictors and labels are separated since we don't want to necessarily apply the same transformations to the
#predictors and target values
housing = strat_train_set.drop("median_house_value", axis=1) #Drop "median_house_value" from training set and creates a copy of the training set
###NOTE: I believe "median_house_value" was dropped because we are separating the predictors and labels...###
print(housing)
#Create a copy of the "median_house_value" attribute and make it the target
housing_labels = strat_train_set["median_house_value"].copy()
print(housing.info())
#Sample incomplete rows
sample_incomplete_rows = housing[housing.isnull().any(axis=1)].head()
print(sample_incomplete_rows)
print(housing_labels) #Print Training Set (This is only the "median_house_value" attribute)
#Recall: At this point, the "total-bedrooms" attribute is missing some values
#There are three options to take care of the attribute's missing values:
#1.) Get rid of the corresponding districts (rows)
#2.) Get rid of the whole attribute
#3.) Set the values to some value (zero, mean, median etc.)
housing.dropna(subset=["total_bedrooms"]) #Option #1.)
housing.drop("total_bedrooms", axis=1) #Option #2.)
median = housing["total_bedrooms"].median() #Option #3.)
housing["total_bedrooms"].fillna(median, inplace=True) #Whatever this median value is, save it -> We will need it
#later to replace missing values in the test set
#Use Scikit-Learn modules to take care of missing values: SimpleInputer
#First, create instance of SimpleInputer and specify that you want to replace each attribute's missing values with
#the median of the attribute
imputer = SimpleImputer(strategy="median")
#Because the median can only be computed on numerical attributes, we need to copy the data without text attribute
#"ocean_proximity"
housing_num = housing.drop("ocean_proximity", axis=1)
#Now fit the Imputer instance to the training date using the fit() method
imputer.fit(housing_num) #<-- Computed the median of each attribute and stored the results in its statistics_
#instance variable
#Since only "total_bedrooms" attribute was missing date, it only computed median values for that attribute, but
#once the system goes live there can be more missing attributes, so it's better to apply the Imputer to all of the
#numerical attributes
print(imputer.statistics_)
print(housing_num.median().values) #<-- This is just checking to ensure manually computing the median of the
#attribute is the same as using the imputter.fit
#Replace missing values in training set by learned medians (Transform the training set)
#Note: 433 is the median of the "total_bedrooms" attribute
X = imputer.transform(housing_num)
print(imputer.strategy)
#The result is a plain Numpy array containing the transformed features. Now we can put it back into a Pandas
#DataFrame using the following:
housing_tr = pd.DataFrame(X, columns=housing_num.columns, index=housing.index) #housing_num does not include
#"ocean_proximity" attribute
print("\nThis is the housing.index values:")
print(housing.index)
#Since we already stored the incomplete rows in
#"sample_incomplete_rows", we're just checking to ensure those values were replaced with the median
#Recall: the ".loc" locates values in a Pandas DataFrame <-- see documentation
print(housing_tr.loc[sample_incomplete_rows.index.values])
#NOTE: For pushing "bare" repo to Github: $ git remote add origin https://github.com/MSilberberg0619/Machine_Learning_Practice.git
#"ocean_proximity" was left out because it's a text attribute and so the median can't be computed
#To fix, convert these categories from text to numbers using Scikit-Learn's OrdinalEncoder class
housing_cat = housing[["ocean_proximity"]]
print(housing_cat.head(10))
ordinal_encoder = OrdinalEncoder()
housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)
print(housing_cat_encoded)
#Can use one-hot encoding to map attributes to categories so the values of the attributes that are more similar
#will have similar encoded values
#We don't want the model to assume some natural ordering to the data --> could result in poor performance or
#unexpected results
cat_encoder = OneHotEncoder()
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
print(housing_cat_1hot)
housing_cat_1hot.toarray()
print(housing_cat_1hot)
#List of categories using the encoder's categories instance variable
print(cat_encoder.categories_)
#May need to write custom transformations for tasks such as custom cleanup operations
#This transformer class adds the combined attributes discussed earlier
rooms_ix, bedrooms_ix, population_ix, households_ix = 3, 4, 5, 6 #Line 1.1
# get the right column indices: safer than hard-coding indices 3, 4, 5, 6
rooms_ix, bedrooms_ix, population_ix, household_ix = [ #Line 1.2
list(housing.columns).index(col)
for col in ("total_rooms", "total_bedrooms", "population", "households")]
#NOTE: Line 1.1 and Line 1.2 provide the same result, but Line 1.2 is safer, as noted
#Call Instance of "CombinedAttributesAdder Class
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False) #Call "CombinedAttributesAdder" constructor
housing_extra_attribs = attr_adder.transform(housing.values) #Call method from "CombinedAttributesAdder class
#Because PyCharm can such sometimes, see "Feature Scaling" on page 66 for information about one of the most
#important transformations: feature scaling. There are two common ways: MinMax (Normalization) and
#Standardization (Convert to Standard Normal Distribution)
#Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly
# if the individual features do not more or less look like standard normally distributed data
# (e.g. Gaussian with 0 mean and unit variance).
#Scikit-Learn provides the "Pipeline" class to help with the sequence of transformations
num_pipeline = Pipeline([ #<-- Pipeline constructor takes a list of name/estimator pairs
('imputer', SimpleImputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
]) #<-- All but last estimator must be transformers (must have a fit_transform() method)
housing_num_tr = num_pipeline.fit_transform(housing_num) #Utilize numerical pipeline provided by "Pipeline" class
#Calling the "Pipeline's" fit method calls fit_method() sequentially on all transformers, passing the output of each
#call as the parameter to the next call, until it reaches the final estimator which then the fit() method is called
#From the Scikit-Learn website: Sequentially apply a list of transforms and a final estimator. Intermediate steps of
# the pipeline must be ‘transforms’, that is, they must implement fit and transform methods. The final estimator
# only needs to implement fit. The transformers in the pipeline can be cached using memory argument
#Use ColumnTransformer from Scikit-Learn to apply transformation to all columns, whether categorical or numerical
num_attribs = list(housing_num)
cat_atribs = ["ocean_proximity"]
full_pipeline = ColumnTransformer([
("num", num_pipeline, num_attribs), #<-- Returns a dense matrix
("cat", OneHotEncoder(), cat_atribs), #<-- Returns a sparse matrix
]) #<-- Group together categorical and numerical column names and construct a ColumnTransformer
#Constructor requires a list of tuples with name, a transformer and a list of names (or indices) of columns that the
#transformer should be applied to
#1.) Numerical columns are transformed with the num_pipeline defined earlier |
#Train a Machine Learning model using linear regression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
#Try linear regression model out on a few instances from teh training set!
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print("Predictions:" ,lin_reg.predict(some_data_prepared))
print('\n')
print("Labels: ", list(some_labels))
#Measure the regression model's RMSE on the whole training set using Scikit-Learn's "mean_squared_error" function
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
print(lin_rmse) #<-- Model underfit the training data... (median_housing_values is between $120,000 and $265,000)
#The underfitting of the model says two things:
#1.) The features do not provide enough information to make good predictions
#2.) The model is not powerful enough
#Try to train with a DecisionTreeRegressor --> This is a powerful model that is capable of finding nonlinear
#relationships in the data (Decision Trees will be presented in more detail in Chapter 4)
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels) #<-- Training the model
housing_predictions = tree_reg.predict(housing_prepared) #<-- Test the trained model using the training set
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
print(tree_rmse)
#This gave an error of zero, but this is likely not possible. It is more likely that the model badly overfit the
#data. What'st he reason we believe this: Earlier, it was discussed that we don't want to touch the test set until
#we're ready to launch, so we should instead use part of the training set for training and part for model validation
#One way to evaluate the Decision Tree model would be to use the train_test_split function to split the
#training set into a smaller training set and a validation set, then train the models against the smaller
#training set and evaluate them against the validation set
#An alternative is to use Scikit-Learn's "cross-validation" feature that performs K-fold cross validation
#K-fold cross validation: Randomly splits the training set into 10 distinct subsets (folds), then it trains and
#evaluates the Decision Tree model 10 times, picking a different fold (subset) every evaluation time and
#training on the other 9 folds (subsets). This results in an array containing the 10 evaluation scores
scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error",
cv=10)
tree_rmse_scores = np.sqrt(-scores) #<-- Cross-validation expects a utility function instead of a cost function,
#so the scoring function os actually the OPPOSITE of the MSE (negative value)
print(tree_rmse_scores)
print("Scores: ", tree_rmse_scores)
print("Mean: ", tree_rmse_scores.mean())
print("Standard Deviation: ", tree_rmse_scores.std())
#Compute the same scores for the Linear Regression model
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error",
cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
print(lin_rmse_scores)
print("Scores: ", lin_rmse_scores)
print("Mean: ", lin_rmse_scores.mean()) #<-- Ten different rmse errors
print("Standard Deviation: ", lin_rmse_scores.std())
#Decision Tree is overfitting so badly that it performs worse than the Linear Regression model
################################### Aside ####################################################
#Cross-validation uses all of the data, one block at a time, to train a model and summarizes the
#results at the end
#In the end, every block of data is used for testing and we can compare methods by seeing how well
#they performed
#Can also use K-fold cross-validation to find the best value for a tuning parameter
#Essentially, 9 blocks of data are used for training and one for testing
##############################################################################################
#Try one more last model for now: RandomForestRegressor --> This is a Random Forest that works by training many
#Decision Trees on random subsets of the features, then averaging out their predictions.
forest_reg = RandomForestRegressor() #<-- Create an instance of the method from the Scikit-Learn package
forest_reg.fit(housing_prepared, housing_labels) #<-- Train the model
housing_predictions = forest_reg.predict(housing_prepared) # <-- Test the trained model using the training set
forest_mse = mean_squared_error(housing_labels, housing_predictions)
forest_rmse = np.sqrt(forest_mse)
print(forest_rmse)
# Compute the same scores for the Random Forest model
forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error",
cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
print(forest_rmse_scores)
print("Scores: ", forest_rmse_scores) #<-- Ten different rmse errors
print("Mean: ", forest_rmse_scores.mean())
print("Standard Deviation: ", forest_rmse_scores.std())
#NOTE: Building a model on top of manu other models is called Ensemble Learning
#The results show that the Random Forests perform better than the other two models, but the score on the training
#set is still much lower than on the validation sets, indicating that the model is still overfitting the training
#set. Some possible solutions to mitigate overfitting are as follows:
# 1.) Simplify the model
# 2.) Constrain it (regularize it)
# 3.) Get more training data
#Now it's time to fine-tune the list of selected models...
#One method is to use Scikit-Learn's GridSearchCV to search for viable hyperparameters --> Just tell the method
#which hyperparameters you want to experiment with and which values to try out and it will evaluate all the possible
#combinations of hyperparameters using cross-validation
#This code will search for the best combination of hyperparameter values for the RandomForestRegressor method
#The param_grid tells Scikit-Learn to first evaluate all 3 x 4 = 12 combinations of n_estimators and max_features
#hyperparameter values specified in the first dict (see first row in param_grid), then try all 2 x 3 = 6 combinations
#of hyperparameter values in the second dict (see second row in param_grid), but this time with the bootstrap
#hyperparameter set to False instead of True
#The grid search will ultimately explore 18 combinations of RandomForestRegressor hyperparameter values and will
#train each model five times (we are using five-fold cross validation). This results in a total of 18 x 5 = 90
#rounds of training!
param_grid = [
{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}, #n_estimators is used when you have
#have no idea what the hyperparameter values should be (one strategy is to try out consecutive power of 10)
]
forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid, cv=5,
scoring='neg_mean_squared_error',
return_train_score=True)
grid_search.fit(housing_prepared, housing_labels)
print(grid_search.best_params_) #<-- The results are the maximum values that were evaluated, so we may want to
#search again
print(grid_search.best_estimator_)
#If GridSearchCV is intitialized with refit=True --> retrains the whole training set once it find the best
#estimator using cross-validation (usually a good performance boost)
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
print(np.sqrt(-mean_score), params)
#The RMSE we obtained by iterating through the hyperparameter values is slightly better than the score we received
#from the default hyperparameter values. Thus we successfully fine-tuned the model
#We can also treat the data preparation steps as hyperparameter --> for example, we can determine whether to include
#a certain feature such as the "add_bedrooms_per_room", we can use this feature as a hyperparameter in the
#"CombinedAttributesAdder" transformer. We can also use it to determine how to handle outliers, missing features,
#feature selection and more
####################################################################################################################
#Grid search is sufficient when we are exploring few combinations, but if the hyperparameter search space is
#large, we should use "RandomizedSearchCV" instead --> instead of trying out all possible combinations like when
#we used "GridSearchCV", we use a given number of random combinations by selecting a random value for each
#hyperparameter at every iteration. There are two benefits to this approach:
# 1.) If the randomized search runs for 1,000 iterations, this approach will explore 1,000 different values for
# each hyperparameter
# 2.) We have more control over the computing budget you want to allocate to hyperparameter search just by
# adjusting the number of iterations
#We can also use ensemble methods, such as the Random Forest instead of Decision Trees, to fine-tune the system
####################################################################################################################
#We can gain insight by inspecting the best models and determine the relative importance of each attribute for
#making accurate predictions and drop less useful features.
# feature_importances = grid_search.best_estimator_
# # print(feature_importances)
#
# extra_attribs = ["rooms_per_household", "pop_per_household", "bedrooms_per_household"]
# cat_encoder = full_pipeline.named_transformers_["cat"]
# cat_one_hot_attribs = list(cat_encoder.categories_[0])
# attributes = num_attribs + extra_attribs + cat_one_hot_attribs
# sorted(zip(feature_importances, attributes), reverse=True)
#Once we have a system that performs well from tweaking the models, we can evaluate the final model on the test set
# To do this:
# 1.) Get the predictors and labels from the test set
# 2.) Run "full_pipeline()" to transform the data (call "transform()" not "fit_transform()") --> don't want to
# fit the test set!
# 3.) Evaluate the final model on the test set
final_model = grid_search.best_estimator_
X_test = strat_test_set.drop("median_house_value", axis=1)
y_test = strat_test_set["median_house_value"].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
print(final_rmse) #<-- Performance may be worse than what was resolved with cross-validation if you did a lot of
#hyperparameter tuning (end up with fine-tuned system that performs well on the validation data). However, if this
#happens, DON'T TWEAK THE HYPERPARAMETERS TO MAKE THE DATA LOOK GOOD ON THE TEST SET; the improvements may still
#not generalize to new data
####################################################################################################################
#Now that the system is ready to launch, we need to plug in production input data sources and write tests. Also, we
#should monitoring code to check the system's live performance at regular intervals and trigger alerts when it
#drops --> Models tend to "rot" over time, unless the models are regularly trained on fresh data
#Next, we should sample the system's predictions and evaluate them to evaluate the system's performance, which will
#require a human analysis. There should be a human evaluation pipeline in the system.
#We should also evaluate the system's input quality --> drop in performance can sometimes be due to a poor quality
#signal (malfunctioning sensor reading etc.). By monitoring the system's inputs this degradation can be caught
#much earlier.
#Finally, we should train models on a regular basis using fresh data with an automated prcess --> if not, a sparsely
#refreshed model and drop in performance or performance fluctuations may occur. If it's an online learning system,
#it's a good idea to save snapshots of its state at regular intervals so we can go back to that state if needed.
#################################################################################################################### | #2.) Categorical columns should be transformed using a OneHotEncoder
#Apply this ColumnTransformer to the housing data --> applies each transformer to the appropriate columns and
#concatenates the outputs along the second axis
housing_prepared = full_pipeline.fit_transform(housing) | random_line_split |
Housing.py | import os
import tarfile
from six.moves import urllib
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import numpy as np
from zlib import crc32 #For compressing data...
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, StandardScaler
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
####################################################################################################
#This block of code is because Scikit-Learn 0.20 replaced sklearn.preprocessing.Imputer class with
#sklearn.impute.SimpleImputer class
# try:
# from sklearn.impute import SimpleImputer # Scikit-Learn 0.20+
# except ImportError:
# from sklearn.preprocessing import Imputer as SimpleImputer
####################################################################################################
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
#Custom transformer to add attributes
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, add_bedrooms_per_room = True): #No *args or **kargs
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self #Nothing else to do
def | (self, X, y=None):
room_per_household = X[:, rooms_ix] / X[:, households_ix]
population_per_household = X[:, population_ix] / X[:, households_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
return np.c_[X, room_per_household, population_per_household, bedrooms_per_room]
else:
return np.c_[X, room_per_household, population_per_household]
#This transformer has one hyperparamter, "add_bedrooms_per_room", set to True by default and can easily allow for the
#determination of whether adding this attribute helps the Machine Learning algorithm (gate the data by adding
#a hyperparamter you are not %100 sure about
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
# This is not the best method to generate test data...
def split_train_test(data, test_ratio):
shuffled_indices = np.random.permutation(len(data)) #Randomly shuffles data around
test_set_size = int(len(data) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
def test_set_check(identifier, test_ratio):
return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32
if __name__ == "__main__":
fetch_housing_data()
#"housing" is a Pandas data frame
housing = load_housing_data()
print(housing.head())
print(housing.info())
# print(housing["longitude"].value_counts())
print(housing.describe())
housing.hist(bins=50, figsize=(20,15))
#plt.show()
#Split dataframe into random training and test sets
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
print(train_set)
print(test_set)
#Bin data into discrete intervals
housing["income_cat"] = pd.cut(housing["median_income"], bins=[0, 1.5, 3.0, 4.5, 6., np.inf], labels=[1, 2, 3, 4, 5])
plt.show(housing["income_cat"].hist()) #Now I can do Stratified Sampling (See Book)
#Prepare data for stratified sampling
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]): #This Function Performs Stratified Sampling Based on the Income Category (Recall Male and Female Example...)
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index] #.loc can access a group of rows or columns by label(s)
print(strat_test_set["income_cat"].value_counts()/len(strat_test_set)) #Compare to Histogram to See if Ratio of Test Set Data Matches the Height of the Bars
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
compare_props = pd.DataFrame({
"Overall": housing["income_cat"].value_counts()/len(housing),
"Stratified": strat_test_set["income_cat"].value_counts()/len(strat_test_set),
"Random": test_set["income_cat"].value_counts()/len(test_set)
}).sort_index()
compare_props["Rand. %Error"] = 100 * compare_props["Random"] / compare_props["Overall"] - 100
compare_props["Strat. %Error"] = 100 * compare_props["Stratified"] / compare_props["Overall"] - 100
print(compare_props)
for set_ in(strat_train_set, strat_test_set): #Removing the "Income Category (income_cat) Attribute...
set_.drop("income_cat", axis=1, inplace=True)
#Create a Copy of the Training Set so the Original is not Harmed
housing = strat_train_set.copy()
#Visualize the Data in a Scatterplot
plt.show(housing.plot(kind='scatter', x="longitude", y="latitude", alpha=0.1)) #alpha Helps Highlight High Density Areas
housing.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4,
s=housing["population"]/100, label="population", figsize=(10,7),
c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,)
#plt.show()
#Look at how each Attribute Correlates with Median House Value (Median House Value is the "Target" Attribute)
corr_matrix = housing.corr()
print(corr_matrix["median_house_value"].sort_values(ascending=False))
attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12,8))
#plt.show()
housing.plot(kind="scatter", x="median_income", y="median_house_value",
alpha=0.1)
#plt.show()
#Try Different Combinations of Attributes Before Feeding Data to Machine Learning Algorithm
housing["rooms_per_household"] = housing["total_rooms"]/housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"]/housing["total_rooms"]
housing["population_per_household"] = housing["population"]/housing["households"]
#See How Many Attributes There Are
# print(housing.info())
# print(housing.describe())
#Look at Correlation Matrix Again with Median House Value as the Target Value
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
#The Result: "bedrooms_per_room" is more correlated than "total_room" or "total_bedrooms" with Median Housing Value
#Next, the data will be prepared for machine learning algorithms
#First, we will revert to a clean training set. The predictors and labels will be separated since we
#don't want to apply the same transformation to the predictors and the target values
#Creates a Copy of the Data "strat_train_set"
#The predictors and labels are separated since we don't want to necessarily apply the same transformations to the
#predictors and target values
housing = strat_train_set.drop("median_house_value", axis=1) #Drop "median_house_value" from training set and creates a copy of the training set
###NOTE: I believe "median_house_value" was dropped because we are separating the predictors and labels...###
print(housing)
#Create a copy of the "median_house_value" attribute and make it the target
housing_labels = strat_train_set["median_house_value"].copy()
print(housing.info())
#Sample incomplete rows
sample_incomplete_rows = housing[housing.isnull().any(axis=1)].head()
print(sample_incomplete_rows)
print(housing_labels) #Print Training Set (This is only the "median_house_value" attribute)
#Recall: At this point, the "total-bedrooms" attribute is missing some values
#There are three options to take care of the attribute's missing values:
#1.) Get rid of the corresponding districts (rows)
#2.) Get rid of the whole attribute
#3.) Set the values to some value (zero, mean, median etc.)
housing.dropna(subset=["total_bedrooms"]) #Option #1.)
housing.drop("total_bedrooms", axis=1) #Option #2.)
median = housing["total_bedrooms"].median() #Option #3.)
housing["total_bedrooms"].fillna(median, inplace=True) #Whatever this median value is, save it -> We will need it
#later to replace missing values in the test set
#Use Scikit-Learn modules to take care of missing values: SimpleInputer
#First, create instance of SimpleInputer and specify that you want to replace each attribute's missing values with
#the median of the attribute
imputer = SimpleImputer(strategy="median")
#Because the median can only be computed on numerical attributes, we need to copy the data without text attribute
#"ocean_proximity"
housing_num = housing.drop("ocean_proximity", axis=1)
#Now fit the Imputer instance to the training date using the fit() method
imputer.fit(housing_num) #<-- Computed the median of each attribute and stored the results in its statistics_
#instance variable
#Since only "total_bedrooms" attribute was missing date, it only computed median values for that attribute, but
#once the system goes live there can be more missing attributes, so it's better to apply the Imputer to all of the
#numerical attributes
print(imputer.statistics_)
print(housing_num.median().values) #<-- This is just checking to ensure manually computing the median of the
#attribute is the same as using the imputter.fit
#Replace missing values in training set by learned medians (Transform the training set)
#Note: 433 is the median of the "total_bedrooms" attribute
X = imputer.transform(housing_num)
print(imputer.strategy)
#The result is a plain Numpy array containing the transformed features. Now we can put it back into a Pandas
#DataFrame using the following:
housing_tr = pd.DataFrame(X, columns=housing_num.columns, index=housing.index) #housing_num does not include
#"ocean_proximity" attribute
print("\nThis is the housing.index values:")
print(housing.index)
#Since we already stored the incomplete rows in
#"sample_incomplete_rows", we're just checking to ensure those values were replaced with the median
#Recall: the ".loc" locates values in a Pandas DataFrame <-- see documentation
print(housing_tr.loc[sample_incomplete_rows.index.values])
#NOTE: For pushing "bare" repo to Github: $ git remote add origin https://github.com/MSilberberg0619/Machine_Learning_Practice.git
#"ocean_proximity" was left out because it's a text attribute and so the median can't be computed
#To fix, convert these categories from text to numbers using Scikit-Learn's OrdinalEncoder class
housing_cat = housing[["ocean_proximity"]]
print(housing_cat.head(10))
ordinal_encoder = OrdinalEncoder()
housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)
print(housing_cat_encoded)
#Can use one-hot encoding to map attributes to categories so the values of the attributes that are more similar
#will have similar encoded values
#We don't want the model to assume some natural ordering to the data --> could result in poor performance or
#unexpected results
cat_encoder = OneHotEncoder()
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
print(housing_cat_1hot)
housing_cat_1hot.toarray()
print(housing_cat_1hot)
#List of categories using the encoder's categories instance variable
print(cat_encoder.categories_)
#May need to write custom transformations for tasks such as custom cleanup operations
#This transformer class adds the combined attributes discussed earlier
rooms_ix, bedrooms_ix, population_ix, households_ix = 3, 4, 5, 6 #Line 1.1
# get the right column indices: safer than hard-coding indices 3, 4, 5, 6
rooms_ix, bedrooms_ix, population_ix, household_ix = [ #Line 1.2
list(housing.columns).index(col)
for col in ("total_rooms", "total_bedrooms", "population", "households")]
#NOTE: Line 1.1 and Line 1.2 provide the same result, but Line 1.2 is safer, as noted
#Call Instance of "CombinedAttributesAdder Class
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False) #Call "CombinedAttributesAdder" constructor
housing_extra_attribs = attr_adder.transform(housing.values) #Call method from "CombinedAttributesAdder class
#Because PyCharm can such sometimes, see "Feature Scaling" on page 66 for information about one of the most
#important transformations: feature scaling. There are two common ways: MinMax (Normalization) and
#Standardization (Convert to Standard Normal Distribution)
#Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly
# if the individual features do not more or less look like standard normally distributed data
# (e.g. Gaussian with 0 mean and unit variance).
#Scikit-Learn provides the "Pipeline" class to help with the sequence of transformations
num_pipeline = Pipeline([ #<-- Pipeline constructor takes a list of name/estimator pairs
('imputer', SimpleImputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
]) #<-- All but last estimator must be transformers (must have a fit_transform() method)
housing_num_tr = num_pipeline.fit_transform(housing_num) #Utilize numerical pipeline provided by "Pipeline" class
#Calling the "Pipeline's" fit method calls fit_method() sequentially on all transformers, passing the output of each
#call as the parameter to the next call, until it reaches the final estimator which then the fit() method is called
#From the Scikit-Learn website: Sequentially apply a list of transforms and a final estimator. Intermediate steps of
# the pipeline must be ‘transforms’, that is, they must implement fit and transform methods. The final estimator
# only needs to implement fit. The transformers in the pipeline can be cached using memory argument
#Use ColumnTransformer from Scikit-Learn to apply transformation to all columns, whether categorical or numerical
num_attribs = list(housing_num)
cat_atribs = ["ocean_proximity"]
full_pipeline = ColumnTransformer([
("num", num_pipeline, num_attribs), #<-- Returns a dense matrix
("cat", OneHotEncoder(), cat_atribs), #<-- Returns a sparse matrix
]) #<-- Group together categorical and numerical column names and construct a ColumnTransformer
#Constructor requires a list of tuples with name, a transformer and a list of names (or indices) of columns that the
#transformer should be applied to
#1.) Numerical columns are transformed with the num_pipeline defined earlier
#2.) Categorical columns should be transformed using a OneHotEncoder
#Apply this ColumnTransformer to the housing data --> applies each transformer to the appropriate columns and
#concatenates the outputs along the second axis
housing_prepared = full_pipeline.fit_transform(housing)
#Train a Machine Learning model using linear regression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
#Try linear regression model out on a few instances from teh training set!
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print("Predictions:" ,lin_reg.predict(some_data_prepared))
print('\n')
print("Labels: ", list(some_labels))
#Measure the regression model's RMSE on the whole training set using Scikit-Learn's "mean_squared_error" function
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
print(lin_rmse) #<-- Model underfit the training data... (median_housing_values is between $120,000 and $265,000)
#The underfitting of the model says two things:
#1.) The features do not provide enough information to make good predictions
#2.) The model is not powerful enough
#Try to train with a DecisionTreeRegressor --> This is a powerful model that is capable of finding nonlinear
#relationships in the data (Decision Trees will be presented in more detail in Chapter 4)
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels) #<-- Training the model
housing_predictions = tree_reg.predict(housing_prepared) #<-- Test the trained model using the training set
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
print(tree_rmse)
#This gave an error of zero, but this is likely not possible. It is more likely that the model badly overfit the
#data. What'st he reason we believe this: Earlier, it was discussed that we don't want to touch the test set until
#we're ready to launch, so we should instead use part of the training set for training and part for model validation
#One way to evaluate the Decision Tree model would be to use the train_test_split function to split the
#training set into a smaller training set and a validation set, then train the models against the smaller
#training set and evaluate them against the validation set
#An alternative is to use Scikit-Learn's "cross-validation" feature that performs K-fold cross validation
#K-fold cross validation: Randomly splits the training set into 10 distinct subsets (folds), then it trains and
#evaluates the Decision Tree model 10 times, picking a different fold (subset) every evaluation time and
#training on the other 9 folds (subsets). This results in an array containing the 10 evaluation scores
scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error",
cv=10)
tree_rmse_scores = np.sqrt(-scores) #<-- Cross-validation expects a utility function instead of a cost function,
#so the scoring function os actually the OPPOSITE of the MSE (negative value)
print(tree_rmse_scores)
print("Scores: ", tree_rmse_scores)
print("Mean: ", tree_rmse_scores.mean())
print("Standard Deviation: ", tree_rmse_scores.std())
#Compute the same scores for the Linear Regression model
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error",
cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
print(lin_rmse_scores)
print("Scores: ", lin_rmse_scores)
print("Mean: ", lin_rmse_scores.mean()) #<-- Ten different rmse errors
print("Standard Deviation: ", lin_rmse_scores.std())
#Decision Tree is overfitting so badly that it performs worse than the Linear Regression model
################################### Aside ####################################################
#Cross-validation uses all of the data, one block at a time, to train a model and summarizes the
#results at the end
#In the end, every block of data is used for testing and we can compare methods by seeing how well
#they performed
#Can also use K-fold cross-validation to find the best value for a tuning parameter
#Essentially, 9 blocks of data are used for training and one for testing
##############################################################################################
#Try one more last model for now: RandomForestRegressor --> This is a Random Forest that works by training many
#Decision Trees on random subsets of the features, then averaging out their predictions.
forest_reg = RandomForestRegressor() #<-- Create an instance of the method from the Scikit-Learn package
forest_reg.fit(housing_prepared, housing_labels) #<-- Train the model
housing_predictions = forest_reg.predict(housing_prepared) # <-- Test the trained model using the training set
forest_mse = mean_squared_error(housing_labels, housing_predictions)
forest_rmse = np.sqrt(forest_mse)
print(forest_rmse)
# Compute the same scores for the Random Forest model
forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error",
cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
print(forest_rmse_scores)
print("Scores: ", forest_rmse_scores) #<-- Ten different rmse errors
print("Mean: ", forest_rmse_scores.mean())
print("Standard Deviation: ", forest_rmse_scores.std())
#NOTE: Building a model on top of manu other models is called Ensemble Learning
#The results show that the Random Forests perform better than the other two models, but the score on the training
#set is still much lower than on the validation sets, indicating that the model is still overfitting the training
#set. Some possible solutions to mitigate overfitting are as follows:
# 1.) Simplify the model
# 2.) Constrain it (regularize it)
# 3.) Get more training data
#Now it's time to fine-tune the list of selected models...
#One method is to use Scikit-Learn's GridSearchCV to search for viable hyperparameters --> Just tell the method
#which hyperparameters you want to experiment with and which values to try out and it will evaluate all the possible
#combinations of hyperparameters using cross-validation
#This code will search for the best combination of hyperparameter values for the RandomForestRegressor method
#The param_grid tells Scikit-Learn to first evaluate all 3 x 4 = 12 combinations of n_estimators and max_features
#hyperparameter values specified in the first dict (see first row in param_grid), then try all 2 x 3 = 6 combinations
#of hyperparameter values in the second dict (see second row in param_grid), but this time with the bootstrap
#hyperparameter set to False instead of True
#The grid search will ultimately explore 18 combinations of RandomForestRegressor hyperparameter values and will
#train each model five times (we are using five-fold cross validation). This results in a total of 18 x 5 = 90
#rounds of training!
param_grid = [
{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}, #n_estimators is used when you have
#have no idea what the hyperparameter values should be (one strategy is to try out consecutive power of 10)
]
forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid, cv=5,
scoring='neg_mean_squared_error',
return_train_score=True)
grid_search.fit(housing_prepared, housing_labels)
print(grid_search.best_params_) #<-- The results are the maximum values that were evaluated, so we may want to
#search again
print(grid_search.best_estimator_)
#If GridSearchCV is intitialized with refit=True --> retrains the whole training set once it find the best
#estimator using cross-validation (usually a good performance boost)
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
print(np.sqrt(-mean_score), params)
#The RMSE we obtained by iterating through the hyperparameter values is slightly better than the score we received
#from the default hyperparameter values. Thus we successfully fine-tuned the model
#We can also treat the data preparation steps as hyperparameter --> for example, we can determine whether to include
#a certain feature such as the "add_bedrooms_per_room", we can use this feature as a hyperparameter in the
#"CombinedAttributesAdder" transformer. We can also use it to determine how to handle outliers, missing features,
#feature selection and more
####################################################################################################################
#Grid search is sufficient when we are exploring few combinations, but if the hyperparameter search space is
#large, we should use "RandomizedSearchCV" instead --> instead of trying out all possible combinations like when
#we used "GridSearchCV", we use a given number of random combinations by selecting a random value for each
#hyperparameter at every iteration. There are two benefits to this approach:
# 1.) If the randomized search runs for 1,000 iterations, this approach will explore 1,000 different values for
# each hyperparameter
# 2.) We have more control over the computing budget you want to allocate to hyperparameter search just by
# adjusting the number of iterations
#We can also use ensemble methods, such as the Random Forest instead of Decision Trees, to fine-tune the system
####################################################################################################################
#We can gain insight by inspecting the best models and determine the relative importance of each attribute for
#making accurate predictions and drop less useful features.
# feature_importances = grid_search.best_estimator_
# # print(feature_importances)
#
# extra_attribs = ["rooms_per_household", "pop_per_household", "bedrooms_per_household"]
# cat_encoder = full_pipeline.named_transformers_["cat"]
# cat_one_hot_attribs = list(cat_encoder.categories_[0])
# attributes = num_attribs + extra_attribs + cat_one_hot_attribs
# sorted(zip(feature_importances, attributes), reverse=True)
#Once we have a system that performs well from tweaking the models, we can evaluate the final model on the test set
# To do this:
# 1.) Get the predictors and labels from the test set
# 2.) Run "full_pipeline()" to transform the data (call "transform()" not "fit_transform()") --> don't want to
# fit the test set!
# 3.) Evaluate the final model on the test set
final_model = grid_search.best_estimator_
X_test = strat_test_set.drop("median_house_value", axis=1)
y_test = strat_test_set["median_house_value"].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
print(final_rmse) #<-- Performance may be worse than what was resolved with cross-validation if you did a lot of
#hyperparameter tuning (end up with fine-tuned system that performs well on the validation data). However, if this
#happens, DON'T TWEAK THE HYPERPARAMETERS TO MAKE THE DATA LOOK GOOD ON THE TEST SET; the improvements may still
#not generalize to new data
####################################################################################################################
#Now that the system is ready to launch, we need to plug in production input data sources and write tests. Also, we
#should monitoring code to check the system's live performance at regular intervals and trigger alerts when it
#drops --> Models tend to "rot" over time, unless the models are regularly trained on fresh data
#Next, we should sample the system's predictions and evaluate them to evaluate the system's performance, which will
#require a human analysis. There should be a human evaluation pipeline in the system.
#We should also evaluate the system's input quality --> drop in performance can sometimes be due to a poor quality
#signal (malfunctioning sensor reading etc.). By monitoring the system's inputs this degradation can be caught
#much earlier.
#Finally, we should train models on a regular basis using fresh data with an automated prcess --> if not, a sparsely
#refreshed model and drop in performance or performance fluctuations may occur. If it's an online learning system,
#it's a good idea to save snapshots of its state at regular intervals so we can go back to that state if needed.
#################################################################################################################### | transform | identifier_name |
Housing.py | import os
import tarfile
from six.moves import urllib
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import numpy as np
from zlib import crc32 #For compressing data...
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, StandardScaler
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
####################################################################################################
#This block of code is because Scikit-Learn 0.20 replaced sklearn.preprocessing.Imputer class with
#sklearn.impute.SimpleImputer class
# try:
# from sklearn.impute import SimpleImputer # Scikit-Learn 0.20+
# except ImportError:
# from sklearn.preprocessing import Imputer as SimpleImputer
####################################################################################################
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
#Custom transformer to add attributes
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
|
#This transformer has one hyperparamter, "add_bedrooms_per_room", set to True by default and can easily allow for the
#determination of whether adding this attribute helps the Machine Learning algorithm (gate the data by adding
#a hyperparamter you are not %100 sure about
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
# This is not the best method to generate test data...
def split_train_test(data, test_ratio):
shuffled_indices = np.random.permutation(len(data)) #Randomly shuffles data around
test_set_size = int(len(data) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
def test_set_check(identifier, test_ratio):
return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32
if __name__ == "__main__":
fetch_housing_data()
#"housing" is a Pandas data frame
housing = load_housing_data()
print(housing.head())
print(housing.info())
# print(housing["longitude"].value_counts())
print(housing.describe())
housing.hist(bins=50, figsize=(20,15))
#plt.show()
#Split dataframe into random training and test sets
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
print(train_set)
print(test_set)
#Bin data into discrete intervals
housing["income_cat"] = pd.cut(housing["median_income"], bins=[0, 1.5, 3.0, 4.5, 6., np.inf], labels=[1, 2, 3, 4, 5])
plt.show(housing["income_cat"].hist()) #Now I can do Stratified Sampling (See Book)
#Prepare data for stratified sampling
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]): #This Function Performs Stratified Sampling Based on the Income Category (Recall Male and Female Example...)
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index] #.loc can access a group of rows or columns by label(s)
print(strat_test_set["income_cat"].value_counts()/len(strat_test_set)) #Compare to Histogram to See if Ratio of Test Set Data Matches the Height of the Bars
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
compare_props = pd.DataFrame({
"Overall": housing["income_cat"].value_counts()/len(housing),
"Stratified": strat_test_set["income_cat"].value_counts()/len(strat_test_set),
"Random": test_set["income_cat"].value_counts()/len(test_set)
}).sort_index()
compare_props["Rand. %Error"] = 100 * compare_props["Random"] / compare_props["Overall"] - 100
compare_props["Strat. %Error"] = 100 * compare_props["Stratified"] / compare_props["Overall"] - 100
print(compare_props)
for set_ in(strat_train_set, strat_test_set): #Removing the "Income Category (income_cat) Attribute...
set_.drop("income_cat", axis=1, inplace=True)
#Create a Copy of the Training Set so the Original is not Harmed
housing = strat_train_set.copy()
#Visualize the Data in a Scatterplot
plt.show(housing.plot(kind='scatter', x="longitude", y="latitude", alpha=0.1)) #alpha Helps Highlight High Density Areas
housing.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4,
s=housing["population"]/100, label="population", figsize=(10,7),
c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,)
#plt.show()
#Look at how each Attribute Correlates with Median House Value (Median House Value is the "Target" Attribute)
corr_matrix = housing.corr()
print(corr_matrix["median_house_value"].sort_values(ascending=False))
attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12,8))
#plt.show()
housing.plot(kind="scatter", x="median_income", y="median_house_value",
alpha=0.1)
#plt.show()
#Try Different Combinations of Attributes Before Feeding Data to Machine Learning Algorithm
housing["rooms_per_household"] = housing["total_rooms"]/housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"]/housing["total_rooms"]
housing["population_per_household"] = housing["population"]/housing["households"]
#See How Many Attributes There Are
# print(housing.info())
# print(housing.describe())
#Look at Correlation Matrix Again with Median House Value as the Target Value
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
#The Result: "bedrooms_per_room" is more correlated than "total_room" or "total_bedrooms" with Median Housing Value
#Next, the data will be prepared for machine learning algorithms
#First, we will revert to a clean training set. The predictors and labels will be separated since we
#don't want to apply the same transformation to the predictors and the target values
#Creates a Copy of the Data "strat_train_set"
#The predictors and labels are separated since we don't want to necessarily apply the same transformations to the
#predictors and target values
housing = strat_train_set.drop("median_house_value", axis=1) #Drop "median_house_value" from training set and creates a copy of the training set
###NOTE: I believe "median_house_value" was dropped because we are separating the predictors and labels...###
print(housing)
#Create a copy of the "median_house_value" attribute and make it the target
housing_labels = strat_train_set["median_house_value"].copy()
print(housing.info())
#Sample incomplete rows
sample_incomplete_rows = housing[housing.isnull().any(axis=1)].head()
print(sample_incomplete_rows)
print(housing_labels) #Print Training Set (This is only the "median_house_value" attribute)
#Recall: At this point, the "total-bedrooms" attribute is missing some values
#There are three options to take care of the attribute's missing values:
#1.) Get rid of the corresponding districts (rows)
#2.) Get rid of the whole attribute
#3.) Set the values to some value (zero, mean, median etc.)
housing.dropna(subset=["total_bedrooms"]) #Option #1.)
housing.drop("total_bedrooms", axis=1) #Option #2.)
median = housing["total_bedrooms"].median() #Option #3.)
housing["total_bedrooms"].fillna(median, inplace=True) #Whatever this median value is, save it -> We will need it
#later to replace missing values in the test set
#Use Scikit-Learn modules to take care of missing values: SimpleInputer
#First, create instance of SimpleInputer and specify that you want to replace each attribute's missing values with
#the median of the attribute
imputer = SimpleImputer(strategy="median")
#Because the median can only be computed on numerical attributes, we need to copy the data without text attribute
#"ocean_proximity"
housing_num = housing.drop("ocean_proximity", axis=1)
#Now fit the Imputer instance to the training date using the fit() method
imputer.fit(housing_num) #<-- Computed the median of each attribute and stored the results in its statistics_
#instance variable
#Since only "total_bedrooms" attribute was missing date, it only computed median values for that attribute, but
#once the system goes live there can be more missing attributes, so it's better to apply the Imputer to all of the
#numerical attributes
print(imputer.statistics_)
print(housing_num.median().values) #<-- This is just checking to ensure manually computing the median of the
#attribute is the same as using the imputter.fit
#Replace missing values in training set by learned medians (Transform the training set)
#Note: 433 is the median of the "total_bedrooms" attribute
X = imputer.transform(housing_num)
print(imputer.strategy)
#The result is a plain Numpy array containing the transformed features. Now we can put it back into a Pandas
#DataFrame using the following:
housing_tr = pd.DataFrame(X, columns=housing_num.columns, index=housing.index) #housing_num does not include
#"ocean_proximity" attribute
print("\nThis is the housing.index values:")
print(housing.index)
#Since we already stored the incomplete rows in
#"sample_incomplete_rows", we're just checking to ensure those values were replaced with the median
#Recall: the ".loc" locates values in a Pandas DataFrame <-- see documentation
print(housing_tr.loc[sample_incomplete_rows.index.values])
#NOTE: For pushing "bare" repo to Github: $ git remote add origin https://github.com/MSilberberg0619/Machine_Learning_Practice.git
#"ocean_proximity" was left out because it's a text attribute and so the median can't be computed
#To fix, convert these categories from text to numbers using Scikit-Learn's OrdinalEncoder class
housing_cat = housing[["ocean_proximity"]]
print(housing_cat.head(10))
ordinal_encoder = OrdinalEncoder()
housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)
print(housing_cat_encoded)
#Can use one-hot encoding to map attributes to categories so the values of the attributes that are more similar
#will have similar encoded values
#We don't want the model to assume some natural ordering to the data --> could result in poor performance or
#unexpected results
cat_encoder = OneHotEncoder()
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
print(housing_cat_1hot)
housing_cat_1hot.toarray()
print(housing_cat_1hot)
#List of categories using the encoder's categories instance variable
print(cat_encoder.categories_)
#May need to write custom transformations for tasks such as custom cleanup operations
#This transformer class adds the combined attributes discussed earlier
rooms_ix, bedrooms_ix, population_ix, households_ix = 3, 4, 5, 6 #Line 1.1
# get the right column indices: safer than hard-coding indices 3, 4, 5, 6
rooms_ix, bedrooms_ix, population_ix, household_ix = [ #Line 1.2
list(housing.columns).index(col)
for col in ("total_rooms", "total_bedrooms", "population", "households")]
#NOTE: Line 1.1 and Line 1.2 provide the same result, but Line 1.2 is safer, as noted
#Call Instance of "CombinedAttributesAdder Class
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False) #Call "CombinedAttributesAdder" constructor
housing_extra_attribs = attr_adder.transform(housing.values) #Call method from "CombinedAttributesAdder class
#Because PyCharm can such sometimes, see "Feature Scaling" on page 66 for information about one of the most
#important transformations: feature scaling. There are two common ways: MinMax (Normalization) and
#Standardization (Convert to Standard Normal Distribution)
#Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly
# if the individual features do not more or less look like standard normally distributed data
# (e.g. Gaussian with 0 mean and unit variance).
#Scikit-Learn provides the "Pipeline" class to help with the sequence of transformations
num_pipeline = Pipeline([ #<-- Pipeline constructor takes a list of name/estimator pairs
('imputer', SimpleImputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
]) #<-- All but last estimator must be transformers (must have a fit_transform() method)
housing_num_tr = num_pipeline.fit_transform(housing_num) #Utilize numerical pipeline provided by "Pipeline" class
#Calling the "Pipeline's" fit method calls fit_method() sequentially on all transformers, passing the output of each
#call as the parameter to the next call, until it reaches the final estimator which then the fit() method is called
#From the Scikit-Learn website: Sequentially apply a list of transforms and a final estimator. Intermediate steps of
# the pipeline must be ‘transforms’, that is, they must implement fit and transform methods. The final estimator
# only needs to implement fit. The transformers in the pipeline can be cached using memory argument
#Use ColumnTransformer from Scikit-Learn to apply transformation to all columns, whether categorical or numerical
num_attribs = list(housing_num)
cat_atribs = ["ocean_proximity"]
full_pipeline = ColumnTransformer([
("num", num_pipeline, num_attribs), #<-- Returns a dense matrix
("cat", OneHotEncoder(), cat_atribs), #<-- Returns a sparse matrix
]) #<-- Group together categorical and numerical column names and construct a ColumnTransformer
#Constructor requires a list of tuples with name, a transformer and a list of names (or indices) of columns that the
#transformer should be applied to
#1.) Numerical columns are transformed with the num_pipeline defined earlier
#2.) Categorical columns should be transformed using a OneHotEncoder
#Apply this ColumnTransformer to the housing data --> applies each transformer to the appropriate columns and
#concatenates the outputs along the second axis
housing_prepared = full_pipeline.fit_transform(housing)
#Train a Machine Learning model using linear regression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
#Try linear regression model out on a few instances from teh training set!
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print("Predictions:" ,lin_reg.predict(some_data_prepared))
print('\n')
print("Labels: ", list(some_labels))
#Measure the regression model's RMSE on the whole training set using Scikit-Learn's "mean_squared_error" function
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
print(lin_rmse) #<-- Model underfit the training data... (median_housing_values is between $120,000 and $265,000)
#The underfitting of the model says two things:
#1.) The features do not provide enough information to make good predictions
#2.) The model is not powerful enough
#Try to train with a DecisionTreeRegressor --> This is a powerful model that is capable of finding nonlinear
#relationships in the data (Decision Trees will be presented in more detail in Chapter 4)
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels) #<-- Training the model
housing_predictions = tree_reg.predict(housing_prepared) #<-- Test the trained model using the training set
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
print(tree_rmse)
#This gave an error of zero, but this is likely not possible. It is more likely that the model badly overfit the
#data. What'st he reason we believe this: Earlier, it was discussed that we don't want to touch the test set until
#we're ready to launch, so we should instead use part of the training set for training and part for model validation
#One way to evaluate the Decision Tree model would be to use the train_test_split function to split the
#training set into a smaller training set and a validation set, then train the models against the smaller
#training set and evaluate them against the validation set
#An alternative is to use Scikit-Learn's "cross-validation" feature that performs K-fold cross validation
#K-fold cross validation: Randomly splits the training set into 10 distinct subsets (folds), then it trains and
#evaluates the Decision Tree model 10 times, picking a different fold (subset) every evaluation time and
#training on the other 9 folds (subsets). This results in an array containing the 10 evaluation scores
scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error",
cv=10)
tree_rmse_scores = np.sqrt(-scores) #<-- Cross-validation expects a utility function instead of a cost function,
#so the scoring function os actually the OPPOSITE of the MSE (negative value)
print(tree_rmse_scores)
print("Scores: ", tree_rmse_scores)
print("Mean: ", tree_rmse_scores.mean())
print("Standard Deviation: ", tree_rmse_scores.std())
#Compute the same scores for the Linear Regression model
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error",
cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
print(lin_rmse_scores)
print("Scores: ", lin_rmse_scores)
print("Mean: ", lin_rmse_scores.mean()) #<-- Ten different rmse errors
print("Standard Deviation: ", lin_rmse_scores.std())
#Decision Tree is overfitting so badly that it performs worse than the Linear Regression model
################################### Aside ####################################################
#Cross-validation uses all of the data, one block at a time, to train a model and summarizes the
#results at the end
#In the end, every block of data is used for testing and we can compare methods by seeing how well
#they performed
#Can also use K-fold cross-validation to find the best value for a tuning parameter
#Essentially, 9 blocks of data are used for training and one for testing
##############################################################################################
#Try one more last model for now: RandomForestRegressor --> This is a Random Forest that works by training many
#Decision Trees on random subsets of the features, then averaging out their predictions.
forest_reg = RandomForestRegressor() #<-- Create an instance of the method from the Scikit-Learn package
forest_reg.fit(housing_prepared, housing_labels) #<-- Train the model
housing_predictions = forest_reg.predict(housing_prepared) # <-- Test the trained model using the training set
forest_mse = mean_squared_error(housing_labels, housing_predictions)
forest_rmse = np.sqrt(forest_mse)
print(forest_rmse)
# Compute the same scores for the Random Forest model
forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error",
cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
print(forest_rmse_scores)
print("Scores: ", forest_rmse_scores) #<-- Ten different rmse errors
print("Mean: ", forest_rmse_scores.mean())
print("Standard Deviation: ", forest_rmse_scores.std())
#NOTE: Building a model on top of manu other models is called Ensemble Learning
#The results show that the Random Forests perform better than the other two models, but the score on the training
#set is still much lower than on the validation sets, indicating that the model is still overfitting the training
#set. Some possible solutions to mitigate overfitting are as follows:
# 1.) Simplify the model
# 2.) Constrain it (regularize it)
# 3.) Get more training data
#Now it's time to fine-tune the list of selected models...
#One method is to use Scikit-Learn's GridSearchCV to search for viable hyperparameters --> Just tell the method
#which hyperparameters you want to experiment with and which values to try out and it will evaluate all the possible
#combinations of hyperparameters using cross-validation
#This code will search for the best combination of hyperparameter values for the RandomForestRegressor method
#The param_grid tells Scikit-Learn to first evaluate all 3 x 4 = 12 combinations of n_estimators and max_features
#hyperparameter values specified in the first dict (see first row in param_grid), then try all 2 x 3 = 6 combinations
#of hyperparameter values in the second dict (see second row in param_grid), but this time with the bootstrap
#hyperparameter set to False instead of True
#The grid search will ultimately explore 18 combinations of RandomForestRegressor hyperparameter values and will
#train each model five times (we are using five-fold cross validation). This results in a total of 18 x 5 = 90
#rounds of training!
param_grid = [
{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}, #n_estimators is used when you have
#have no idea what the hyperparameter values should be (one strategy is to try out consecutive power of 10)
]
forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid, cv=5,
scoring='neg_mean_squared_error',
return_train_score=True)
grid_search.fit(housing_prepared, housing_labels)
print(grid_search.best_params_) #<-- The results are the maximum values that were evaluated, so we may want to
#search again
print(grid_search.best_estimator_)
#If GridSearchCV is intitialized with refit=True --> retrains the whole training set once it find the best
#estimator using cross-validation (usually a good performance boost)
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
print(np.sqrt(-mean_score), params)
#The RMSE we obtained by iterating through the hyperparameter values is slightly better than the score we received
#from the default hyperparameter values. Thus we successfully fine-tuned the model
#We can also treat the data preparation steps as hyperparameter --> for example, we can determine whether to include
#a certain feature such as the "add_bedrooms_per_room", we can use this feature as a hyperparameter in the
#"CombinedAttributesAdder" transformer. We can also use it to determine how to handle outliers, missing features,
#feature selection and more
####################################################################################################################
#Grid search is sufficient when we are exploring few combinations, but if the hyperparameter search space is
#large, we should use "RandomizedSearchCV" instead --> instead of trying out all possible combinations like when
#we used "GridSearchCV", we use a given number of random combinations by selecting a random value for each
#hyperparameter at every iteration. There are two benefits to this approach:
# 1.) If the randomized search runs for 1,000 iterations, this approach will explore 1,000 different values for
# each hyperparameter
# 2.) We have more control over the computing budget you want to allocate to hyperparameter search just by
# adjusting the number of iterations
#We can also use ensemble methods, such as the Random Forest instead of Decision Trees, to fine-tune the system
####################################################################################################################
#We can gain insight by inspecting the best models and determine the relative importance of each attribute for
#making accurate predictions and drop less useful features.
# feature_importances = grid_search.best_estimator_
# # print(feature_importances)
#
# extra_attribs = ["rooms_per_household", "pop_per_household", "bedrooms_per_household"]
# cat_encoder = full_pipeline.named_transformers_["cat"]
# cat_one_hot_attribs = list(cat_encoder.categories_[0])
# attributes = num_attribs + extra_attribs + cat_one_hot_attribs
# sorted(zip(feature_importances, attributes), reverse=True)
#Once we have a system that performs well from tweaking the models, we can evaluate the final model on the test set
# To do this:
# 1.) Get the predictors and labels from the test set
# 2.) Run "full_pipeline()" to transform the data (call "transform()" not "fit_transform()") --> don't want to
# fit the test set!
# 3.) Evaluate the final model on the test set
final_model = grid_search.best_estimator_
X_test = strat_test_set.drop("median_house_value", axis=1)
y_test = strat_test_set["median_house_value"].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
print(final_rmse) #<-- Performance may be worse than what was resolved with cross-validation if you did a lot of
#hyperparameter tuning (end up with fine-tuned system that performs well on the validation data). However, if this
#happens, DON'T TWEAK THE HYPERPARAMETERS TO MAKE THE DATA LOOK GOOD ON THE TEST SET; the improvements may still
#not generalize to new data
####################################################################################################################
#Now that the system is ready to launch, we need to plug in production input data sources and write tests. Also, we
#should monitoring code to check the system's live performance at regular intervals and trigger alerts when it
#drops --> Models tend to "rot" over time, unless the models are regularly trained on fresh data
#Next, we should sample the system's predictions and evaluate them to evaluate the system's performance, which will
#require a human analysis. There should be a human evaluation pipeline in the system.
#We should also evaluate the system's input quality --> drop in performance can sometimes be due to a poor quality
#signal (malfunctioning sensor reading etc.). By monitoring the system's inputs this degradation can be caught
#much earlier.
#Finally, we should train models on a regular basis using fresh data with an automated prcess --> if not, a sparsely
#refreshed model and drop in performance or performance fluctuations may occur. If it's an online learning system,
#it's a good idea to save snapshots of its state at regular intervals so we can go back to that state if needed.
#################################################################################################################### | def __init__(self, add_bedrooms_per_room = True): #No *args or **kargs
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self #Nothing else to do
def transform(self, X, y=None):
room_per_household = X[:, rooms_ix] / X[:, households_ix]
population_per_household = X[:, population_ix] / X[:, households_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
return np.c_[X, room_per_household, population_per_household, bedrooms_per_room]
else:
return np.c_[X, room_per_household, population_per_household] | identifier_body |
Housing.py | import os
import tarfile
from six.moves import urllib
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import numpy as np
from zlib import crc32 #For compressing data...
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, StandardScaler
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
####################################################################################################
#This block of code is because Scikit-Learn 0.20 replaced sklearn.preprocessing.Imputer class with
#sklearn.impute.SimpleImputer class
# try:
# from sklearn.impute import SimpleImputer # Scikit-Learn 0.20+
# except ImportError:
# from sklearn.preprocessing import Imputer as SimpleImputer
####################################################################################################
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
#Custom transformer to add attributes
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, add_bedrooms_per_room = True): #No *args or **kargs
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self #Nothing else to do
def transform(self, X, y=None):
room_per_household = X[:, rooms_ix] / X[:, households_ix]
population_per_household = X[:, population_ix] / X[:, households_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
return np.c_[X, room_per_household, population_per_household, bedrooms_per_room]
else:
return np.c_[X, room_per_household, population_per_household]
#This transformer has one hyperparamter, "add_bedrooms_per_room", set to True by default and can easily allow for the
#determination of whether adding this attribute helps the Machine Learning algorithm (gate the data by adding
#a hyperparamter you are not %100 sure about
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
# Simple positional train/test split. Prefer stratified splitting for the
# final experiments; see StratifiedShuffleSplit below.
def split_train_test(data, test_ratio, random_state=None):
    """Randomly partition ``data`` into a train set and a test set.

    Parameters
    ----------
    data : pandas.DataFrame
        Rows are shuffled and partitioned by position.
    test_ratio : float
        Fraction (0..1) of rows placed in the test set.
    random_state : int or None, optional
        Seed for the shuffle. The original version was always unseeded, so
        every run produced a different split (test rows could leak into
        training across runs). ``None`` keeps that legacy behaviour.

    Returns
    -------
    tuple of pandas.DataFrame
        ``(train_set, test_set)``.
    """
    rng = np.random if random_state is None else np.random.RandomState(random_state)
    shuffled_indices = rng.permutation(len(data))
    test_set_size = int(len(data) * test_ratio)
    test_indices = shuffled_indices[:test_set_size]
    train_indices = shuffled_indices[test_set_size:]
    return data.iloc[train_indices], data.iloc[test_indices]
def test_set_check(identifier, test_ratio):
return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32
if __name__ == "__main__":
fetch_housing_data()
#"housing" is a Pandas data frame
housing = load_housing_data()
print(housing.head())
print(housing.info())
# print(housing["longitude"].value_counts())
print(housing.describe())
housing.hist(bins=50, figsize=(20,15))
#plt.show()
#Split dataframe into random training and test sets
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
print(train_set)
print(test_set)
#Bin data into discrete intervals
housing["income_cat"] = pd.cut(housing["median_income"], bins=[0, 1.5, 3.0, 4.5, 6., np.inf], labels=[1, 2, 3, 4, 5])
plt.show(housing["income_cat"].hist()) #Now I can do Stratified Sampling (See Book)
#Prepare data for stratified sampling
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]): #This Function Performs Stratified Sampling Based on the Income Category (Recall Male and Female Example...)
|
print(strat_test_set["income_cat"].value_counts()/len(strat_test_set)) #Compare to Histogram to See if Ratio of Test Set Data Matches the Height of the Bars
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
compare_props = pd.DataFrame({
"Overall": housing["income_cat"].value_counts()/len(housing),
"Stratified": strat_test_set["income_cat"].value_counts()/len(strat_test_set),
"Random": test_set["income_cat"].value_counts()/len(test_set)
}).sort_index()
compare_props["Rand. %Error"] = 100 * compare_props["Random"] / compare_props["Overall"] - 100
compare_props["Strat. %Error"] = 100 * compare_props["Stratified"] / compare_props["Overall"] - 100
print(compare_props)
for set_ in(strat_train_set, strat_test_set): #Removing the "Income Category (income_cat) Attribute...
set_.drop("income_cat", axis=1, inplace=True)
#Create a Copy of the Training Set so the Original is not Harmed
housing = strat_train_set.copy()
#Visualize the Data in a Scatterplot
plt.show(housing.plot(kind='scatter', x="longitude", y="latitude", alpha=0.1)) #alpha Helps Highlight High Density Areas
housing.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4,
s=housing["population"]/100, label="population", figsize=(10,7),
c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,)
#plt.show()
#Look at how each Attribute Correlates with Median House Value (Median House Value is the "Target" Attribute)
corr_matrix = housing.corr()
print(corr_matrix["median_house_value"].sort_values(ascending=False))
attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12,8))
#plt.show()
housing.plot(kind="scatter", x="median_income", y="median_house_value",
alpha=0.1)
#plt.show()
#Try Different Combinations of Attributes Before Feeding Data to Machine Learning Algorithm
housing["rooms_per_household"] = housing["total_rooms"]/housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"]/housing["total_rooms"]
housing["population_per_household"] = housing["population"]/housing["households"]
#See How Many Attributes There Are
# print(housing.info())
# print(housing.describe())
#Look at Correlation Matrix Again with Median House Value as the Target Value
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
#The Result: "bedrooms_per_room" is more correlated than "total_room" or "total_bedrooms" with Median Housing Value
#Next, the data will be prepared for machine learning algorithms
#First, we will revert to a clean training set. The predictors and labels will be separated since we
#don't want to apply the same transformation to the predictors and the target values
#Creates a Copy of the Data "strat_train_set"
#The predictors and labels are separated since we don't want to necessarily apply the same transformations to the
#predictors and target values
housing = strat_train_set.drop("median_house_value", axis=1) #Drop "median_house_value" from training set and creates a copy of the training set
###NOTE: I believe "median_house_value" was dropped because we are separating the predictors and labels...###
print(housing)
#Create a copy of the "median_house_value" attribute and make it the target
housing_labels = strat_train_set["median_house_value"].copy()
print(housing.info())
#Sample incomplete rows
sample_incomplete_rows = housing[housing.isnull().any(axis=1)].head()
print(sample_incomplete_rows)
print(housing_labels) #Print Training Set (This is only the "median_house_value" attribute)
#Recall: At this point, the "total-bedrooms" attribute is missing some values
#There are three options to take care of the attribute's missing values:
#1.) Get rid of the corresponding districts (rows)
#2.) Get rid of the whole attribute
#3.) Set the values to some value (zero, mean, median etc.)
housing.dropna(subset=["total_bedrooms"]) #Option #1.)
housing.drop("total_bedrooms", axis=1) #Option #2.)
median = housing["total_bedrooms"].median() #Option #3.)
housing["total_bedrooms"].fillna(median, inplace=True) #Whatever this median value is, save it -> We will need it
#later to replace missing values in the test set
#Use Scikit-Learn modules to take care of missing values: SimpleInputer
#First, create instance of SimpleInputer and specify that you want to replace each attribute's missing values with
#the median of the attribute
imputer = SimpleImputer(strategy="median")
#Because the median can only be computed on numerical attributes, we need to copy the data without text attribute
#"ocean_proximity"
housing_num = housing.drop("ocean_proximity", axis=1)
#Now fit the Imputer instance to the training date using the fit() method
imputer.fit(housing_num) #<-- Computed the median of each attribute and stored the results in its statistics_
#instance variable
#Since only "total_bedrooms" attribute was missing date, it only computed median values for that attribute, but
#once the system goes live there can be more missing attributes, so it's better to apply the Imputer to all of the
#numerical attributes
print(imputer.statistics_)
print(housing_num.median().values) #<-- This is just checking to ensure manually computing the median of the
#attribute is the same as using the imputter.fit
#Replace missing values in training set by learned medians (Transform the training set)
#Note: 433 is the median of the "total_bedrooms" attribute
X = imputer.transform(housing_num)
print(imputer.strategy)
#The result is a plain Numpy array containing the transformed features. Now we can put it back into a Pandas
#DataFrame using the following:
housing_tr = pd.DataFrame(X, columns=housing_num.columns, index=housing.index) #housing_num does not include
#"ocean_proximity" attribute
print("\nThis is the housing.index values:")
print(housing.index)
#Since we already stored the incomplete rows in
#"sample_incomplete_rows", we're just checking to ensure those values were replaced with the median
#Recall: the ".loc" locates values in a Pandas DataFrame <-- see documentation
print(housing_tr.loc[sample_incomplete_rows.index.values])
#NOTE: For pushing "bare" repo to Github: $ git remote add origin https://github.com/MSilberberg0619/Machine_Learning_Practice.git
#"ocean_proximity" was left out because it's a text attribute and so the median can't be computed
#To fix, convert these categories from text to numbers using Scikit-Learn's OrdinalEncoder class
housing_cat = housing[["ocean_proximity"]]
print(housing_cat.head(10))
ordinal_encoder = OrdinalEncoder()
housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)
print(housing_cat_encoded)
#Can use one-hot encoding to map attributes to categories so the values of the attributes that are more similar
#will have similar encoded values
#We don't want the model to assume some natural ordering to the data --> could result in poor performance or
#unexpected results
cat_encoder = OneHotEncoder()
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
print(housing_cat_1hot)
housing_cat_1hot.toarray()
print(housing_cat_1hot)
#List of categories using the encoder's categories instance variable
print(cat_encoder.categories_)
#May need to write custom transformations for tasks such as custom cleanup operations
#This transformer class adds the combined attributes discussed earlier
rooms_ix, bedrooms_ix, population_ix, households_ix = 3, 4, 5, 6 #Line 1.1
# get the right column indices: safer than hard-coding indices 3, 4, 5, 6
rooms_ix, bedrooms_ix, population_ix, household_ix = [ #Line 1.2
list(housing.columns).index(col)
for col in ("total_rooms", "total_bedrooms", "population", "households")]
#NOTE: Line 1.1 and Line 1.2 provide the same result, but Line 1.2 is safer, as noted
#Call Instance of "CombinedAttributesAdder Class
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False) #Call "CombinedAttributesAdder" constructor
housing_extra_attribs = attr_adder.transform(housing.values) #Call method from "CombinedAttributesAdder class
#Because PyCharm can such sometimes, see "Feature Scaling" on page 66 for information about one of the most
#important transformations: feature scaling. There are two common ways: MinMax (Normalization) and
#Standardization (Convert to Standard Normal Distribution)
#Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly
# if the individual features do not more or less look like standard normally distributed data
# (e.g. Gaussian with 0 mean and unit variance).
#Scikit-Learn provides the "Pipeline" class to help with the sequence of transformations
num_pipeline = Pipeline([ #<-- Pipeline constructor takes a list of name/estimator pairs
('imputer', SimpleImputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
]) #<-- All but last estimator must be transformers (must have a fit_transform() method)
housing_num_tr = num_pipeline.fit_transform(housing_num) #Utilize numerical pipeline provided by "Pipeline" class
#Calling the "Pipeline's" fit method calls fit_method() sequentially on all transformers, passing the output of each
#call as the parameter to the next call, until it reaches the final estimator which then the fit() method is called
#From the Scikit-Learn website: Sequentially apply a list of transforms and a final estimator. Intermediate steps of
# the pipeline must be ‘transforms’, that is, they must implement fit and transform methods. The final estimator
# only needs to implement fit. The transformers in the pipeline can be cached using memory argument
#Use ColumnTransformer from Scikit-Learn to apply transformation to all columns, whether categorical or numerical
num_attribs = list(housing_num)
cat_atribs = ["ocean_proximity"]
full_pipeline = ColumnTransformer([
("num", num_pipeline, num_attribs), #<-- Returns a dense matrix
("cat", OneHotEncoder(), cat_atribs), #<-- Returns a sparse matrix
]) #<-- Group together categorical and numerical column names and construct a ColumnTransformer
#Constructor requires a list of tuples with name, a transformer and a list of names (or indices) of columns that the
#transformer should be applied to
#1.) Numerical columns are transformed with the num_pipeline defined earlier
#2.) Categorical columns should be transformed using a OneHotEncoder
#Apply this ColumnTransformer to the housing data --> applies each transformer to the appropriate columns and
#concatenates the outputs along the second axis
housing_prepared = full_pipeline.fit_transform(housing)
#Train a Machine Learning model using linear regression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
#Try linear regression model out on a few instances from teh training set!
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print("Predictions:" ,lin_reg.predict(some_data_prepared))
print('\n')
print("Labels: ", list(some_labels))
#Measure the regression model's RMSE on the whole training set using Scikit-Learn's "mean_squared_error" function
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
print(lin_rmse) #<-- Model underfit the training data... (median_housing_values is between $120,000 and $265,000)
#The underfitting of the model says two things:
#1.) The features do not provide enough information to make good predictions
#2.) The model is not powerful enough
#Try to train with a DecisionTreeRegressor --> This is a powerful model that is capable of finding nonlinear
#relationships in the data (Decision Trees will be presented in more detail in Chapter 4)
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels) #<-- Training the model
housing_predictions = tree_reg.predict(housing_prepared) #<-- Test the trained model using the training set
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
print(tree_rmse)
#This gave an error of zero, but this is likely not possible. It is more likely that the model badly overfit the
#data. What's the reason we believe this: Earlier, it was discussed that we don't want to touch the test set until
#we're ready to launch, so we should instead use part of the training set for training and part for model validation
#One way to evaluate the Decision Tree model would be to use the train_test_split function to split the
#training set into a smaller training set and a validation set, then train the models against the smaller
#training set and evaluate them against the validation set
#An alternative is to use Scikit-Learn's "cross-validation" feature that performs K-fold cross validation
#K-fold cross validation: Randomly splits the training set into 10 distinct subsets (folds), then it trains and
#evaluates the Decision Tree model 10 times, picking a different fold (subset) every evaluation time and
#training on the other 9 folds (subsets). This results in an array containing the 10 evaluation scores
scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error",
cv=10)
tree_rmse_scores = np.sqrt(-scores) #<-- Cross-validation expects a utility function instead of a cost function,
#so the scoring function os actually the OPPOSITE of the MSE (negative value)
print(tree_rmse_scores)
print("Scores: ", tree_rmse_scores)
print("Mean: ", tree_rmse_scores.mean())
print("Standard Deviation: ", tree_rmse_scores.std())
#Compute the same scores for the Linear Regression model
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error",
cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
print(lin_rmse_scores)
print("Scores: ", lin_rmse_scores)
print("Mean: ", lin_rmse_scores.mean()) #<-- Ten different rmse errors
print("Standard Deviation: ", lin_rmse_scores.std())
#Decision Tree is overfitting so badly that it performs worse than the Linear Regression model
################################### Aside ####################################################
#Cross-validation uses all of the data, one block at a time, to train a model and summarizes the
#results at the end
#In the end, every block of data is used for testing and we can compare methods by seeing how well
#they performed
#Can also use K-fold cross-validation to find the best value for a tuning parameter
#Essentially, 9 blocks of data are used for training and one for testing
##############################################################################################
#Try one more last model for now: RandomForestRegressor --> This is a Random Forest that works by training many
#Decision Trees on random subsets of the features, then averaging out their predictions.
forest_reg = RandomForestRegressor() #<-- Create an instance of the method from the Scikit-Learn package
forest_reg.fit(housing_prepared, housing_labels) #<-- Train the model
housing_predictions = forest_reg.predict(housing_prepared) # <-- Test the trained model using the training set
forest_mse = mean_squared_error(housing_labels, housing_predictions)
forest_rmse = np.sqrt(forest_mse)
print(forest_rmse)
# Compute the same scores for the Random Forest model
forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error",
cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
print(forest_rmse_scores)
print("Scores: ", forest_rmse_scores) #<-- Ten different rmse errors
print("Mean: ", forest_rmse_scores.mean())
print("Standard Deviation: ", forest_rmse_scores.std())
#NOTE: Building a model on top of many other models is called Ensemble Learning
#The results show that the Random Forests perform better than the other two models, but the score on the training
#set is still much lower than on the validation sets, indicating that the model is still overfitting the training
#set. Some possible solutions to mitigate overfitting are as follows:
# 1.) Simplify the model
# 2.) Constrain it (regularize it)
# 3.) Get more training data
#Now it's time to fine-tune the list of selected models...
#One method is to use Scikit-Learn's GridSearchCV to search for viable hyperparameters --> Just tell the method
#which hyperparameters you want to experiment with and which values to try out and it will evaluate all the possible
#combinations of hyperparameters using cross-validation
#This code will search for the best combination of hyperparameter values for the RandomForestRegressor method
#The param_grid tells Scikit-Learn to first evaluate all 3 x 4 = 12 combinations of n_estimators and max_features
#hyperparameter values specified in the first dict (see first row in param_grid), then try all 2 x 3 = 6 combinations
#of hyperparameter values in the second dict (see second row in param_grid), but this time with the bootstrap
#hyperparameter set to False instead of True
#The grid search will ultimately explore 18 combinations of RandomForestRegressor hyperparameter values and will
#train each model five times (we are using five-fold cross validation). This results in a total of 18 x 5 = 90
#rounds of training!
param_grid = [
{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}, #n_estimators is used when you have
#have no idea what the hyperparameter values should be (one strategy is to try out consecutive power of 10)
]
forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid, cv=5,
scoring='neg_mean_squared_error',
return_train_score=True)
grid_search.fit(housing_prepared, housing_labels)
print(grid_search.best_params_) #<-- The results are the maximum values that were evaluated, so we may want to
#search again
print(grid_search.best_estimator_)
#If GridSearchCV is initialized with refit=True --> retrains on the whole training set once it finds the best
#estimator using cross-validation (usually a good performance boost)
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
print(np.sqrt(-mean_score), params)
#The RMSE we obtained by iterating through the hyperparameter values is slightly better than the score we received
#from the default hyperparameter values. Thus we successfully fine-tuned the model
#We can also treat the data preparation steps as hyperparameter --> for example, we can determine whether to include
#a certain feature such as the "add_bedrooms_per_room", we can use this feature as a hyperparameter in the
#"CombinedAttributesAdder" transformer. We can also use it to determine how to handle outliers, missing features,
#feature selection and more
####################################################################################################################
#Grid search is sufficient when we are exploring few combinations, but if the hyperparameter search space is
#large, we should use "RandomizedSearchCV" instead --> instead of trying out all possible combinations like when
#we used "GridSearchCV", we use a given number of random combinations by selecting a random value for each
#hyperparameter at every iteration. There are two benefits to this approach:
# 1.) If the randomized search runs for 1,000 iterations, this approach will explore 1,000 different values for
# each hyperparameter
# 2.) We have more control over the computing budget you want to allocate to hyperparameter search just by
# adjusting the number of iterations
#We can also use ensemble methods, such as the Random Forest instead of Decision Trees, to fine-tune the system
####################################################################################################################
#We can gain insight by inspecting the best models and determine the relative importance of each attribute for
#making accurate predictions and drop less useful features.
# feature_importances = grid_search.best_estimator_
# # print(feature_importances)
#
# extra_attribs = ["rooms_per_household", "pop_per_household", "bedrooms_per_household"]
# cat_encoder = full_pipeline.named_transformers_["cat"]
# cat_one_hot_attribs = list(cat_encoder.categories_[0])
# attributes = num_attribs + extra_attribs + cat_one_hot_attribs
# sorted(zip(feature_importances, attributes), reverse=True)
#Once we have a system that performs well from tweaking the models, we can evaluate the final model on the test set
# To do this:
# 1.) Get the predictors and labels from the test set
# 2.) Run "full_pipeline()" to transform the data (call "transform()" not "fit_transform()") --> don't want to
# fit the test set!
# 3.) Evaluate the final model on the test set
final_model = grid_search.best_estimator_
X_test = strat_test_set.drop("median_house_value", axis=1)
y_test = strat_test_set["median_house_value"].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
print(final_rmse) #<-- Performance may be worse than what was resolved with cross-validation if you did a lot of
#hyperparameter tuning (end up with fine-tuned system that performs well on the validation data). However, if this
#happens, DON'T TWEAK THE HYPERPARAMETERS TO MAKE THE DATA LOOK GOOD ON THE TEST SET; the improvements may still
#not generalize to new data
####################################################################################################################
#Now that the system is ready to launch, we need to plug in production input data sources and write tests. Also, we
#should monitoring code to check the system's live performance at regular intervals and trigger alerts when it
#drops --> Models tend to "rot" over time, unless the models are regularly trained on fresh data
#Next, we should sample the system's predictions and evaluate them to evaluate the system's performance, which will
#require a human analysis. There should be a human evaluation pipeline in the system.
#We should also evaluate the system's input quality --> drop in performance can sometimes be due to a poor quality
#signal (malfunctioning sensor reading etc.). By monitoring the system's inputs this degradation can be caught
#much earlier.
#Finally, we should train models on a regular basis using fresh data with an automated process --> if not, a sparsely
#refreshed model and drop in performance or performance fluctuations may occur. If it's an online learning system,
#it's a good idea to save snapshots of its state at regular intervals so we can go back to that state if needed.
#################################################################################################################### | strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index] #.loc can access a group of rows or columns by label(s) | conditional_block |
reader.go | package ppcap
import (
"encoding/binary"
"io"
"os"
"reflect"
"github.com/OneOfOne/xxhash"
)
// DataReadStream is used for streaming packets from a ppcapd (data file).
// Can be used with or without the corresponding index file.
// If used with an index file, call SetReadRange to iterate inside a
// specific range of blocks (or a single block), then ReadNextPacket until EOF.
type DataReadStream struct {
	hdrlay          PacketHeaderLayout // per-packet header layout in effect for this file
	iseof           bool               // is the DataReadStream at EOF
	isunderlyingeof bool               // is the underlying dataFile at EOF
	reader          io.ReaderAt        // random-access source the stream reads from
	buf             []byte             // sliding window over the data file
	bufferOffset    int64              // file offset at which the next refill reads
	bufferUsed      int                // bytes at the front of buf already consumed
	bufferAvail     int                // bytes of unread data currently held in buf
	endOffset       int64              // exclusive end of the read range; 0 = read to EOF (if we are, e.g. reading inside one particular block)
}
// NextPacketOutput holds the slices produced by NextPacket/ReadNextPacket.
// All slices alias the source buffer, so they are only valid until the
// buffer is next refilled or reused.
type NextPacketOutput struct {
	wholePacketSize int    // total frame size (ppcap header + protocol headers + payload)
	WholePacket     []byte // entire frame
	Headers         []byte // ppcap headers + protocol headers
	Payload         []byte // application layer
}
// NextPacket decodes one frame from the front of *stream (generic
// byte-slice variant). On success it advances *stream past the frame,
// fills output, and returns true. When the slice holds less than a full
// frame it returns false and leaves *stream unchanged (output is reset,
// and wholePacketSize is recorded once the ppcap header was readable).
func NextPacket(stream *[]byte, hdrlay *PacketHeaderLayout, output *NextPacketOutput) bool {
	buf := *stream
	*output = NextPacketOutput{}

	// Need at least the fixed ppcap header before the length is known.
	if len(buf) < hdrlay.Size {
		return false
	}
	frameLen := hdrlay.Size + ReadPacketSize(buf, hdrlay)
	output.wholePacketSize = frameLen
	if len(buf) < frameLen {
		return false
	}

	headerEnd := hdrlay.Size + hdrlay.ProtocolHeadersSize
	output.WholePacket = buf[:frameLen]
	output.Headers = buf[:headerEnd]
	output.Payload = buf[headerEnd:frameLen]
	*stream = buf[frameLen:] // consume the frame
	return true
}
// XXX_HACK_autodetect_libpcap_layout sniffs the first 24 bytes of the file
// for a little-endian libpcap header (magic 0xa1b23c4d with the word at
// offset 20 equal to 228) and, when it matches, switches hdrlay to the
// libpcap layout. Without this hack, user code would need changes in 10+
// locations; the probability of a false positive is 2^-40 (about one in a
// trillion). The ReadAt error is deliberately ignored: on a short or
// failed read the buffer simply will not match the magic.
func XXX_HACK_autodetect_libpcap_layout(reader io.ReaderAt, hdrlay *PacketHeaderLayout) {
	header := make([]byte, 24)
	reader.ReadAt(header, 0)
	magic := binary.LittleEndian.Uint32(header[0:4])
	marker := binary.LittleEndian.Uint32(header[20:24])
	if magic == 0xa1b23c4d && marker == 228 {
		BuildPacketHeaderLayout(hdrlay, HDRLAY_LIBPCAP)
	}
}
// NewDataReadStream wraps reader in a packet stream using the given header
// layout, transparently switching to the libpcap layout when the file
// begins with a libpcap magic header.
func NewDataReadStream(reader io.ReaderAt, hdrlay *PacketHeaderLayout) *DataReadStream {
	rs := &DataReadStream{
		hdrlay: *hdrlay,
		reader: reader,
		buf:    make([]byte, 2*PPCAP_DEFAULT_MAX_BLOCK_SIZE),
	}
	XXX_HACK_autodetect_libpcap_layout(reader, &rs.hdrlay)
	rs.reset()
	return rs
}
// refillBuffer slides any unread bytes to the front of rs.buf and reads
// more data from the underlying reader at rs.bufferOffset.
//
// packetLen is the size of the frame the caller is currently trying to
// assemble; the buffer is grown to 16x that size so the sliding-window
// scheme does not spend too much time copying.
//
// Returns io.EOF when the configured read range (rs.endOffset) is
// exhausted. A read that hits the end of the underlying file sets
// isunderlyingeof but returns nil, so the caller can consume whatever was
// read before EOF is surfaced.
func (rs *DataReadStream) refillBuffer(packetLen int) error {
	// Slide the unconsumed tail (starting at bufferUsed) to the front.
	copy(rs.buf, rs.buf[rs.bufferUsed:])
	if packetLen*16 > cap(rs.buf) {
		// make space for 16x the largest packet, so that the
		// circular buffer scheme doesn't have too much copying overhead
		newbuf := make([]byte, packetLen*16)
		copy(newbuf, rs.buf)
		rs.buf = newbuf
	}
	dest := rs.buf[rs.bufferAvail:]
	readSize := len(dest)
	if rs.endOffset > 0 {
		// Clamp the read so it never crosses the end of the read range.
		remaining := rs.endOffset - rs.bufferOffset
		if remaining < int64(readSize) {
			readSize = int(remaining)
			dest = rs.buf[rs.bufferAvail : rs.bufferAvail+readSize]
		}
	}
	if readSize == 0 {
		// Read range fully consumed.
		rs.isunderlyingeof = true
		return io.EOF
	} else {
		count, err := rs.reader.ReadAt(dest, rs.bufferOffset)
		rs.bufferOffset += int64(count)
		rs.bufferAvail += count
		rs.bufferUsed = 0
		if err == io.EOF {
			// Partial read at file end: deliver the data now and report
			// EOF on a later call.
			rs.isunderlyingeof = true
			return nil
		} else {
			return err
		}
	}
}
// reset returns the stream to its initial state: positioned just past the
// data-file header, with an empty window and no end bound.
func (rs *DataReadStream) reset() {
	rs.iseof = false
	rs.isunderlyingeof = false
	rs.bufferOffset = int64(rs.hdrlay.DataHeaderSize)
	rs.bufferAvail = 0
	// Mark the whole buffer as consumed so the first refillBuffer slides
	// nothing stale to the front (buf[len(buf):] is empty).
	rs.bufferUsed = len(rs.buf)
	rs.endOffset = 0
}
// SetReadRange positions the stream over a span of blocks.
// A nil start begins reading at offset 0 (just past the data-file header);
// a nil end reads through to EOF. Pass the same block as both start and
// end to iterate a single block.
func (rs *DataReadStream) SetReadRange(start *BlockInfo, end *BlockInfo) {
	rs.reset()
	if start != nil {
		rs.bufferOffset = start.Position
	}
	if end == nil {
		return
	}
	rs.endOffset = end.Position + int64(end.ByteCount)
}
// ReadNextPacket returns a byte slice containing the next packet in the current read range.
// Once the last packet has been returned, any subsequent calls will EOF.
// If there is a partial packet at the end of the read range, io.ErrUnexpectedEOF is raised.
func (rs *DataReadStream) ReadNextPacket(output *NextPacketOutput) error {
	for {
		// handle EOF
		if rs.iseof {
			if rs.bufferAvail == 0 {
				return io.EOF
			}
			// Bytes remain but they do not form a complete packet.
			return io.ErrUnexpectedEOF
		}
		// View over the unconsumed window; NextPacket advances this local
		// slice header, not rs.buf itself.
		rdbuf := rs.buf[rs.bufferUsed : rs.bufferUsed+rs.bufferAvail]
		success := NextPacket(&rdbuf, &rs.hdrlay, output)
		wholePacketSize := output.wholePacketSize
		if success {
			// Consume the frame from the window.
			rs.bufferAvail -= wholePacketSize
			rs.bufferUsed += wholePacketSize
			return nil
		} else {
			// read more unless the underlying file is already EOF
			if rs.isunderlyingeof {
				rs.iseof = true
			} else {
				err := rs.refillBuffer(wholePacketSize)
				if err != nil {
					return err
				}
			}
		}
	}
}
// GetNumberOfBlocks reports how many complete index entries an index file
// of the given size contains. *IsTruncated is set to whether a partial
// trailing entry exists; it is left untouched when the file is too small
// to hold even the index header.
func GetNumberOfBlocks(indexFileSize int64, IsTruncated *bool) int {
	if indexFileSize < sizeofIndexFileHeader {
		return 0
	}
	entryBytes := indexFileSize - sizeofIndexFileHeader
	*IsTruncated = entryBytes%sizeofIndexEntry != 0
	return int(entryBytes / int64(sizeofIndexEntry))
}
// ReadBlockHeaders reads index entries firstBlock onward from the index
// file into headers (one parsed entry per element), batching reads through
// a 4 KiB scratch buffer.
//
// Returns any error from ReadAt, including io.EOF/io.ErrUnexpectedEOF when
// fewer than len(headers) entries exist past firstBlock.
func ReadBlockHeaders(indexFd *os.File, headers []BlockInfo, firstBlock int) error {
	bufSize := 4096
	buf := make([]byte, bufSize)
	offset := int64(firstBlock)*sizeofIndexEntry + sizeofIndexFileHeader
	for len(headers) > 0 {
		// Read as many whole entries as fit in the scratch buffer.
		blocksToRead := len(headers)
		if blocksToRead*sizeofIndexEntry > bufSize {
			blocksToRead = bufSize / sizeofIndexEntry
		}
		raw := buf[:blocksToRead*sizeofIndexEntry]
		if _, err := indexFd.ReadAt(raw, offset); err != nil {
			return err
		}
		// "for i := range": the element value was unused in the original
		// "for i, _ := range", which go vet/gofmt -s flags.
		for i := range headers[:blocksToRead] {
			headers[i].Parse(raw[i*sizeofIndexEntry:])
		}
		offset += int64(blocksToRead) * sizeofIndexEntry
		headers = headers[blocksToRead:]
	}
	return nil
}
// TODO (?):
// Implement a reader class that can read a capture while it
// is being concurrently written by another thread/process.
// This requires thought, and it might not be possible to do generically enough
// given that users will likely want to maintain side datastructures for blocks.
type ConcurrentIndexReader struct {
	// Intentionally empty: placeholder for the future concurrent reader.
}
// The SimpleInMemoryIndex is suitable for offline use cases
// where the pcap file is not being concurrently written.
// Use ReadIndexIntoMemory to create.
type SimpleInMemoryIndex struct {
Layout PacketHeaderLayout
IndexHeader IndexFileHeader
Blocks []BlockInfo
BlockCount int
IndexFileSize int64
IsTruncated bool
| memidx := &SimpleInMemoryIndex{}
err := ReadIndexFileHeader(indexFd, &memidx.IndexHeader)
if err != nil {
return nil, err
}
BuildPacketHeaderLayout(&memidx.Layout, memidx.IndexHeader.Flags)
memidx.IndexFileSize, err = getFileSize(indexFd)
if err != nil {
return nil, err
}
memidx.BlockCount = GetNumberOfBlocks(memidx.IndexFileSize, &memidx.IsTruncated)
memidx.Blocks = make([]BlockInfo, memidx.BlockCount)
err = ReadBlockHeaders(indexFd, memidx.Blocks, 0)
if err != nil {
return nil, err
}
return memidx, nil
}
// EvaluateCaptureResult aggregates the findings of EvaluateCapture:
// truncation checks, per-block hash verification, sequence-number
// continuity, and size/packet totals.
type EvaluateCaptureResult struct {
	IndexHeader       IndexFileHeader
	AppearsFlawless   bool // all hashes passed, no truncation, sequence numbers OK
	IndexIsTruncated  bool // data file extends past the last indexed block
	DataIsTruncated   bool // last indexed block extends past the end of the data file
	IsTruncated       bool
	SequencingOk      bool // every block's SeqNum matched the running packet count
	AllBlocksPassed   bool
	TotalBlockCount   int
	GoodBlockCount    int // blocks whose recomputed hash matched the index entry
	FailedBlockCount  int // blocks whose recomputed hash did not match
	PacketCount       int64
	DataSize          int64
	IndexSize         int64
	TotalSize         int64
	TotalPayloadSize  int64 // overhead = 100% * (TotalPayloadSize/TotalSize-1)
	PacketStreamCount int
	PacketStreamHash  map[uint16]uint64 // hash for each stream index
}
func EvaluateCapture(where *CapturePath, result *EvaluateCaptureResult) error {
*result = EvaluateCaptureResult{}
// open as readonly exlcusive to ensure that a writer isn't currently active on this capture
indexFd, dataFd, err := OpenCapture(where, os.O_RDONLY, 0644|os.ModeExclusive)
if err != nil {
return err
}
defer indexFd.Close()
defer dataFd.Close()
if indexFd == nil || dataFd == nil {
return os.ErrNotExist
}
memidx, err := ReadIndexIntoMemory(indexFd)
if err != nil {
return err
}
indexFileSize := memidx.IndexFileSize
blockCount := memidx.BlockCount
blockHeaders := memidx.Blocks
dataFileSize, err := getFileSize(dataFd)
if err != nil {
return err
}
dataStream := NewDataReadStream(dataFd, &memidx.Layout)
if blockCount > 0 {
blockHdr := &blockHeaders[blockCount-1]
dataEnd := blockHdr.Position + int64(blockHdr.ByteCount)
if dataEnd > dataFileSize {
result.DataIsTruncated = true
}
if dataEnd < dataFileSize {
result.IndexIsTruncated = true
}
}
var packet NextPacketOutput
var blockHash xxhash.XXHash64
streamHashes := make(map[uint16]*xxhash.XXHash64)
result.SequencingOk = true
for blockIdx := 0; blockIdx < blockCount; blockIdx++ {
blockHdr := &blockHeaders[blockIdx]
dataStream.SetReadRange(blockHdr, blockHdr)
blockHash.Reset()
if result.PacketCount != blockHdr.SeqNum {
result.SequencingOk = false
}
for {
err := dataStream.ReadNextPacket(&packet)
if err != nil {
if err != io.EOF {
// block read error
// don't bail entirely here, just go to the next block
}
break
}
streamIndex := ReadStreamIndex(packet.WholePacket, &memidx.Layout)
if streamHashes[streamIndex] == nil {
streamHashes[streamIndex] = xxhash.New64()
}
streamHashes[streamIndex].Write(packet.Payload)
blockHash.Write(packet.WholePacket)
result.PacketCount += 1
result.TotalPayloadSize += int64(len(packet.Payload))
}
if blockHash.Sum64() == blockHdr.Hash {
result.GoodBlockCount += 1
} else {
result.FailedBlockCount += 1
}
result.TotalBlockCount += 1
}
result.PacketStreamHash = make(map[uint16]uint64)
for k, v := range streamHashes {
result.PacketStreamHash[k] = v.Sum64()
}
result.PacketStreamCount = len(result.PacketStreamHash)
result.DataSize = dataFileSize
result.IndexSize = indexFileSize
result.TotalSize = indexFileSize + dataFileSize
result.IndexHeader = memidx.IndexHeader
result.AllBlocksPassed = (result.FailedBlockCount == 0)
result.IsTruncated = result.DataIsTruncated || result.IndexIsTruncated
result.AppearsFlawless = result.AllBlocksPassed && !result.IsTruncated && result.SequencingOk
return nil
}
func CapturesMatch(a, b *EvaluateCaptureResult) bool {
if !a.AppearsFlawless || !b.AppearsFlawless {
// initial implementation: don't even try to match if there could be some kind of error
return false
}
if a.DataSize != b.DataSize {
return false
}
return reflect.DeepEqual(a.PacketStreamHash, b.PacketStreamHash)
} | }
func ReadIndexIntoMemory(indexFd *os.File) (*SimpleInMemoryIndex, error) {
| random_line_split |
reader.go | package ppcap
import (
"encoding/binary"
"io"
"os"
"reflect"
"github.com/OneOfOne/xxhash"
)
// DataReadStream is used for streaming packets from a ppcapd (data file).
// Can be used with or without the corresponding index file.
// If used with an index file, call SetReadRange to iterate inside a
// specific range of blocks (or a single block), then NextPacket until EOF.
type DataReadStream struct {
hdrlay PacketHeaderLayout
iseof bool // is the DataReadStream at EOF
isunderlyingeof bool // is the underlying dataFile at EOF
reader io.ReaderAt
buf []byte
bufferOffset int64
bufferUsed int
bufferAvail int
endOffset int64 // if we are, e.g. reading inside one particular block
}
type NextPacketOutput struct {
wholePacketSize int
WholePacket []byte // entire frame
Headers []byte // ppcap headers + protocol headers
Payload []byte // application layer
}
// generic next packet on byte slice
// output is only modified on success
func NextPacket(stream *[]byte, hdrlay *PacketHeaderLayout, output *NextPacketOutput) bool {
read := *stream
*output = NextPacketOutput{}
if len(read) < hdrlay.Size {
return false
}
wholePacketSize := hdrlay.Size + ReadPacketSize(read, hdrlay)
output.wholePacketSize = wholePacketSize
if len(read) < wholePacketSize {
return false
}
// advance stream
*stream = read[wholePacketSize:]
output.WholePacket = read[:wholePacketSize]
output.Headers = read[:hdrlay.Size+hdrlay.ProtocolHeadersSize]
output.Payload = read[hdrlay.Size+hdrlay.ProtocolHeadersSize : wholePacketSize]
return true
}
// without this hack, user code would need to be modified 10+ locations.
// probability of false positive is 2^-40 (about one in a trillion).
func XXX_HACK_autodetect_libpcap_layout(reader io.ReaderAt, hdrlay *PacketHeaderLayout) {
buf := make([]byte, 24)
reader.ReadAt(buf, 0)
if 0xa1b23c4d == binary.LittleEndian.Uint32(buf[0:4]) &&
228 == binary.LittleEndian.Uint32(buf[20:24]) {
BuildPacketHeaderLayout(hdrlay, HDRLAY_LIBPCAP)
}
}
func NewDataReadStream(reader io.ReaderAt, hdrlay *PacketHeaderLayout) *DataReadStream {
rs := &DataReadStream{}
rs.hdrlay = *hdrlay
rs.reader = reader
rs.buf = make([]byte, 2*PPCAP_DEFAULT_MAX_BLOCK_SIZE)
XXX_HACK_autodetect_libpcap_layout(reader, &rs.hdrlay)
rs.reset()
return rs
}
func (rs *DataReadStream) refillBuffer(packetLen int) error {
copy(rs.buf, rs.buf[rs.bufferUsed:])
if packetLen*16 > cap(rs.buf) {
// make space for 16x the largest packet, so that the
// circular buffer scheme doesn't have too much copying overhead
newbuf := make([]byte, packetLen*16)
copy(newbuf, rs.buf)
rs.buf = newbuf
}
dest := rs.buf[rs.bufferAvail:]
readSize := len(dest)
if rs.endOffset > 0 {
remaining := rs.endOffset - rs.bufferOffset
if remaining < int64(readSize) {
readSize = int(remaining)
dest = rs.buf[rs.bufferAvail : rs.bufferAvail+readSize]
}
}
if readSize == 0 {
rs.isunderlyingeof = true
return io.EOF
} else {
count, err := rs.reader.ReadAt(dest, rs.bufferOffset)
rs.bufferOffset += int64(count)
rs.bufferAvail += count
rs.bufferUsed = 0
if err == io.EOF {
rs.isunderlyingeof = true
return nil
} else {
return err
}
}
}
func (rs *DataReadStream) reset() {
rs.iseof = false
rs.isunderlyingeof = false
rs.bufferOffset = int64(rs.hdrlay.DataHeaderSize)
rs.bufferAvail = 0
rs.bufferUsed = len(rs.buf)
rs.endOffset = 0
}
// SetReadRange sets up the stream for reading a given range of blocks.
// if start = nil then reading will begin at offset 0.
// if end = nil then reading will end at EOF.
func (rs *DataReadStream) SetReadRange(start *BlockInfo, end *BlockInfo) {
rs.reset()
if start != nil {
rs.bufferOffset = start.Position
}
if end != nil {
rs.endOffset = end.Position + int64(end.ByteCount)
}
}
// ReadNextPacket returns a byte slice containing the next packet in the current read range.
// Once the last packet has been returned, any subsequent calls will EOF.
// If there is a partial packet at the end of the read range, io.ErrUnexpectedEOF is raised.
func (rs *DataReadStream) ReadNextPacket(output *NextPacketOutput) error {
for {
// handle EOF
if rs.iseof {
if rs.bufferAvail == 0 {
return io.EOF
}
return io.ErrUnexpectedEOF
}
rdbuf := rs.buf[rs.bufferUsed : rs.bufferUsed+rs.bufferAvail]
success := NextPacket(&rdbuf, &rs.hdrlay, output)
wholePacketSize := output.wholePacketSize
if success | else {
// read more unless the underlying file is already EOF
if rs.isunderlyingeof {
rs.iseof = true
} else {
err := rs.refillBuffer(wholePacketSize)
if err != nil {
return err
}
}
}
}
}
func GetNumberOfBlocks(indexFileSize int64, IsTruncated *bool) int {
if indexFileSize < sizeofIndexFileHeader {
return 0
}
indexFileSize -= sizeofIndexFileHeader
*IsTruncated = ((indexFileSize % sizeofIndexEntry) != 0)
return int(indexFileSize / int64(sizeofIndexEntry))
}
func ReadBlockHeaders(indexFd *os.File, headers []BlockInfo, firstBlock int) error {
if len(headers) == 0 {
return nil
}
bufSize := 4096
buf := make([]byte, bufSize)
offset := int64(firstBlock)*sizeofIndexEntry + sizeofIndexFileHeader
for {
blocksToRead := len(headers)
if blocksToRead*sizeofIndexEntry > bufSize {
blocksToRead = bufSize / sizeofIndexEntry
}
raw := buf[:blocksToRead*sizeofIndexEntry]
_, err := indexFd.ReadAt(raw, offset)
if err != nil {
return err
}
for i, _ := range headers[:blocksToRead] {
hdr := &headers[i]
hdr.Parse(raw[i*sizeofIndexEntry:])
}
offset += int64(blocksToRead) * sizeofIndexEntry
headers = headers[blocksToRead:]
if len(headers) == 0 {
break
}
}
return nil
}
// TODO (?):
// Implement a reader class that can read a capture while it
// is being concurrently written by another thread/process.
// This requires thought, and it might not be possible to do generically enough
// given that users will likely want to maintain side datastructures for blocks
type ConcurrentIndexReader struct {
}
// The SimpleInMemoryIndex is suitable for offline use cases
// where the pcap file is not being concurrently written.
// Use ReadIndexIntoMemory to create.
type SimpleInMemoryIndex struct {
Layout PacketHeaderLayout
IndexHeader IndexFileHeader
Blocks []BlockInfo
BlockCount int
IndexFileSize int64
IsTruncated bool
}
func ReadIndexIntoMemory(indexFd *os.File) (*SimpleInMemoryIndex, error) {
memidx := &SimpleInMemoryIndex{}
err := ReadIndexFileHeader(indexFd, &memidx.IndexHeader)
if err != nil {
return nil, err
}
BuildPacketHeaderLayout(&memidx.Layout, memidx.IndexHeader.Flags)
memidx.IndexFileSize, err = getFileSize(indexFd)
if err != nil {
return nil, err
}
memidx.BlockCount = GetNumberOfBlocks(memidx.IndexFileSize, &memidx.IsTruncated)
memidx.Blocks = make([]BlockInfo, memidx.BlockCount)
err = ReadBlockHeaders(indexFd, memidx.Blocks, 0)
if err != nil {
return nil, err
}
return memidx, nil
}
type EvaluateCaptureResult struct {
IndexHeader IndexFileHeader
AppearsFlawless bool // all hashes passed, no truncation, sequence numbers OK
IndexIsTruncated bool
DataIsTruncated bool
IsTruncated bool
SequencingOk bool
AllBlocksPassed bool
TotalBlockCount int
GoodBlockCount int
FailedBlockCount int
PacketCount int64
DataSize int64
IndexSize int64
TotalSize int64
TotalPayloadSize int64 // overhead = 100% * (TotalPayloadSize/TotalSize-1)
PacketStreamCount int
PacketStreamHash map[uint16]uint64 // hash for each stream index
}
func EvaluateCapture(where *CapturePath, result *EvaluateCaptureResult) error {
*result = EvaluateCaptureResult{}
// open as readonly exlcusive to ensure that a writer isn't currently active on this capture
indexFd, dataFd, err := OpenCapture(where, os.O_RDONLY, 0644|os.ModeExclusive)
if err != nil {
return err
}
defer indexFd.Close()
defer dataFd.Close()
if indexFd == nil || dataFd == nil {
return os.ErrNotExist
}
memidx, err := ReadIndexIntoMemory(indexFd)
if err != nil {
return err
}
indexFileSize := memidx.IndexFileSize
blockCount := memidx.BlockCount
blockHeaders := memidx.Blocks
dataFileSize, err := getFileSize(dataFd)
if err != nil {
return err
}
dataStream := NewDataReadStream(dataFd, &memidx.Layout)
if blockCount > 0 {
blockHdr := &blockHeaders[blockCount-1]
dataEnd := blockHdr.Position + int64(blockHdr.ByteCount)
if dataEnd > dataFileSize {
result.DataIsTruncated = true
}
if dataEnd < dataFileSize {
result.IndexIsTruncated = true
}
}
var packet NextPacketOutput
var blockHash xxhash.XXHash64
streamHashes := make(map[uint16]*xxhash.XXHash64)
result.SequencingOk = true
for blockIdx := 0; blockIdx < blockCount; blockIdx++ {
blockHdr := &blockHeaders[blockIdx]
dataStream.SetReadRange(blockHdr, blockHdr)
blockHash.Reset()
if result.PacketCount != blockHdr.SeqNum {
result.SequencingOk = false
}
for {
err := dataStream.ReadNextPacket(&packet)
if err != nil {
if err != io.EOF {
// block read error
// don't bail entirely here, just go to the next block
}
break
}
streamIndex := ReadStreamIndex(packet.WholePacket, &memidx.Layout)
if streamHashes[streamIndex] == nil {
streamHashes[streamIndex] = xxhash.New64()
}
streamHashes[streamIndex].Write(packet.Payload)
blockHash.Write(packet.WholePacket)
result.PacketCount += 1
result.TotalPayloadSize += int64(len(packet.Payload))
}
if blockHash.Sum64() == blockHdr.Hash {
result.GoodBlockCount += 1
} else {
result.FailedBlockCount += 1
}
result.TotalBlockCount += 1
}
result.PacketStreamHash = make(map[uint16]uint64)
for k, v := range streamHashes {
result.PacketStreamHash[k] = v.Sum64()
}
result.PacketStreamCount = len(result.PacketStreamHash)
result.DataSize = dataFileSize
result.IndexSize = indexFileSize
result.TotalSize = indexFileSize + dataFileSize
result.IndexHeader = memidx.IndexHeader
result.AllBlocksPassed = (result.FailedBlockCount == 0)
result.IsTruncated = result.DataIsTruncated || result.IndexIsTruncated
result.AppearsFlawless = result.AllBlocksPassed && !result.IsTruncated && result.SequencingOk
return nil
}
func CapturesMatch(a, b *EvaluateCaptureResult) bool {
if !a.AppearsFlawless || !b.AppearsFlawless {
// initial implementation: don't even try to match if there could be some kind of error
return false
}
if a.DataSize != b.DataSize {
return false
}
return reflect.DeepEqual(a.PacketStreamHash, b.PacketStreamHash)
}
| {
rs.bufferAvail -= wholePacketSize
rs.bufferUsed += wholePacketSize
return nil
} | conditional_block |
reader.go | package ppcap
import (
"encoding/binary"
"io"
"os"
"reflect"
"github.com/OneOfOne/xxhash"
)
// DataReadStream is used for streaming packets from a ppcapd (data file).
// Can be used with or without the corresponding index file.
// If used with an index file, call SetReadRange to iterate inside a
// specific range of blocks (or a single block), then NextPacket until EOF.
type DataReadStream struct {
hdrlay PacketHeaderLayout
iseof bool // is the DataReadStream at EOF
isunderlyingeof bool // is the underlying dataFile at EOF
reader io.ReaderAt
buf []byte
bufferOffset int64
bufferUsed int
bufferAvail int
endOffset int64 // if we are, e.g. reading inside one particular block
}
type NextPacketOutput struct {
wholePacketSize int
WholePacket []byte // entire frame
Headers []byte // ppcap headers + protocol headers
Payload []byte // application layer
}
// generic next packet on byte slice
// output is only modified on success
func NextPacket(stream *[]byte, hdrlay *PacketHeaderLayout, output *NextPacketOutput) bool {
read := *stream
*output = NextPacketOutput{}
if len(read) < hdrlay.Size {
return false
}
wholePacketSize := hdrlay.Size + ReadPacketSize(read, hdrlay)
output.wholePacketSize = wholePacketSize
if len(read) < wholePacketSize {
return false
}
// advance stream
*stream = read[wholePacketSize:]
output.WholePacket = read[:wholePacketSize]
output.Headers = read[:hdrlay.Size+hdrlay.ProtocolHeadersSize]
output.Payload = read[hdrlay.Size+hdrlay.ProtocolHeadersSize : wholePacketSize]
return true
}
// without this hack, user code would need to be modified 10+ locations.
// probability of false positive is 2^-40 (about one in a trillion).
func XXX_HACK_autodetect_libpcap_layout(reader io.ReaderAt, hdrlay *PacketHeaderLayout) {
buf := make([]byte, 24)
reader.ReadAt(buf, 0)
if 0xa1b23c4d == binary.LittleEndian.Uint32(buf[0:4]) &&
228 == binary.LittleEndian.Uint32(buf[20:24]) {
BuildPacketHeaderLayout(hdrlay, HDRLAY_LIBPCAP)
}
}
func NewDataReadStream(reader io.ReaderAt, hdrlay *PacketHeaderLayout) *DataReadStream {
rs := &DataReadStream{}
rs.hdrlay = *hdrlay
rs.reader = reader
rs.buf = make([]byte, 2*PPCAP_DEFAULT_MAX_BLOCK_SIZE)
XXX_HACK_autodetect_libpcap_layout(reader, &rs.hdrlay)
rs.reset()
return rs
}
func (rs *DataReadStream) refillBuffer(packetLen int) error {
copy(rs.buf, rs.buf[rs.bufferUsed:])
if packetLen*16 > cap(rs.buf) {
// make space for 16x the largest packet, so that the
// circular buffer scheme doesn't have too much copying overhead
newbuf := make([]byte, packetLen*16)
copy(newbuf, rs.buf)
rs.buf = newbuf
}
dest := rs.buf[rs.bufferAvail:]
readSize := len(dest)
if rs.endOffset > 0 {
remaining := rs.endOffset - rs.bufferOffset
if remaining < int64(readSize) {
readSize = int(remaining)
dest = rs.buf[rs.bufferAvail : rs.bufferAvail+readSize]
}
}
if readSize == 0 {
rs.isunderlyingeof = true
return io.EOF
} else {
count, err := rs.reader.ReadAt(dest, rs.bufferOffset)
rs.bufferOffset += int64(count)
rs.bufferAvail += count
rs.bufferUsed = 0
if err == io.EOF {
rs.isunderlyingeof = true
return nil
} else {
return err
}
}
}
func (rs *DataReadStream) reset() {
rs.iseof = false
rs.isunderlyingeof = false
rs.bufferOffset = int64(rs.hdrlay.DataHeaderSize)
rs.bufferAvail = 0
rs.bufferUsed = len(rs.buf)
rs.endOffset = 0
}
// SetReadRange sets up the stream for reading a given range of blocks.
// if start = nil then reading will begin at offset 0.
// if end = nil then reading will end at EOF.
func (rs *DataReadStream) SetReadRange(start *BlockInfo, end *BlockInfo) {
rs.reset()
if start != nil {
rs.bufferOffset = start.Position
}
if end != nil {
rs.endOffset = end.Position + int64(end.ByteCount)
}
}
// ReadNextPacket returns a byte slice containing the next packet in the current read range.
// Once the last packet has been returned, any subsequent calls will EOF.
// If there is a partial packet at the end of the read range, io.ErrUnexpectedEOF is raised.
func (rs *DataReadStream) | (output *NextPacketOutput) error {
for {
// handle EOF
if rs.iseof {
if rs.bufferAvail == 0 {
return io.EOF
}
return io.ErrUnexpectedEOF
}
rdbuf := rs.buf[rs.bufferUsed : rs.bufferUsed+rs.bufferAvail]
success := NextPacket(&rdbuf, &rs.hdrlay, output)
wholePacketSize := output.wholePacketSize
if success {
rs.bufferAvail -= wholePacketSize
rs.bufferUsed += wholePacketSize
return nil
} else {
// read more unless the underlying file is already EOF
if rs.isunderlyingeof {
rs.iseof = true
} else {
err := rs.refillBuffer(wholePacketSize)
if err != nil {
return err
}
}
}
}
}
func GetNumberOfBlocks(indexFileSize int64, IsTruncated *bool) int {
if indexFileSize < sizeofIndexFileHeader {
return 0
}
indexFileSize -= sizeofIndexFileHeader
*IsTruncated = ((indexFileSize % sizeofIndexEntry) != 0)
return int(indexFileSize / int64(sizeofIndexEntry))
}
func ReadBlockHeaders(indexFd *os.File, headers []BlockInfo, firstBlock int) error {
if len(headers) == 0 {
return nil
}
bufSize := 4096
buf := make([]byte, bufSize)
offset := int64(firstBlock)*sizeofIndexEntry + sizeofIndexFileHeader
for {
blocksToRead := len(headers)
if blocksToRead*sizeofIndexEntry > bufSize {
blocksToRead = bufSize / sizeofIndexEntry
}
raw := buf[:blocksToRead*sizeofIndexEntry]
_, err := indexFd.ReadAt(raw, offset)
if err != nil {
return err
}
for i, _ := range headers[:blocksToRead] {
hdr := &headers[i]
hdr.Parse(raw[i*sizeofIndexEntry:])
}
offset += int64(blocksToRead) * sizeofIndexEntry
headers = headers[blocksToRead:]
if len(headers) == 0 {
break
}
}
return nil
}
// TODO (?):
// Implement a reader class that can read a capture while it
// is being concurrently written by another thread/process.
// This requires thought, and it might not be possible to do generically enough
// given that users will likely want to maintain side datastructures for blocks
type ConcurrentIndexReader struct {
}
// The SimpleInMemoryIndex is suitable for offline use cases
// where the pcap file is not being concurrently written.
// Use ReadIndexIntoMemory to create.
type SimpleInMemoryIndex struct {
Layout PacketHeaderLayout
IndexHeader IndexFileHeader
Blocks []BlockInfo
BlockCount int
IndexFileSize int64
IsTruncated bool
}
func ReadIndexIntoMemory(indexFd *os.File) (*SimpleInMemoryIndex, error) {
memidx := &SimpleInMemoryIndex{}
err := ReadIndexFileHeader(indexFd, &memidx.IndexHeader)
if err != nil {
return nil, err
}
BuildPacketHeaderLayout(&memidx.Layout, memidx.IndexHeader.Flags)
memidx.IndexFileSize, err = getFileSize(indexFd)
if err != nil {
return nil, err
}
memidx.BlockCount = GetNumberOfBlocks(memidx.IndexFileSize, &memidx.IsTruncated)
memidx.Blocks = make([]BlockInfo, memidx.BlockCount)
err = ReadBlockHeaders(indexFd, memidx.Blocks, 0)
if err != nil {
return nil, err
}
return memidx, nil
}
type EvaluateCaptureResult struct {
IndexHeader IndexFileHeader
AppearsFlawless bool // all hashes passed, no truncation, sequence numbers OK
IndexIsTruncated bool
DataIsTruncated bool
IsTruncated bool
SequencingOk bool
AllBlocksPassed bool
TotalBlockCount int
GoodBlockCount int
FailedBlockCount int
PacketCount int64
DataSize int64
IndexSize int64
TotalSize int64
TotalPayloadSize int64 // overhead = 100% * (TotalPayloadSize/TotalSize-1)
PacketStreamCount int
PacketStreamHash map[uint16]uint64 // hash for each stream index
}
func EvaluateCapture(where *CapturePath, result *EvaluateCaptureResult) error {
*result = EvaluateCaptureResult{}
// open as readonly exlcusive to ensure that a writer isn't currently active on this capture
indexFd, dataFd, err := OpenCapture(where, os.O_RDONLY, 0644|os.ModeExclusive)
if err != nil {
return err
}
defer indexFd.Close()
defer dataFd.Close()
if indexFd == nil || dataFd == nil {
return os.ErrNotExist
}
memidx, err := ReadIndexIntoMemory(indexFd)
if err != nil {
return err
}
indexFileSize := memidx.IndexFileSize
blockCount := memidx.BlockCount
blockHeaders := memidx.Blocks
dataFileSize, err := getFileSize(dataFd)
if err != nil {
return err
}
dataStream := NewDataReadStream(dataFd, &memidx.Layout)
if blockCount > 0 {
blockHdr := &blockHeaders[blockCount-1]
dataEnd := blockHdr.Position + int64(blockHdr.ByteCount)
if dataEnd > dataFileSize {
result.DataIsTruncated = true
}
if dataEnd < dataFileSize {
result.IndexIsTruncated = true
}
}
var packet NextPacketOutput
var blockHash xxhash.XXHash64
streamHashes := make(map[uint16]*xxhash.XXHash64)
result.SequencingOk = true
for blockIdx := 0; blockIdx < blockCount; blockIdx++ {
blockHdr := &blockHeaders[blockIdx]
dataStream.SetReadRange(blockHdr, blockHdr)
blockHash.Reset()
if result.PacketCount != blockHdr.SeqNum {
result.SequencingOk = false
}
for {
err := dataStream.ReadNextPacket(&packet)
if err != nil {
if err != io.EOF {
// block read error
// don't bail entirely here, just go to the next block
}
break
}
streamIndex := ReadStreamIndex(packet.WholePacket, &memidx.Layout)
if streamHashes[streamIndex] == nil {
streamHashes[streamIndex] = xxhash.New64()
}
streamHashes[streamIndex].Write(packet.Payload)
blockHash.Write(packet.WholePacket)
result.PacketCount += 1
result.TotalPayloadSize += int64(len(packet.Payload))
}
if blockHash.Sum64() == blockHdr.Hash {
result.GoodBlockCount += 1
} else {
result.FailedBlockCount += 1
}
result.TotalBlockCount += 1
}
result.PacketStreamHash = make(map[uint16]uint64)
for k, v := range streamHashes {
result.PacketStreamHash[k] = v.Sum64()
}
result.PacketStreamCount = len(result.PacketStreamHash)
result.DataSize = dataFileSize
result.IndexSize = indexFileSize
result.TotalSize = indexFileSize + dataFileSize
result.IndexHeader = memidx.IndexHeader
result.AllBlocksPassed = (result.FailedBlockCount == 0)
result.IsTruncated = result.DataIsTruncated || result.IndexIsTruncated
result.AppearsFlawless = result.AllBlocksPassed && !result.IsTruncated && result.SequencingOk
return nil
}
func CapturesMatch(a, b *EvaluateCaptureResult) bool {
if !a.AppearsFlawless || !b.AppearsFlawless {
// initial implementation: don't even try to match if there could be some kind of error
return false
}
if a.DataSize != b.DataSize {
return false
}
return reflect.DeepEqual(a.PacketStreamHash, b.PacketStreamHash)
}
| ReadNextPacket | identifier_name |
reader.go | package ppcap
import (
"encoding/binary"
"io"
"os"
"reflect"
"github.com/OneOfOne/xxhash"
)
// DataReadStream is used for streaming packets from a ppcapd (data file).
// Can be used with or without the corresponding index file.
// If used with an index file, call SetReadRange to iterate inside a
// specific range of blocks (or a single block), then NextPacket until EOF.
type DataReadStream struct {
hdrlay PacketHeaderLayout
iseof bool // is the DataReadStream at EOF
isunderlyingeof bool // is the underlying dataFile at EOF
reader io.ReaderAt
buf []byte
bufferOffset int64
bufferUsed int
bufferAvail int
endOffset int64 // if we are, e.g. reading inside one particular block
}
type NextPacketOutput struct {
wholePacketSize int
WholePacket []byte // entire frame
Headers []byte // ppcap headers + protocol headers
Payload []byte // application layer
}
// generic next packet on byte slice
// output is only modified on success
func NextPacket(stream *[]byte, hdrlay *PacketHeaderLayout, output *NextPacketOutput) bool {
read := *stream
*output = NextPacketOutput{}
if len(read) < hdrlay.Size {
return false
}
wholePacketSize := hdrlay.Size + ReadPacketSize(read, hdrlay)
output.wholePacketSize = wholePacketSize
if len(read) < wholePacketSize {
return false
}
// advance stream
*stream = read[wholePacketSize:]
output.WholePacket = read[:wholePacketSize]
output.Headers = read[:hdrlay.Size+hdrlay.ProtocolHeadersSize]
output.Payload = read[hdrlay.Size+hdrlay.ProtocolHeadersSize : wholePacketSize]
return true
}
// without this hack, user code would need to be modified 10+ locations.
// probability of false positive is 2^-40 (about one in a trillion).
func XXX_HACK_autodetect_libpcap_layout(reader io.ReaderAt, hdrlay *PacketHeaderLayout) {
buf := make([]byte, 24)
reader.ReadAt(buf, 0)
if 0xa1b23c4d == binary.LittleEndian.Uint32(buf[0:4]) &&
228 == binary.LittleEndian.Uint32(buf[20:24]) {
BuildPacketHeaderLayout(hdrlay, HDRLAY_LIBPCAP)
}
}
func NewDataReadStream(reader io.ReaderAt, hdrlay *PacketHeaderLayout) *DataReadStream |
func (rs *DataReadStream) refillBuffer(packetLen int) error {
copy(rs.buf, rs.buf[rs.bufferUsed:])
if packetLen*16 > cap(rs.buf) {
// make space for 16x the largest packet, so that the
// circular buffer scheme doesn't have too much copying overhead
newbuf := make([]byte, packetLen*16)
copy(newbuf, rs.buf)
rs.buf = newbuf
}
dest := rs.buf[rs.bufferAvail:]
readSize := len(dest)
if rs.endOffset > 0 {
remaining := rs.endOffset - rs.bufferOffset
if remaining < int64(readSize) {
readSize = int(remaining)
dest = rs.buf[rs.bufferAvail : rs.bufferAvail+readSize]
}
}
if readSize == 0 {
rs.isunderlyingeof = true
return io.EOF
} else {
count, err := rs.reader.ReadAt(dest, rs.bufferOffset)
rs.bufferOffset += int64(count)
rs.bufferAvail += count
rs.bufferUsed = 0
if err == io.EOF {
rs.isunderlyingeof = true
return nil
} else {
return err
}
}
}
func (rs *DataReadStream) reset() {
rs.iseof = false
rs.isunderlyingeof = false
rs.bufferOffset = int64(rs.hdrlay.DataHeaderSize)
rs.bufferAvail = 0
rs.bufferUsed = len(rs.buf)
rs.endOffset = 0
}
// SetReadRange sets up the stream for reading a given range of blocks.
// if start = nil then reading will begin at offset 0.
// if end = nil then reading will end at EOF.
func (rs *DataReadStream) SetReadRange(start *BlockInfo, end *BlockInfo) {
rs.reset()
if start != nil {
rs.bufferOffset = start.Position
}
if end != nil {
rs.endOffset = end.Position + int64(end.ByteCount)
}
}
// ReadNextPacket returns a byte slice containing the next packet in the current read range.
// Once the last packet has been returned, any subsequent calls will EOF.
// If there is a partial packet at the end of the read range, io.ErrUnexpectedEOF is raised.
func (rs *DataReadStream) ReadNextPacket(output *NextPacketOutput) error {
for {
// handle EOF
if rs.iseof {
if rs.bufferAvail == 0 {
return io.EOF
}
return io.ErrUnexpectedEOF
}
rdbuf := rs.buf[rs.bufferUsed : rs.bufferUsed+rs.bufferAvail]
success := NextPacket(&rdbuf, &rs.hdrlay, output)
wholePacketSize := output.wholePacketSize
if success {
rs.bufferAvail -= wholePacketSize
rs.bufferUsed += wholePacketSize
return nil
} else {
// read more unless the underlying file is already EOF
if rs.isunderlyingeof {
rs.iseof = true
} else {
err := rs.refillBuffer(wholePacketSize)
if err != nil {
return err
}
}
}
}
}
func GetNumberOfBlocks(indexFileSize int64, IsTruncated *bool) int {
if indexFileSize < sizeofIndexFileHeader {
return 0
}
indexFileSize -= sizeofIndexFileHeader
*IsTruncated = ((indexFileSize % sizeofIndexEntry) != 0)
return int(indexFileSize / int64(sizeofIndexEntry))
}
func ReadBlockHeaders(indexFd *os.File, headers []BlockInfo, firstBlock int) error {
if len(headers) == 0 {
return nil
}
bufSize := 4096
buf := make([]byte, bufSize)
offset := int64(firstBlock)*sizeofIndexEntry + sizeofIndexFileHeader
for {
blocksToRead := len(headers)
if blocksToRead*sizeofIndexEntry > bufSize {
blocksToRead = bufSize / sizeofIndexEntry
}
raw := buf[:blocksToRead*sizeofIndexEntry]
_, err := indexFd.ReadAt(raw, offset)
if err != nil {
return err
}
for i, _ := range headers[:blocksToRead] {
hdr := &headers[i]
hdr.Parse(raw[i*sizeofIndexEntry:])
}
offset += int64(blocksToRead) * sizeofIndexEntry
headers = headers[blocksToRead:]
if len(headers) == 0 {
break
}
}
return nil
}
// TODO (?):
// Implement a reader class that can read a capture while it
// is being concurrently written by another thread/process.
// This requires thought, and it might not be possible to do generically enough
// given that users will likely want to maintain side datastructures for blocks
type ConcurrentIndexReader struct {
}
// The SimpleInMemoryIndex is suitable for offline use cases
// where the pcap file is not being concurrently written.
// Use ReadIndexIntoMemory to create.
type SimpleInMemoryIndex struct {
Layout PacketHeaderLayout
IndexHeader IndexFileHeader
Blocks []BlockInfo
BlockCount int
IndexFileSize int64
IsTruncated bool
}
func ReadIndexIntoMemory(indexFd *os.File) (*SimpleInMemoryIndex, error) {
memidx := &SimpleInMemoryIndex{}
err := ReadIndexFileHeader(indexFd, &memidx.IndexHeader)
if err != nil {
return nil, err
}
BuildPacketHeaderLayout(&memidx.Layout, memidx.IndexHeader.Flags)
memidx.IndexFileSize, err = getFileSize(indexFd)
if err != nil {
return nil, err
}
memidx.BlockCount = GetNumberOfBlocks(memidx.IndexFileSize, &memidx.IsTruncated)
memidx.Blocks = make([]BlockInfo, memidx.BlockCount)
err = ReadBlockHeaders(indexFd, memidx.Blocks, 0)
if err != nil {
return nil, err
}
return memidx, nil
}
type EvaluateCaptureResult struct {
IndexHeader IndexFileHeader
AppearsFlawless bool // all hashes passed, no truncation, sequence numbers OK
IndexIsTruncated bool
DataIsTruncated bool
IsTruncated bool
SequencingOk bool
AllBlocksPassed bool
TotalBlockCount int
GoodBlockCount int
FailedBlockCount int
PacketCount int64
DataSize int64
IndexSize int64
TotalSize int64
TotalPayloadSize int64 // overhead = 100% * (TotalPayloadSize/TotalSize-1)
PacketStreamCount int
PacketStreamHash map[uint16]uint64 // hash for each stream index
}
func EvaluateCapture(where *CapturePath, result *EvaluateCaptureResult) error {
*result = EvaluateCaptureResult{}
// open as readonly exlcusive to ensure that a writer isn't currently active on this capture
indexFd, dataFd, err := OpenCapture(where, os.O_RDONLY, 0644|os.ModeExclusive)
if err != nil {
return err
}
defer indexFd.Close()
defer dataFd.Close()
if indexFd == nil || dataFd == nil {
return os.ErrNotExist
}
memidx, err := ReadIndexIntoMemory(indexFd)
if err != nil {
return err
}
indexFileSize := memidx.IndexFileSize
blockCount := memidx.BlockCount
blockHeaders := memidx.Blocks
dataFileSize, err := getFileSize(dataFd)
if err != nil {
return err
}
dataStream := NewDataReadStream(dataFd, &memidx.Layout)
if blockCount > 0 {
blockHdr := &blockHeaders[blockCount-1]
dataEnd := blockHdr.Position + int64(blockHdr.ByteCount)
if dataEnd > dataFileSize {
result.DataIsTruncated = true
}
if dataEnd < dataFileSize {
result.IndexIsTruncated = true
}
}
var packet NextPacketOutput
var blockHash xxhash.XXHash64
streamHashes := make(map[uint16]*xxhash.XXHash64)
result.SequencingOk = true
for blockIdx := 0; blockIdx < blockCount; blockIdx++ {
blockHdr := &blockHeaders[blockIdx]
dataStream.SetReadRange(blockHdr, blockHdr)
blockHash.Reset()
if result.PacketCount != blockHdr.SeqNum {
result.SequencingOk = false
}
for {
err := dataStream.ReadNextPacket(&packet)
if err != nil {
if err != io.EOF {
// block read error
// don't bail entirely here, just go to the next block
}
break
}
streamIndex := ReadStreamIndex(packet.WholePacket, &memidx.Layout)
if streamHashes[streamIndex] == nil {
streamHashes[streamIndex] = xxhash.New64()
}
streamHashes[streamIndex].Write(packet.Payload)
blockHash.Write(packet.WholePacket)
result.PacketCount += 1
result.TotalPayloadSize += int64(len(packet.Payload))
}
if blockHash.Sum64() == blockHdr.Hash {
result.GoodBlockCount += 1
} else {
result.FailedBlockCount += 1
}
result.TotalBlockCount += 1
}
result.PacketStreamHash = make(map[uint16]uint64)
for k, v := range streamHashes {
result.PacketStreamHash[k] = v.Sum64()
}
result.PacketStreamCount = len(result.PacketStreamHash)
result.DataSize = dataFileSize
result.IndexSize = indexFileSize
result.TotalSize = indexFileSize + dataFileSize
result.IndexHeader = memidx.IndexHeader
result.AllBlocksPassed = (result.FailedBlockCount == 0)
result.IsTruncated = result.DataIsTruncated || result.IndexIsTruncated
result.AppearsFlawless = result.AllBlocksPassed && !result.IsTruncated && result.SequencingOk
return nil
}
func CapturesMatch(a, b *EvaluateCaptureResult) bool {
if !a.AppearsFlawless || !b.AppearsFlawless {
// initial implementation: don't even try to match if there could be some kind of error
return false
}
if a.DataSize != b.DataSize {
return false
}
return reflect.DeepEqual(a.PacketStreamHash, b.PacketStreamHash)
}
| {
rs := &DataReadStream{}
rs.hdrlay = *hdrlay
rs.reader = reader
rs.buf = make([]byte, 2*PPCAP_DEFAULT_MAX_BLOCK_SIZE)
XXX_HACK_autodetect_libpcap_layout(reader, &rs.hdrlay)
rs.reset()
return rs
} | identifier_body |
testclient.rs | extern crate byteorder;
extern crate clap;
extern crate data_encoding;
extern crate env_logger;
#[macro_use] extern crate log;
extern crate qrcodegen;
extern crate saltyrtc_client;
extern crate saltyrtc_task_relayed_data;
extern crate tokio_core;
use std::env;
use std::io::Write;
use std::process;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use byteorder::{BigEndian, WriteBytesExt};
use clap::{Arg, App, SubCommand};
use data_encoding::{HEXLOWER, HEXLOWER_PERMISSIVE, BASE64};
use qrcodegen::{QrCode, QrCodeEcc};
use saltyrtc_client::{SaltyClient, Role, BoxedFuture};
use saltyrtc_client::crypto::{KeyPair, AuthToken, public_key_from_hex_str};
use saltyrtc_client::dep::futures::{future, Future, Stream};
use saltyrtc_client::dep::futures::sync::mpsc;
use saltyrtc_client::dep::native_tls::{TlsConnector, Protocol};
use saltyrtc_client::tasks::Task;
use saltyrtc_task_relayed_data::{RelayedDataTask, RelayedDataError, MessageEvent};
use tokio_core::reactor::Core;
const ARG_PING_INTERVAL: &str = "ping_interval";
const ARG_SRV_HOST: &str = "host";
const ARG_SRV_PORT: &str = "port";
const ARG_SRV_PUBKEY: &str = "pubkey";
const ARG_PATH: &str = "path";
const ARG_AUTHTOKEN: &str = "auth_token";
const VERSION: &str = env!("CARGO_PKG_VERSION");
/// Wrap future in a box with type erasure.
macro_rules! boxed {
($future:expr) => {{
Box::new($future) as BoxedFuture<_, _>
}}
}
/// Create the QR code payload
fn make_qrcode_payload(version: u16, permanent: bool, host: &str, port: u16, pubkey: &[u8], auth_token: &[u8], server_pubkey: &[u8]) -> Vec<u8> {
let mut data: Vec<u8> = Vec::with_capacity(101 + host.as_bytes().len());
data.write_u16::<BigEndian>(version).unwrap();
data.push(if permanent { 0x02 } else { 0x00 });
data.write_all(pubkey).unwrap();
data.write_all(auth_token).unwrap();
data.write_all(server_pubkey).unwrap();
data.write_u16::<BigEndian>(port).unwrap();
data.write_all(host.as_bytes()).unwrap();
data
}
/// Print the QR code payload to the terminal
fn print_qrcode(payload: &[u8]) {
let base64 = BASE64.encode(payload);
let qr = QrCode::encode_text(&base64, QrCodeEcc::Low).unwrap();
let border = 1;
for y in -border .. qr.size() + border {
for x in -border .. qr.size() + border {
let c: char = if qr.get_module(x, y) { '█' } else { ' ' };
print!("{0}{0}", c);
}
println!();
}
println!();
}
fn main() {
// Set up CLI arguments
let arg_srv_host = Arg::with_name(ARG_SRV_HOST)
.short('h')
.takes_value(true)
.value_name("SRV_HOST")
.required(true)
.default_value("server.saltyrtc.org")
.help("The SaltyRTC server hostname");
let arg_srv_port = Arg::with_name(ARG_SRV_PORT)
.short('p')
.takes_value(true)
.value_name("SRV_PORT")
.required(true)
.default_value("443")
.help("The SaltyRTC server port");
let arg_srv_pubkey = Arg::with_name(ARG_SRV_PUBKEY)
.short('s')
.takes_value(true)
.value_name("SRV_PUBKEY")
.required(true)
.default_value("f77fe623b6977d470ac8c7bf7011c4ad08a1d126896795db9d2b4b7a49ae1045")
.help("The SaltyRTC server public permanent key");
let arg_ping_interval = Arg::with_name(ARG_PING_INTERVAL)
.short('i')
.takes_value(true)
.value_name("SECONDS")
.required(false)
.default_value("60")
.help("The WebSocket ping interval (set to 0 to disable pings)");
let app = App::new("SaltyRTC Relayed Data Test Initiator")
.version(VERSION)
.author("Danilo Bargen <mail@dbrgn.ch>")
.about("Test client for SaltyRTC Relayed Data Task.")
.subcommand(SubCommand::with_name("initiator")
.about("Start client as initiator")
.arg(arg_srv_host.clone())
.arg(arg_srv_port.clone())
.arg(arg_srv_pubkey.clone())
.arg(arg_ping_interval.clone()))
.subcommand(SubCommand::with_name("responder")
.about("Start client as responder")
.arg(Arg::with_name(ARG_PATH)
.short('k')
.takes_value(true)
.value_name("INITIATOR_PUBKEY")
.required(true)
.help("The hex encoded public key of the initiator"))
.arg(Arg::with_name(ARG_AUTHTOKEN)
.short('a')
.alias("token")
.alias("authtoken")
.takes_value(true)
.value_name("AUTHTOKEN")
.required(true)
.help("The auth token (hex encoded)"))
.arg(arg_srv_host)
.arg(arg_srv_port)
.arg(arg_srv_pubkey)
.arg(arg_ping_interval));
// Parse arguments
let matches = app.get_matches();
let (subcommand_name, args) = matches.subcommand().unwrap_or_else(|| {
println!("Missing subcommand.");
println!("Use -h or --help to see usage.");
process::exit(1);
});
// Determine role
let role = match subcommand_name {
"initiator" => Role::Initiator,
"responder" => Role::Responder,
other => {
println!("Invalid subcommand: {}", other);
process::exit(1);
},
};
// Set up logging
env::set_var("RUST_LOG", "saltyrtc_client=debug,saltyrtc_task_relayed_data=debug,testclient=trace");
env_logger::init();
// Tokio reactor core
let mut core = Core::new().unwrap();
// Create TLS connector instance
let tls_connector = TlsConnector::builder()
.min_protocol_version(Some(Protocol::Tlsv11))
.build()
.unwrap_or_else(|e| panic!("Could not initialize TlsConnector: {}", e));
// Create new public permanent keypair
let keypair = KeyPair::new();
let pubkey = keypair.public_key().clone();
// Determine websocket path
let path: String = match role {
Role::Initiator => keypair.public_key_hex(),
Role::Responder => args.value_of(ARG_PATH).expect("Initiator pubkey not supplied").to_lowercase(),
};
// Determine ping interval
let ping_interval = {
let seconds: u64 = args.value_of(ARG_PING_INTERVAL).expect("Ping interval not supplied")
.parse().expect("Could not parse interval seconds to a number");
Duration::from_secs(seconds)
};
// Determine server info
let server_host: &str = args.value_of(ARG_SRV_HOST).expect("Server hostname not supplied");
let server_port: u16 = args.value_of(ARG_SRV_PORT).expect("Server port not supplied").parse().expect("Could not parse port to a number");
let server_pubkey: Vec<u8> = HEXLOWER_PERMISSIVE.decode(
args.value_of(ARG_SRV_PUBKEY).expect("Server pubkey not supplied").as_bytes()
).unwrap();
// Set up task instance
let (incoming_tx, incoming_rx) = mpsc::unbounded();
let task = RelayedDataTask::new(core.remote(), incoming_tx);
// Set up client instance
let client = Arc::new(RwLock::new({
let builder = SaltyClient::build(keypair)
.add_task(Box::new(task))
.with_ping_interval(Some(ping_interval));
match role {
Role::Initiator => builder
.initiator()
.expect("Could not create SaltyClient instance"),
Role::Responder => {
let auth_token_hex = args.value_of(ARG_AUTHTOKEN).expect("Auth token not supplied").to_string();
let auth_token = AuthToken::from_hex_str(&auth_token_hex).expect("Invalid auth token hex string");
let initiator_pubkey = public_key_from_hex_str(&path).unwrap();
builder
.responder(initiator_pubkey, auth_token)
.expect("Could not create SaltyClient instance")
}
}
}));
// Connect future
let (connect_future, event_channel) = saltyrtc_client::connect(
server_host,
server_port,
Some(tls_connector),
client.clone(),
)
.unwrap();
// Handshake future
let event_tx = event_channel.clone_tx();
let handshake_future = connect_future
.and_then(|ws_client| saltyrtc_client::do_handshake(ws_client, client.clone(), event_tx, None));
// Determine QR code payload
let payload = make_qrcode_payload(
1,
false,
server_host,
server_port,
pubkey.as_bytes(),
client.read().unwrap().auth_token().unwrap().secret_key_bytes(),
&server_pubkey,
);
// Print connection info
println!("\n#====================#");
println!("Host: {}:{}", server_host, server_port);
match role {
Role::Initiator => {
println!("Pubkey: {}", HEXLOWER.encode(pubkey.as_bytes()));
println!("Auth token: {}", HEXLOWER.encode(client.read().unwrap().auth_token().unwrap().secret_key_bytes()));
println!();
println!("QR Code:");
print_qrcode(&payload);
println!("{}", BASE64.encode(&payload));
println!("\n#====================#\n");
}
Role::Responder => {
println!("Pubkey: {}", args.value_of(ARG_AUTHTOKEN).expect("Auth token not supplied").to_string());
println!("#====================#\n");
}
}
// Run connect future to completion
let ws_client = core.run(handshake_future).expect("Could not connect");
// Setup task loop
let event_tx = event_channel.clone_tx();
let (task, task_loop) = saltyrtc_client::task_loop(ws_client, client.clone(), event_tx).unwrap();
// Get access to outgoing channel
let _outgoing_tx = {
// Get reference to task and downcast it to `RelayedDataTask`.
// We can be sure that it's a `RelayedDataTask` since that's the only one we proposed.
let mut t = task.lock().expect("Could not lock task mutex");
let rd_task: &mut RelayedDataTask = (&mut **t as &mut dyn Task)
.downcast_mut::<RelayedDataTask>()
.expect("Chosen task is not a RelayedDataTask");
// Get unbounded senders for outgoing messages
rd_task.get_sender().unwrap()
};
// Print all incoming events to stdout
let recv_loop = incoming_rx
.map_err(|_| Err(RelayedDataError::Channel(("Could not read from rx_responder").into())))
.for_each(move |ev: MessageEvent| match ev {
MessageEvent::Data(data) => {
println!("Incoming data message: {}", data);
boxed!(future::ok(()))
},
MessageEvent::Application(data) => {
println!("Incoming application message: {}", data);
boxed!(future::ok(()))
},
MessageEvent::Close(reason) => {
println!("Connection was closed: {}", reason);
boxed!(future::err(Ok(())))
}
})
.or_else(|e| e)
.then(|f| { debug!("† recv_loop done"); f });
match core.run(
task_loop
.map_err(|e| e.to_string())
.then(|f| { debug!("† task_loop done"); f })
.join(recv_loop.map_err(|e| e.to_string()))
) {
Ok(_) => info!("Done."),
Err(e) => panic!("Error: {}", e),
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_m | let pubkey = HEXLOWER.decode(b"4242424242424242424242424242424242424242424242424242424242424242").unwrap();
let auth_token = HEXLOWER.decode(b"2323232323232323232323232323232323232323232323232323232323232323").unwrap();
let server_pubkey = HEXLOWER.decode(b"1337133713371337133713371337133713371337133713371337133713371337").unwrap();
let data = make_qrcode_payload(1337, true, "saltyrtc.example.org", 1234, &pubkey, &auth_token, &server_pubkey);
let expected = BASE64.decode(b"BTkCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkIjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIxM3EzcTNxM3EzcTNxM3EzcTNxM3EzcTNxM3EzcTNxM3BNJzYWx0eXJ0Yy5leGFtcGxlLm9yZw==").unwrap();
assert_eq!(data, expected);
}
}
| ake_qrcode_data() {
| identifier_name |
testclient.rs | extern crate byteorder;
extern crate clap;
extern crate data_encoding;
extern crate env_logger;
#[macro_use] extern crate log;
extern crate qrcodegen;
extern crate saltyrtc_client;
extern crate saltyrtc_task_relayed_data;
extern crate tokio_core;
use std::env;
use std::io::Write;
use std::process;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use byteorder::{BigEndian, WriteBytesExt};
use clap::{Arg, App, SubCommand};
use data_encoding::{HEXLOWER, HEXLOWER_PERMISSIVE, BASE64};
use qrcodegen::{QrCode, QrCodeEcc};
use saltyrtc_client::{SaltyClient, Role, BoxedFuture};
use saltyrtc_client::crypto::{KeyPair, AuthToken, public_key_from_hex_str};
use saltyrtc_client::dep::futures::{future, Future, Stream};
use saltyrtc_client::dep::futures::sync::mpsc;
use saltyrtc_client::dep::native_tls::{TlsConnector, Protocol};
use saltyrtc_client::tasks::Task;
use saltyrtc_task_relayed_data::{RelayedDataTask, RelayedDataError, MessageEvent};
use tokio_core::reactor::Core;
const ARG_PING_INTERVAL: &str = "ping_interval";
const ARG_SRV_HOST: &str = "host";
const ARG_SRV_PORT: &str = "port";
const ARG_SRV_PUBKEY: &str = "pubkey";
const ARG_PATH: &str = "path";
const ARG_AUTHTOKEN: &str = "auth_token";
const VERSION: &str = env!("CARGO_PKG_VERSION");
/// Wrap future in a box with type erasure.
macro_rules! boxed {
($future:expr) => {{
Box::new($future) as BoxedFuture<_, _>
}}
}
/// Create the QR code payload
fn make_qrcode_payload(version: u16, permanent: bool, host: &str, port: u16, pubkey: &[u8], auth_token: &[u8], server_pubkey: &[u8]) -> Vec<u8> {
let mut data: Vec<u8> = Vec::with_capacity(101 + host.as_bytes().len());
data.write_u16::<BigEndian>(version).unwrap();
data.push(if permanent { 0x02 } else { 0x00 });
data.write_all(pubkey).unwrap();
data.write_all(auth_token).unwrap();
data.write_all(server_pubkey).unwrap();
data.write_u16::<BigEndian>(port).unwrap();
data.write_all(host.as_bytes()).unwrap();
data
}
/// Print the QR code payload to the terminal
fn print_qrcode(payload: &[u8]) {
let base64 = BASE64.encode(payload);
let qr = QrCode::encode_text(&base64, QrCodeEcc::Low).unwrap();
let border = 1;
for y in -border .. qr.size() + border {
for x in -border .. qr.size() + border {
let c: char = if qr.get_module(x, y) { '█' } else { ' ' };
print!("{0}{0}", c);
}
println!();
}
println!();
}
fn main() {
| g(test)]
mod tests {
use super::*;
#[test]
fn test_make_qrcode_data() {
let pubkey = HEXLOWER.decode(b"4242424242424242424242424242424242424242424242424242424242424242").unwrap();
let auth_token = HEXLOWER.decode(b"2323232323232323232323232323232323232323232323232323232323232323").unwrap();
let server_pubkey = HEXLOWER.decode(b"1337133713371337133713371337133713371337133713371337133713371337").unwrap();
let data = make_qrcode_payload(1337, true, "saltyrtc.example.org", 1234, &pubkey, &auth_token, &server_pubkey);
let expected = BASE64.decode(b"BTkCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkIjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIxM3EzcTNxM3EzcTNxM3EzcTNxM3EzcTNxM3EzcTNxM3BNJzYWx0eXJ0Yy5leGFtcGxlLm9yZw==").unwrap();
assert_eq!(data, expected);
}
}
|
// Set up CLI arguments
let arg_srv_host = Arg::with_name(ARG_SRV_HOST)
.short('h')
.takes_value(true)
.value_name("SRV_HOST")
.required(true)
.default_value("server.saltyrtc.org")
.help("The SaltyRTC server hostname");
let arg_srv_port = Arg::with_name(ARG_SRV_PORT)
.short('p')
.takes_value(true)
.value_name("SRV_PORT")
.required(true)
.default_value("443")
.help("The SaltyRTC server port");
let arg_srv_pubkey = Arg::with_name(ARG_SRV_PUBKEY)
.short('s')
.takes_value(true)
.value_name("SRV_PUBKEY")
.required(true)
.default_value("f77fe623b6977d470ac8c7bf7011c4ad08a1d126896795db9d2b4b7a49ae1045")
.help("The SaltyRTC server public permanent key");
let arg_ping_interval = Arg::with_name(ARG_PING_INTERVAL)
.short('i')
.takes_value(true)
.value_name("SECONDS")
.required(false)
.default_value("60")
.help("The WebSocket ping interval (set to 0 to disable pings)");
let app = App::new("SaltyRTC Relayed Data Test Initiator")
.version(VERSION)
.author("Danilo Bargen <mail@dbrgn.ch>")
.about("Test client for SaltyRTC Relayed Data Task.")
.subcommand(SubCommand::with_name("initiator")
.about("Start client as initiator")
.arg(arg_srv_host.clone())
.arg(arg_srv_port.clone())
.arg(arg_srv_pubkey.clone())
.arg(arg_ping_interval.clone()))
.subcommand(SubCommand::with_name("responder")
.about("Start client as responder")
.arg(Arg::with_name(ARG_PATH)
.short('k')
.takes_value(true)
.value_name("INITIATOR_PUBKEY")
.required(true)
.help("The hex encoded public key of the initiator"))
.arg(Arg::with_name(ARG_AUTHTOKEN)
.short('a')
.alias("token")
.alias("authtoken")
.takes_value(true)
.value_name("AUTHTOKEN")
.required(true)
.help("The auth token (hex encoded)"))
.arg(arg_srv_host)
.arg(arg_srv_port)
.arg(arg_srv_pubkey)
.arg(arg_ping_interval));
// Parse arguments
let matches = app.get_matches();
let (subcommand_name, args) = matches.subcommand().unwrap_or_else(|| {
println!("Missing subcommand.");
println!("Use -h or --help to see usage.");
process::exit(1);
});
// Determine role
let role = match subcommand_name {
"initiator" => Role::Initiator,
"responder" => Role::Responder,
other => {
println!("Invalid subcommand: {}", other);
process::exit(1);
},
};
// Set up logging
env::set_var("RUST_LOG", "saltyrtc_client=debug,saltyrtc_task_relayed_data=debug,testclient=trace");
env_logger::init();
// Tokio reactor core
let mut core = Core::new().unwrap();
// Create TLS connector instance
let tls_connector = TlsConnector::builder()
.min_protocol_version(Some(Protocol::Tlsv11))
.build()
.unwrap_or_else(|e| panic!("Could not initialize TlsConnector: {}", e));
// Create new public permanent keypair
let keypair = KeyPair::new();
let pubkey = keypair.public_key().clone();
// Determine websocket path
let path: String = match role {
Role::Initiator => keypair.public_key_hex(),
Role::Responder => args.value_of(ARG_PATH).expect("Initiator pubkey not supplied").to_lowercase(),
};
// Determine ping interval
let ping_interval = {
let seconds: u64 = args.value_of(ARG_PING_INTERVAL).expect("Ping interval not supplied")
.parse().expect("Could not parse interval seconds to a number");
Duration::from_secs(seconds)
};
// Determine server info
let server_host: &str = args.value_of(ARG_SRV_HOST).expect("Server hostname not supplied");
let server_port: u16 = args.value_of(ARG_SRV_PORT).expect("Server port not supplied").parse().expect("Could not parse port to a number");
let server_pubkey: Vec<u8> = HEXLOWER_PERMISSIVE.decode(
args.value_of(ARG_SRV_PUBKEY).expect("Server pubkey not supplied").as_bytes()
).unwrap();
// Set up task instance
let (incoming_tx, incoming_rx) = mpsc::unbounded();
let task = RelayedDataTask::new(core.remote(), incoming_tx);
// Set up client instance
let client = Arc::new(RwLock::new({
let builder = SaltyClient::build(keypair)
.add_task(Box::new(task))
.with_ping_interval(Some(ping_interval));
match role {
Role::Initiator => builder
.initiator()
.expect("Could not create SaltyClient instance"),
Role::Responder => {
let auth_token_hex = args.value_of(ARG_AUTHTOKEN).expect("Auth token not supplied").to_string();
let auth_token = AuthToken::from_hex_str(&auth_token_hex).expect("Invalid auth token hex string");
let initiator_pubkey = public_key_from_hex_str(&path).unwrap();
builder
.responder(initiator_pubkey, auth_token)
.expect("Could not create SaltyClient instance")
}
}
}));
// Connect future
let (connect_future, event_channel) = saltyrtc_client::connect(
server_host,
server_port,
Some(tls_connector),
client.clone(),
)
.unwrap();
// Handshake future
let event_tx = event_channel.clone_tx();
let handshake_future = connect_future
.and_then(|ws_client| saltyrtc_client::do_handshake(ws_client, client.clone(), event_tx, None));
// Determine QR code payload
let payload = make_qrcode_payload(
1,
false,
server_host,
server_port,
pubkey.as_bytes(),
client.read().unwrap().auth_token().unwrap().secret_key_bytes(),
&server_pubkey,
);
// Print connection info
println!("\n#====================#");
println!("Host: {}:{}", server_host, server_port);
match role {
Role::Initiator => {
println!("Pubkey: {}", HEXLOWER.encode(pubkey.as_bytes()));
println!("Auth token: {}", HEXLOWER.encode(client.read().unwrap().auth_token().unwrap().secret_key_bytes()));
println!();
println!("QR Code:");
print_qrcode(&payload);
println!("{}", BASE64.encode(&payload));
println!("\n#====================#\n");
}
Role::Responder => {
println!("Pubkey: {}", args.value_of(ARG_AUTHTOKEN).expect("Auth token not supplied").to_string());
println!("#====================#\n");
}
}
// Run connect future to completion
let ws_client = core.run(handshake_future).expect("Could not connect");
// Setup task loop
let event_tx = event_channel.clone_tx();
let (task, task_loop) = saltyrtc_client::task_loop(ws_client, client.clone(), event_tx).unwrap();
// Get access to outgoing channel
let _outgoing_tx = {
// Get reference to task and downcast it to `RelayedDataTask`.
// We can be sure that it's a `RelayedDataTask` since that's the only one we proposed.
let mut t = task.lock().expect("Could not lock task mutex");
let rd_task: &mut RelayedDataTask = (&mut **t as &mut dyn Task)
.downcast_mut::<RelayedDataTask>()
.expect("Chosen task is not a RelayedDataTask");
// Get unbounded senders for outgoing messages
rd_task.get_sender().unwrap()
};
// Print all incoming events to stdout
let recv_loop = incoming_rx
.map_err(|_| Err(RelayedDataError::Channel(("Could not read from rx_responder").into())))
.for_each(move |ev: MessageEvent| match ev {
MessageEvent::Data(data) => {
println!("Incoming data message: {}", data);
boxed!(future::ok(()))
},
MessageEvent::Application(data) => {
println!("Incoming application message: {}", data);
boxed!(future::ok(()))
},
MessageEvent::Close(reason) => {
println!("Connection was closed: {}", reason);
boxed!(future::err(Ok(())))
}
})
.or_else(|e| e)
.then(|f| { debug!("† recv_loop done"); f });
match core.run(
task_loop
.map_err(|e| e.to_string())
.then(|f| { debug!("† task_loop done"); f })
.join(recv_loop.map_err(|e| e.to_string()))
) {
Ok(_) => info!("Done."),
Err(e) => panic!("Error: {}", e),
};
}
#[cf | identifier_body |
testclient.rs | extern crate byteorder;
extern crate clap;
extern crate data_encoding;
extern crate env_logger;
#[macro_use] extern crate log;
extern crate qrcodegen;
extern crate saltyrtc_client;
extern crate saltyrtc_task_relayed_data;
extern crate tokio_core;
use std::env;
use std::io::Write;
use std::process;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use byteorder::{BigEndian, WriteBytesExt};
use clap::{Arg, App, SubCommand};
use data_encoding::{HEXLOWER, HEXLOWER_PERMISSIVE, BASE64};
use qrcodegen::{QrCode, QrCodeEcc};
use saltyrtc_client::{SaltyClient, Role, BoxedFuture};
use saltyrtc_client::crypto::{KeyPair, AuthToken, public_key_from_hex_str};
use saltyrtc_client::dep::futures::{future, Future, Stream};
use saltyrtc_client::dep::futures::sync::mpsc;
use saltyrtc_client::dep::native_tls::{TlsConnector, Protocol};
use saltyrtc_client::tasks::Task;
use saltyrtc_task_relayed_data::{RelayedDataTask, RelayedDataError, MessageEvent};
use tokio_core::reactor::Core;
const ARG_PING_INTERVAL: &str = "ping_interval";
const ARG_SRV_HOST: &str = "host";
const ARG_SRV_PORT: &str = "port";
const ARG_SRV_PUBKEY: &str = "pubkey";
const ARG_PATH: &str = "path";
const ARG_AUTHTOKEN: &str = "auth_token";
const VERSION: &str = env!("CARGO_PKG_VERSION");
/// Wrap future in a box with type erasure.
macro_rules! boxed {
($future:expr) => {{
Box::new($future) as BoxedFuture<_, _>
}}
}
/// Create the QR code payload
fn make_qrcode_payload(version: u16, permanent: bool, host: &str, port: u16, pubkey: &[u8], auth_token: &[u8], server_pubkey: &[u8]) -> Vec<u8> {
let mut data: Vec<u8> = Vec::with_capacity(101 + host.as_bytes().len());
data.write_u16::<BigEndian>(version).unwrap();
data.push(if permanent { 0x02 } else { 0x00 });
data.write_all(pubkey).unwrap();
data.write_all(auth_token).unwrap();
data.write_all(server_pubkey).unwrap();
data.write_u16::<BigEndian>(port).unwrap();
data.write_all(host.as_bytes()).unwrap();
data
}
/// Print the QR code payload to the terminal
fn print_qrcode(payload: &[u8]) {
let base64 = BASE64.encode(payload);
let qr = QrCode::encode_text(&base64, QrCodeEcc::Low).unwrap();
let border = 1;
for y in -border .. qr.size() + border {
for x in -border .. qr.size() + border {
let c: char = if qr.get_module(x, y) { '█' } else { ' ' };
print!("{0}{0}", c);
}
println!();
}
println!();
}
fn main() {
// Set up CLI arguments
let arg_srv_host = Arg::with_name(ARG_SRV_HOST)
.short('h')
.takes_value(true)
.value_name("SRV_HOST")
.required(true)
.default_value("server.saltyrtc.org")
.help("The SaltyRTC server hostname");
let arg_srv_port = Arg::with_name(ARG_SRV_PORT)
.short('p')
.takes_value(true)
.value_name("SRV_PORT")
.required(true)
.default_value("443")
.help("The SaltyRTC server port");
let arg_srv_pubkey = Arg::with_name(ARG_SRV_PUBKEY)
.short('s')
.takes_value(true)
.value_name("SRV_PUBKEY")
.required(true)
.default_value("f77fe623b6977d470ac8c7bf7011c4ad08a1d126896795db9d2b4b7a49ae1045")
.help("The SaltyRTC server public permanent key");
let arg_ping_interval = Arg::with_name(ARG_PING_INTERVAL)
.short('i')
.takes_value(true)
.value_name("SECONDS")
.required(false)
.default_value("60")
.help("The WebSocket ping interval (set to 0 to disable pings)");
let app = App::new("SaltyRTC Relayed Data Test Initiator")
.version(VERSION)
.author("Danilo Bargen <mail@dbrgn.ch>")
.about("Test client for SaltyRTC Relayed Data Task.")
.subcommand(SubCommand::with_name("initiator")
.about("Start client as initiator")
.arg(arg_srv_host.clone())
.arg(arg_srv_port.clone())
.arg(arg_srv_pubkey.clone())
.arg(arg_ping_interval.clone()))
.subcommand(SubCommand::with_name("responder")
.about("Start client as responder")
.arg(Arg::with_name(ARG_PATH)
.short('k')
.takes_value(true)
.value_name("INITIATOR_PUBKEY")
.required(true)
.help("The hex encoded public key of the initiator"))
.arg(Arg::with_name(ARG_AUTHTOKEN)
.short('a')
.alias("token")
.alias("authtoken")
.takes_value(true)
.value_name("AUTHTOKEN")
.required(true)
.help("The auth token (hex encoded)"))
.arg(arg_srv_host)
.arg(arg_srv_port)
.arg(arg_srv_pubkey)
.arg(arg_ping_interval));
// Parse arguments
let matches = app.get_matches();
let (subcommand_name, args) = matches.subcommand().unwrap_or_else(|| {
println!("Missing subcommand.");
println!("Use -h or --help to see usage.");
process::exit(1);
});
// Determine role
let role = match subcommand_name {
"initiator" => Role::Initiator,
"responder" => Role::Responder,
other => {
println!("Invalid subcommand: {}", other);
process::exit(1);
},
};
// Set up logging
env::set_var("RUST_LOG", "saltyrtc_client=debug,saltyrtc_task_relayed_data=debug,testclient=trace");
env_logger::init();
// Tokio reactor core
let mut core = Core::new().unwrap();
// Create TLS connector instance
let tls_connector = TlsConnector::builder()
.min_protocol_version(Some(Protocol::Tlsv11))
.build()
.unwrap_or_else(|e| panic!("Could not initialize TlsConnector: {}", e));
// Create new public permanent keypair
let keypair = KeyPair::new();
let pubkey = keypair.public_key().clone();
// Determine websocket path
let path: String = match role {
Role::Initiator => keypair.public_key_hex(),
Role::Responder => args.value_of(ARG_PATH).expect("Initiator pubkey not supplied").to_lowercase(),
};
// Determine ping interval
let ping_interval = {
let seconds: u64 = args.value_of(ARG_PING_INTERVAL).expect("Ping interval not supplied")
.parse().expect("Could not parse interval seconds to a number");
Duration::from_secs(seconds)
};
// Determine server info
let server_host: &str = args.value_of(ARG_SRV_HOST).expect("Server hostname not supplied");
let server_port: u16 = args.value_of(ARG_SRV_PORT).expect("Server port not supplied").parse().expect("Could not parse port to a number");
let server_pubkey: Vec<u8> = HEXLOWER_PERMISSIVE.decode(
args.value_of(ARG_SRV_PUBKEY).expect("Server pubkey not supplied").as_bytes()
).unwrap();
// Set up task instance
let (incoming_tx, incoming_rx) = mpsc::unbounded();
let task = RelayedDataTask::new(core.remote(), incoming_tx);
// Set up client instance
let client = Arc::new(RwLock::new({
let builder = SaltyClient::build(keypair)
.add_task(Box::new(task))
.with_ping_interval(Some(ping_interval));
match role {
Role::Initiator => builder
.initiator()
.expect("Could not create SaltyClient instance"),
Role::Responder => {
let auth_token_hex = args.value_of(ARG_AUTHTOKEN).expect("Auth token not supplied").to_string();
let auth_token = AuthToken::from_hex_str(&auth_token_hex).expect("Invalid auth token hex string");
let initiator_pubkey = public_key_from_hex_str(&path).unwrap();
builder
.responder(initiator_pubkey, auth_token)
.expect("Could not create SaltyClient instance")
}
}
}));
// Connect future
let (connect_future, event_channel) = saltyrtc_client::connect(
server_host,
server_port,
Some(tls_connector),
client.clone(),
)
.unwrap();
// Handshake future
let event_tx = event_channel.clone_tx();
let handshake_future = connect_future
.and_then(|ws_client| saltyrtc_client::do_handshake(ws_client, client.clone(), event_tx, None));
// Determine QR code payload
let payload = make_qrcode_payload(
1,
false, | client.read().unwrap().auth_token().unwrap().secret_key_bytes(),
&server_pubkey,
);
// Print connection info
println!("\n#====================#");
println!("Host: {}:{}", server_host, server_port);
match role {
Role::Initiator => {
println!("Pubkey: {}", HEXLOWER.encode(pubkey.as_bytes()));
println!("Auth token: {}", HEXLOWER.encode(client.read().unwrap().auth_token().unwrap().secret_key_bytes()));
println!();
println!("QR Code:");
print_qrcode(&payload);
println!("{}", BASE64.encode(&payload));
println!("\n#====================#\n");
}
Role::Responder => {
println!("Pubkey: {}", args.value_of(ARG_AUTHTOKEN).expect("Auth token not supplied").to_string());
println!("#====================#\n");
}
}
// Run connect future to completion
let ws_client = core.run(handshake_future).expect("Could not connect");
// Setup task loop
let event_tx = event_channel.clone_tx();
let (task, task_loop) = saltyrtc_client::task_loop(ws_client, client.clone(), event_tx).unwrap();
// Get access to outgoing channel
let _outgoing_tx = {
// Get reference to task and downcast it to `RelayedDataTask`.
// We can be sure that it's a `RelayedDataTask` since that's the only one we proposed.
let mut t = task.lock().expect("Could not lock task mutex");
let rd_task: &mut RelayedDataTask = (&mut **t as &mut dyn Task)
.downcast_mut::<RelayedDataTask>()
.expect("Chosen task is not a RelayedDataTask");
// Get unbounded senders for outgoing messages
rd_task.get_sender().unwrap()
};
// Print all incoming events to stdout
let recv_loop = incoming_rx
.map_err(|_| Err(RelayedDataError::Channel(("Could not read from rx_responder").into())))
.for_each(move |ev: MessageEvent| match ev {
MessageEvent::Data(data) => {
println!("Incoming data message: {}", data);
boxed!(future::ok(()))
},
MessageEvent::Application(data) => {
println!("Incoming application message: {}", data);
boxed!(future::ok(()))
},
MessageEvent::Close(reason) => {
println!("Connection was closed: {}", reason);
boxed!(future::err(Ok(())))
}
})
.or_else(|e| e)
.then(|f| { debug!("† recv_loop done"); f });
match core.run(
task_loop
.map_err(|e| e.to_string())
.then(|f| { debug!("† task_loop done"); f })
.join(recv_loop.map_err(|e| e.to_string()))
) {
Ok(_) => info!("Done."),
Err(e) => panic!("Error: {}", e),
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_make_qrcode_data() {
let pubkey = HEXLOWER.decode(b"4242424242424242424242424242424242424242424242424242424242424242").unwrap();
let auth_token = HEXLOWER.decode(b"2323232323232323232323232323232323232323232323232323232323232323").unwrap();
let server_pubkey = HEXLOWER.decode(b"1337133713371337133713371337133713371337133713371337133713371337").unwrap();
let data = make_qrcode_payload(1337, true, "saltyrtc.example.org", 1234, &pubkey, &auth_token, &server_pubkey);
let expected = BASE64.decode(b"BTkCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkIjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIxM3EzcTNxM3EzcTNxM3EzcTNxM3EzcTNxM3EzcTNxM3BNJzYWx0eXJ0Yy5leGFtcGxlLm9yZw==").unwrap();
assert_eq!(data, expected);
}
} | server_host,
server_port,
pubkey.as_bytes(), | random_line_split |
main.rs | mod cmd;
mod config;
mod edit;
mod error;
mod fmt;
mod opts;
use std::env;
use std::ffi::OsString;
use std::fs;
use std::io::{self, BufRead, Write};
use std::path::{Path, PathBuf};
use std::process::{self, Command, Stdio};
use atty::Stream::{Stderr, Stdout};
use prettyprint::{PagingMode, PrettyPrinter};
use quote::quote;
use structopt::StructOpt;
use termcolor::{Color::Green, ColorChoice, ColorSpec, StandardStream, WriteColor};
use crate::cmd::Line;
use crate::error::Result;
use crate::opts::Coloring::*;
use crate::opts::{Args, Coloring, Opts};
fn main() {
let result = cargo_expand_or_run_nightly();
process::exit(match result {
Ok(code) => code,
Err(err) => {
let _ = writeln!(io::stderr(), "{}", err);
1
}
});
}
fn cargo_expand_or_run_nightly() -> Result<i32> {
const NO_RUN_NIGHTLY: &str = "CARGO_EXPAND_NO_RUN_NIGHTLY";
let maybe_nightly = !definitely_not_nightly();
if maybe_nightly || env::var_os(NO_RUN_NIGHTLY).is_some() {
return cargo_expand();
}
let mut nightly = Command::new("cargo");
nightly.arg("+nightly");
nightly.arg("expand");
let mut args = env::args_os().peekable();
args.next().unwrap(); // cargo
if args.peek().map_or(false, |arg| arg == "expand") {
args.next().unwrap(); // expand
}
nightly.args(args);
// Hopefully prevent infinite re-run loop.
nightly.env(NO_RUN_NIGHTLY, "");
let status = nightly.status()?;
Ok(match status.code() {
Some(code) => code,
None => {
if status.success() {
0
} else {
1
}
}
})
}
fn definitely_not_nightly() -> bool {
let mut cmd = Command::new(cargo_binary());
cmd.arg("--version");
let output = match cmd.output() {
Ok(output) => output,
Err(_) => return false,
};
let version = match String::from_utf8(output.stdout) {
Ok(version) => version,
Err(_) => return false,
};
version.starts_with("cargo 1") && !version.contains("nightly")
}
fn cargo_binary() -> OsString {
env::var_os("CARGO").unwrap_or_else(|| "cargo".to_owned().into())
}
fn cargo_expand() -> Result<i32> {
let Opts::Expand(args) = Opts::from_args();
let config = config::deserialize();
if args.themes {
for theme in PrettyPrinter::default()
.build()
.unwrap()
.get_themes()
.keys()
{
let _ = writeln!(io::stdout(), "{}", theme);
}
return Ok(0);
}
let rustfmt;
match (&args.item, args.ugly) {
(Some(item), true) => {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) in ugly mode.",
item,
);
return Ok(1);
}
(Some(item), false) => {
rustfmt = which_rustfmt();
if rustfmt.is_none() {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) without rustfmt.",
item,
);
let _ = writeln!(
io::stderr(),
"Install rustfmt by running `rustup component add rustfmt --toolchain nightly`.",
);
return Ok(1);
}
}
(None, true) => rustfmt = None,
(None, false) => rustfmt = which_rustfmt(),
}
let mut builder = tempfile::Builder::new();
builder.prefix("cargo-expand");
let outdir = builder.tempdir().expect("failed to create tmp file");
let outfile_path = outdir.path().join("expanded");
// Run cargo
let mut cmd = Command::new(cargo_binary());
apply_args(&mut cmd, &args, &outfile_path);
let code = filter_err(&mut cmd, ignore_cargo_err)?;
if !outfile_path.exists() {
return Ok(1);
}
let mut content = fs::read_to_string(&outfile_path)?;
if content.is_empty() {
let _ = writeln!(io::stderr(), "ERROR: rustc produced no expanded output",);
return Ok(if code == 0 { 1 } else { code });
}
// Run rustfmt
if let Some(rustfmt) = rustfmt {
// Work around rustfmt not being able to parse paths containing $crate.
// This placeholder should be the same width as $crate to preserve
// alignments.
const DOLLAR_CRATE_PLACEHOLDER: &str = "Ξcrate";
content = content.replace("$crate", DOLLAR_CRATE_PLACEHOLDER);
// Discard comments, which are misplaced by the compiler
if let Ok(mut syntax_tree) = syn::parse_file(&content) {
edit::remove_macro_rules(&mut syntax_tree);
if let Some(filter) = args.item {
syntax_tree.shebang = None;
syntax_tree.attrs.clear();
syntax_tree.items = filter.apply_to(&syntax_tree);
if syntax_tree.items.is_empty() {
let _ = writeln!(io::stderr(), "WARNING: no such item: {}", filter);
return Ok(1);
}
}
content = quote!(#syntax_tree).to_string();
}
fs::write(&outfile_path, content)?;
fmt::write_rustfmt_config(&outdir)?;
// Ignore any errors.
let _status = Command::new(rustfmt)
.arg("--edition=2018")
.arg(&outfile_path)
.stderr(Stdio::null())
.status();
content = fs::read_to_string(&outfile_path)?;
content = content.replace(DOLLAR_CRATE_PLACEHOLDER, "$crate");
}
// Run pretty printer
let theme = args.theme.or(config.theme);
let none_theme = theme.as_ref().map(String::as_str) == Some("none");
let do_color = match args.color {
Some(Always) => true,
Some(Never) => false,
None | Some(Auto) => !none_theme && atty::is(Stdout),
};
let _ = writeln!(io::stderr());
if do_color {
if content.ends_with('\n') {
// Pretty printer seems to print an extra trailing newline.
content.truncate(content.len() - 1);
}
let mut builder = PrettyPrinter::default();
builder.header(false);
builder.grid(false);
builder.line_numbers(false);
builder.language("rust");
builder.paging_mode(PagingMode::Never);
if let Some(theme) = theme {
builder.theme(theme);
}
let printer = builder.build().unwrap();
// Ignore any errors.
let _ = printer.string(content);
} else {
let _ = write!(io::stdout(), "{}", content);
}
Ok(0)
}
fn which_rustfmt() -> Option<PathBuf> { |
// Based on https://github.com/rsolomo/cargo-check
fn apply_args(cmd: &mut Command, args: &Args, outfile: &Path) {
let mut line = Line::new("cargo");
line.arg("rustc");
if args.tests && args.test.is_none() {
line.arg("--profile=test");
} else {
line.arg("--profile=check");
}
if let Some(features) = &args.features {
line.arg("--features");
line.arg(features);
}
if args.all_features {
line.arg("--all-features");
}
if args.no_default_features {
line.arg("--no-default-features");
}
if args.lib {
line.arg("--lib");
}
if let Some(bin) = &args.bin {
line.arg("--bin");
line.arg(bin);
}
if let Some(example) = &args.example {
line.arg("--example");
line.arg(example);
}
if let Some(test) = &args.test {
line.arg("--test");
line.arg(test);
}
if let Some(bench) = &args.bench {
line.arg("--bench");
line.arg(bench);
}
if let Some(target) = &args.target {
line.arg("--target");
line.arg(target);
}
if let Some(target_dir) = &args.target_dir {
line.arg("--target-dir");
line.arg(target_dir);
}
if let Some(manifest_path) = &args.manifest_path {
line.arg("--manifest-path");
line.arg(manifest_path);
}
if let Some(package) = &args.package {
line.arg("--package");
line.arg(package);
}
if let Some(jobs) = args.jobs {
line.arg("--jobs");
line.arg(jobs.to_string());
}
if args.verbose {
line.arg("--verbose");
}
line.arg("--color");
if let Some(color) = &args.color {
line.arg(color.to_string());
} else {
line.arg(if atty::is(Stderr) { "always" } else { "never" });
}
if args.frozen {
line.arg("--frozen");
}
if args.locked {
line.arg("--locked");
}
for unstable_flag in &args.unstable_flags {
line.arg("-Z");
line.arg(unstable_flag);
}
line.arg("--");
line.arg("-o");
line.arg(outfile);
line.arg("-Zunstable-options");
line.arg("--pretty=expanded");
if args.verbose {
let mut display = line.clone();
display.insert(0, "+nightly");
print_command(display, args);
}
cmd.args(line);
}
fn print_command(line: Line, args: &Args) {
let color_choice = match args.color {
Some(Coloring::Auto) | None => ColorChoice::Auto,
Some(Coloring::Always) => ColorChoice::Always,
Some(Coloring::Never) => ColorChoice::Never,
};
let mut stream = StandardStream::stderr(color_choice);
let _ = stream.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Green)));
let _ = write!(stream, "{:>12}", "Running");
let _ = stream.reset();
let _ = writeln!(stream, " `{}`", line);
}
fn filter_err(cmd: &mut Command, ignore: fn(&str) -> bool) -> io::Result<i32> {
let mut child = cmd.stderr(Stdio::piped()).spawn()?;
let mut stderr = io::BufReader::new(child.stderr.take().unwrap());
let mut line = String::new();
while let Ok(n) = stderr.read_line(&mut line) {
if n == 0 {
break;
}
if !ignore(&line) {
let _ = write!(io::stderr(), "{}", line);
}
line.clear();
}
let code = child.wait()?.code().unwrap_or(1);
Ok(code)
}
fn ignore_cargo_err(line: &str) -> bool {
if line.trim().is_empty() {
return true;
}
let blacklist = [
"ignoring specified output filename because multiple outputs were \
requested",
"ignoring specified output filename for 'link' output because multiple \
outputs were requested",
"ignoring --out-dir flag due to -o flag",
"ignoring -C extra-filename flag due to -o flag",
"due to multiple output types requested, the explicitly specified \
output file name will be adapted for each output type",
];
for s in &blacklist {
if line.contains(s) {
return true;
}
}
false
}
|
match env::var_os("RUSTFMT") {
Some(which) => {
if which.is_empty() {
None
} else {
Some(PathBuf::from(which))
}
}
None => toolchain_find::find_installed_component("rustfmt"),
}
}
| identifier_body |
main.rs | mod cmd;
mod config;
mod edit;
mod error;
mod fmt;
mod opts;
use std::env;
use std::ffi::OsString;
use std::fs;
use std::io::{self, BufRead, Write};
use std::path::{Path, PathBuf};
use std::process::{self, Command, Stdio};
use atty::Stream::{Stderr, Stdout};
use prettyprint::{PagingMode, PrettyPrinter};
use quote::quote;
use structopt::StructOpt;
use termcolor::{Color::Green, ColorChoice, ColorSpec, StandardStream, WriteColor};
use crate::cmd::Line;
use crate::error::Result;
use crate::opts::Coloring::*;
use crate::opts::{Args, Coloring, Opts};
fn main() {
let result = cargo_expand_or_run_nightly();
process::exit(match result {
Ok(code) => code,
Err(err) => {
let _ = writeln!(io::stderr(), "{}", err);
1
}
});
}
fn | () -> Result<i32> {
const NO_RUN_NIGHTLY: &str = "CARGO_EXPAND_NO_RUN_NIGHTLY";
let maybe_nightly = !definitely_not_nightly();
if maybe_nightly || env::var_os(NO_RUN_NIGHTLY).is_some() {
return cargo_expand();
}
let mut nightly = Command::new("cargo");
nightly.arg("+nightly");
nightly.arg("expand");
let mut args = env::args_os().peekable();
args.next().unwrap(); // cargo
if args.peek().map_or(false, |arg| arg == "expand") {
args.next().unwrap(); // expand
}
nightly.args(args);
// Hopefully prevent infinite re-run loop.
nightly.env(NO_RUN_NIGHTLY, "");
let status = nightly.status()?;
Ok(match status.code() {
Some(code) => code,
None => {
if status.success() {
0
} else {
1
}
}
})
}
fn definitely_not_nightly() -> bool {
let mut cmd = Command::new(cargo_binary());
cmd.arg("--version");
let output = match cmd.output() {
Ok(output) => output,
Err(_) => return false,
};
let version = match String::from_utf8(output.stdout) {
Ok(version) => version,
Err(_) => return false,
};
version.starts_with("cargo 1") && !version.contains("nightly")
}
fn cargo_binary() -> OsString {
env::var_os("CARGO").unwrap_or_else(|| "cargo".to_owned().into())
}
fn cargo_expand() -> Result<i32> {
let Opts::Expand(args) = Opts::from_args();
let config = config::deserialize();
if args.themes {
for theme in PrettyPrinter::default()
.build()
.unwrap()
.get_themes()
.keys()
{
let _ = writeln!(io::stdout(), "{}", theme);
}
return Ok(0);
}
let rustfmt;
match (&args.item, args.ugly) {
(Some(item), true) => {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) in ugly mode.",
item,
);
return Ok(1);
}
(Some(item), false) => {
rustfmt = which_rustfmt();
if rustfmt.is_none() {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) without rustfmt.",
item,
);
let _ = writeln!(
io::stderr(),
"Install rustfmt by running `rustup component add rustfmt --toolchain nightly`.",
);
return Ok(1);
}
}
(None, true) => rustfmt = None,
(None, false) => rustfmt = which_rustfmt(),
}
let mut builder = tempfile::Builder::new();
builder.prefix("cargo-expand");
let outdir = builder.tempdir().expect("failed to create tmp file");
let outfile_path = outdir.path().join("expanded");
// Run cargo
let mut cmd = Command::new(cargo_binary());
apply_args(&mut cmd, &args, &outfile_path);
let code = filter_err(&mut cmd, ignore_cargo_err)?;
if !outfile_path.exists() {
return Ok(1);
}
let mut content = fs::read_to_string(&outfile_path)?;
if content.is_empty() {
let _ = writeln!(io::stderr(), "ERROR: rustc produced no expanded output",);
return Ok(if code == 0 { 1 } else { code });
}
// Run rustfmt
if let Some(rustfmt) = rustfmt {
// Work around rustfmt not being able to parse paths containing $crate.
// This placeholder should be the same width as $crate to preserve
// alignments.
const DOLLAR_CRATE_PLACEHOLDER: &str = "Ξcrate";
content = content.replace("$crate", DOLLAR_CRATE_PLACEHOLDER);
// Discard comments, which are misplaced by the compiler
if let Ok(mut syntax_tree) = syn::parse_file(&content) {
edit::remove_macro_rules(&mut syntax_tree);
if let Some(filter) = args.item {
syntax_tree.shebang = None;
syntax_tree.attrs.clear();
syntax_tree.items = filter.apply_to(&syntax_tree);
if syntax_tree.items.is_empty() {
let _ = writeln!(io::stderr(), "WARNING: no such item: {}", filter);
return Ok(1);
}
}
content = quote!(#syntax_tree).to_string();
}
fs::write(&outfile_path, content)?;
fmt::write_rustfmt_config(&outdir)?;
// Ignore any errors.
let _status = Command::new(rustfmt)
.arg("--edition=2018")
.arg(&outfile_path)
.stderr(Stdio::null())
.status();
content = fs::read_to_string(&outfile_path)?;
content = content.replace(DOLLAR_CRATE_PLACEHOLDER, "$crate");
}
// Run pretty printer
let theme = args.theme.or(config.theme);
let none_theme = theme.as_ref().map(String::as_str) == Some("none");
let do_color = match args.color {
Some(Always) => true,
Some(Never) => false,
None | Some(Auto) => !none_theme && atty::is(Stdout),
};
let _ = writeln!(io::stderr());
if do_color {
if content.ends_with('\n') {
// Pretty printer seems to print an extra trailing newline.
content.truncate(content.len() - 1);
}
let mut builder = PrettyPrinter::default();
builder.header(false);
builder.grid(false);
builder.line_numbers(false);
builder.language("rust");
builder.paging_mode(PagingMode::Never);
if let Some(theme) = theme {
builder.theme(theme);
}
let printer = builder.build().unwrap();
// Ignore any errors.
let _ = printer.string(content);
} else {
let _ = write!(io::stdout(), "{}", content);
}
Ok(0)
}
fn which_rustfmt() -> Option<PathBuf> {
match env::var_os("RUSTFMT") {
Some(which) => {
if which.is_empty() {
None
} else {
Some(PathBuf::from(which))
}
}
None => toolchain_find::find_installed_component("rustfmt"),
}
}
// Based on https://github.com/rsolomo/cargo-check
fn apply_args(cmd: &mut Command, args: &Args, outfile: &Path) {
let mut line = Line::new("cargo");
line.arg("rustc");
if args.tests && args.test.is_none() {
line.arg("--profile=test");
} else {
line.arg("--profile=check");
}
if let Some(features) = &args.features {
line.arg("--features");
line.arg(features);
}
if args.all_features {
line.arg("--all-features");
}
if args.no_default_features {
line.arg("--no-default-features");
}
if args.lib {
line.arg("--lib");
}
if let Some(bin) = &args.bin {
line.arg("--bin");
line.arg(bin);
}
if let Some(example) = &args.example {
line.arg("--example");
line.arg(example);
}
if let Some(test) = &args.test {
line.arg("--test");
line.arg(test);
}
if let Some(bench) = &args.bench {
line.arg("--bench");
line.arg(bench);
}
if let Some(target) = &args.target {
line.arg("--target");
line.arg(target);
}
if let Some(target_dir) = &args.target_dir {
line.arg("--target-dir");
line.arg(target_dir);
}
if let Some(manifest_path) = &args.manifest_path {
line.arg("--manifest-path");
line.arg(manifest_path);
}
if let Some(package) = &args.package {
line.arg("--package");
line.arg(package);
}
if let Some(jobs) = args.jobs {
line.arg("--jobs");
line.arg(jobs.to_string());
}
if args.verbose {
line.arg("--verbose");
}
line.arg("--color");
if let Some(color) = &args.color {
line.arg(color.to_string());
} else {
line.arg(if atty::is(Stderr) { "always" } else { "never" });
}
if args.frozen {
line.arg("--frozen");
}
if args.locked {
line.arg("--locked");
}
for unstable_flag in &args.unstable_flags {
line.arg("-Z");
line.arg(unstable_flag);
}
line.arg("--");
line.arg("-o");
line.arg(outfile);
line.arg("-Zunstable-options");
line.arg("--pretty=expanded");
if args.verbose {
let mut display = line.clone();
display.insert(0, "+nightly");
print_command(display, args);
}
cmd.args(line);
}
fn print_command(line: Line, args: &Args) {
let color_choice = match args.color {
Some(Coloring::Auto) | None => ColorChoice::Auto,
Some(Coloring::Always) => ColorChoice::Always,
Some(Coloring::Never) => ColorChoice::Never,
};
let mut stream = StandardStream::stderr(color_choice);
let _ = stream.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Green)));
let _ = write!(stream, "{:>12}", "Running");
let _ = stream.reset();
let _ = writeln!(stream, " `{}`", line);
}
fn filter_err(cmd: &mut Command, ignore: fn(&str) -> bool) -> io::Result<i32> {
let mut child = cmd.stderr(Stdio::piped()).spawn()?;
let mut stderr = io::BufReader::new(child.stderr.take().unwrap());
let mut line = String::new();
while let Ok(n) = stderr.read_line(&mut line) {
if n == 0 {
break;
}
if !ignore(&line) {
let _ = write!(io::stderr(), "{}", line);
}
line.clear();
}
let code = child.wait()?.code().unwrap_or(1);
Ok(code)
}
fn ignore_cargo_err(line: &str) -> bool {
if line.trim().is_empty() {
return true;
}
let blacklist = [
"ignoring specified output filename because multiple outputs were \
requested",
"ignoring specified output filename for 'link' output because multiple \
outputs were requested",
"ignoring --out-dir flag due to -o flag",
"ignoring -C extra-filename flag due to -o flag",
"due to multiple output types requested, the explicitly specified \
output file name will be adapted for each output type",
];
for s in &blacklist {
if line.contains(s) {
return true;
}
}
false
}
| cargo_expand_or_run_nightly | identifier_name |
main.rs | mod cmd;
mod config;
mod edit;
mod error;
mod fmt;
mod opts;
use std::env;
use std::ffi::OsString;
use std::fs;
use std::io::{self, BufRead, Write};
use std::path::{Path, PathBuf};
use std::process::{self, Command, Stdio};
use atty::Stream::{Stderr, Stdout};
use prettyprint::{PagingMode, PrettyPrinter};
use quote::quote;
use structopt::StructOpt;
use termcolor::{Color::Green, ColorChoice, ColorSpec, StandardStream, WriteColor};
use crate::cmd::Line;
use crate::error::Result;
use crate::opts::Coloring::*;
use crate::opts::{Args, Coloring, Opts};
fn main() {
let result = cargo_expand_or_run_nightly();
process::exit(match result {
Ok(code) => code,
Err(err) => {
let _ = writeln!(io::stderr(), "{}", err);
1
}
});
}
fn cargo_expand_or_run_nightly() -> Result<i32> {
const NO_RUN_NIGHTLY: &str = "CARGO_EXPAND_NO_RUN_NIGHTLY";
let maybe_nightly = !definitely_not_nightly();
if maybe_nightly || env::var_os(NO_RUN_NIGHTLY).is_some() {
return cargo_expand();
}
let mut nightly = Command::new("cargo");
nightly.arg("+nightly");
nightly.arg("expand");
let mut args = env::args_os().peekable();
args.next().unwrap(); // cargo
if args.peek().map_or(false, |arg| arg == "expand") {
args.next().unwrap(); // expand
}
nightly.args(args);
// Hopefully prevent infinite re-run loop.
nightly.env(NO_RUN_NIGHTLY, "");
let status = nightly.status()?;
Ok(match status.code() {
Some(code) => code,
None => {
if status.success() {
0
} else {
1 | })
}
fn definitely_not_nightly() -> bool {
let mut cmd = Command::new(cargo_binary());
cmd.arg("--version");
let output = match cmd.output() {
Ok(output) => output,
Err(_) => return false,
};
let version = match String::from_utf8(output.stdout) {
Ok(version) => version,
Err(_) => return false,
};
version.starts_with("cargo 1") && !version.contains("nightly")
}
fn cargo_binary() -> OsString {
env::var_os("CARGO").unwrap_or_else(|| "cargo".to_owned().into())
}
fn cargo_expand() -> Result<i32> {
let Opts::Expand(args) = Opts::from_args();
let config = config::deserialize();
if args.themes {
for theme in PrettyPrinter::default()
.build()
.unwrap()
.get_themes()
.keys()
{
let _ = writeln!(io::stdout(), "{}", theme);
}
return Ok(0);
}
let rustfmt;
match (&args.item, args.ugly) {
(Some(item), true) => {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) in ugly mode.",
item,
);
return Ok(1);
}
(Some(item), false) => {
rustfmt = which_rustfmt();
if rustfmt.is_none() {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) without rustfmt.",
item,
);
let _ = writeln!(
io::stderr(),
"Install rustfmt by running `rustup component add rustfmt --toolchain nightly`.",
);
return Ok(1);
}
}
(None, true) => rustfmt = None,
(None, false) => rustfmt = which_rustfmt(),
}
let mut builder = tempfile::Builder::new();
builder.prefix("cargo-expand");
let outdir = builder.tempdir().expect("failed to create tmp file");
let outfile_path = outdir.path().join("expanded");
// Run cargo
let mut cmd = Command::new(cargo_binary());
apply_args(&mut cmd, &args, &outfile_path);
let code = filter_err(&mut cmd, ignore_cargo_err)?;
if !outfile_path.exists() {
return Ok(1);
}
let mut content = fs::read_to_string(&outfile_path)?;
if content.is_empty() {
let _ = writeln!(io::stderr(), "ERROR: rustc produced no expanded output",);
return Ok(if code == 0 { 1 } else { code });
}
// Run rustfmt
if let Some(rustfmt) = rustfmt {
// Work around rustfmt not being able to parse paths containing $crate.
// This placeholder should be the same width as $crate to preserve
// alignments.
const DOLLAR_CRATE_PLACEHOLDER: &str = "Ξcrate";
content = content.replace("$crate", DOLLAR_CRATE_PLACEHOLDER);
// Discard comments, which are misplaced by the compiler
if let Ok(mut syntax_tree) = syn::parse_file(&content) {
edit::remove_macro_rules(&mut syntax_tree);
if let Some(filter) = args.item {
syntax_tree.shebang = None;
syntax_tree.attrs.clear();
syntax_tree.items = filter.apply_to(&syntax_tree);
if syntax_tree.items.is_empty() {
let _ = writeln!(io::stderr(), "WARNING: no such item: {}", filter);
return Ok(1);
}
}
content = quote!(#syntax_tree).to_string();
}
fs::write(&outfile_path, content)?;
fmt::write_rustfmt_config(&outdir)?;
// Ignore any errors.
let _status = Command::new(rustfmt)
.arg("--edition=2018")
.arg(&outfile_path)
.stderr(Stdio::null())
.status();
content = fs::read_to_string(&outfile_path)?;
content = content.replace(DOLLAR_CRATE_PLACEHOLDER, "$crate");
}
// Run pretty printer
let theme = args.theme.or(config.theme);
let none_theme = theme.as_ref().map(String::as_str) == Some("none");
let do_color = match args.color {
Some(Always) => true,
Some(Never) => false,
None | Some(Auto) => !none_theme && atty::is(Stdout),
};
let _ = writeln!(io::stderr());
if do_color {
if content.ends_with('\n') {
// Pretty printer seems to print an extra trailing newline.
content.truncate(content.len() - 1);
}
let mut builder = PrettyPrinter::default();
builder.header(false);
builder.grid(false);
builder.line_numbers(false);
builder.language("rust");
builder.paging_mode(PagingMode::Never);
if let Some(theme) = theme {
builder.theme(theme);
}
let printer = builder.build().unwrap();
// Ignore any errors.
let _ = printer.string(content);
} else {
let _ = write!(io::stdout(), "{}", content);
}
Ok(0)
}
fn which_rustfmt() -> Option<PathBuf> {
match env::var_os("RUSTFMT") {
Some(which) => {
if which.is_empty() {
None
} else {
Some(PathBuf::from(which))
}
}
None => toolchain_find::find_installed_component("rustfmt"),
}
}
// Based on https://github.com/rsolomo/cargo-check
fn apply_args(cmd: &mut Command, args: &Args, outfile: &Path) {
let mut line = Line::new("cargo");
line.arg("rustc");
if args.tests && args.test.is_none() {
line.arg("--profile=test");
} else {
line.arg("--profile=check");
}
if let Some(features) = &args.features {
line.arg("--features");
line.arg(features);
}
if args.all_features {
line.arg("--all-features");
}
if args.no_default_features {
line.arg("--no-default-features");
}
if args.lib {
line.arg("--lib");
}
if let Some(bin) = &args.bin {
line.arg("--bin");
line.arg(bin);
}
if let Some(example) = &args.example {
line.arg("--example");
line.arg(example);
}
if let Some(test) = &args.test {
line.arg("--test");
line.arg(test);
}
if let Some(bench) = &args.bench {
line.arg("--bench");
line.arg(bench);
}
if let Some(target) = &args.target {
line.arg("--target");
line.arg(target);
}
if let Some(target_dir) = &args.target_dir {
line.arg("--target-dir");
line.arg(target_dir);
}
if let Some(manifest_path) = &args.manifest_path {
line.arg("--manifest-path");
line.arg(manifest_path);
}
if let Some(package) = &args.package {
line.arg("--package");
line.arg(package);
}
if let Some(jobs) = args.jobs {
line.arg("--jobs");
line.arg(jobs.to_string());
}
if args.verbose {
line.arg("--verbose");
}
line.arg("--color");
if let Some(color) = &args.color {
line.arg(color.to_string());
} else {
line.arg(if atty::is(Stderr) { "always" } else { "never" });
}
if args.frozen {
line.arg("--frozen");
}
if args.locked {
line.arg("--locked");
}
for unstable_flag in &args.unstable_flags {
line.arg("-Z");
line.arg(unstable_flag);
}
line.arg("--");
line.arg("-o");
line.arg(outfile);
line.arg("-Zunstable-options");
line.arg("--pretty=expanded");
if args.verbose {
let mut display = line.clone();
display.insert(0, "+nightly");
print_command(display, args);
}
cmd.args(line);
}
fn print_command(line: Line, args: &Args) {
let color_choice = match args.color {
Some(Coloring::Auto) | None => ColorChoice::Auto,
Some(Coloring::Always) => ColorChoice::Always,
Some(Coloring::Never) => ColorChoice::Never,
};
let mut stream = StandardStream::stderr(color_choice);
let _ = stream.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Green)));
let _ = write!(stream, "{:>12}", "Running");
let _ = stream.reset();
let _ = writeln!(stream, " `{}`", line);
}
fn filter_err(cmd: &mut Command, ignore: fn(&str) -> bool) -> io::Result<i32> {
let mut child = cmd.stderr(Stdio::piped()).spawn()?;
let mut stderr = io::BufReader::new(child.stderr.take().unwrap());
let mut line = String::new();
while let Ok(n) = stderr.read_line(&mut line) {
if n == 0 {
break;
}
if !ignore(&line) {
let _ = write!(io::stderr(), "{}", line);
}
line.clear();
}
let code = child.wait()?.code().unwrap_or(1);
Ok(code)
}
fn ignore_cargo_err(line: &str) -> bool {
if line.trim().is_empty() {
return true;
}
let blacklist = [
"ignoring specified output filename because multiple outputs were \
requested",
"ignoring specified output filename for 'link' output because multiple \
outputs were requested",
"ignoring --out-dir flag due to -o flag",
"ignoring -C extra-filename flag due to -o flag",
"due to multiple output types requested, the explicitly specified \
output file name will be adapted for each output type",
];
for s in &blacklist {
if line.contains(s) {
return true;
}
}
false
} | }
} | random_line_split |
main.rs | mod cmd;
mod config;
mod edit;
mod error;
mod fmt;
mod opts;
use std::env;
use std::ffi::OsString;
use std::fs;
use std::io::{self, BufRead, Write};
use std::path::{Path, PathBuf};
use std::process::{self, Command, Stdio};
use atty::Stream::{Stderr, Stdout};
use prettyprint::{PagingMode, PrettyPrinter};
use quote::quote;
use structopt::StructOpt;
use termcolor::{Color::Green, ColorChoice, ColorSpec, StandardStream, WriteColor};
use crate::cmd::Line;
use crate::error::Result;
use crate::opts::Coloring::*;
use crate::opts::{Args, Coloring, Opts};
fn main() {
let result = cargo_expand_or_run_nightly();
process::exit(match result {
Ok(code) => code,
Err(err) => {
let _ = writeln!(io::stderr(), "{}", err);
1
}
});
}
fn cargo_expand_or_run_nightly() -> Result<i32> {
const NO_RUN_NIGHTLY: &str = "CARGO_EXPAND_NO_RUN_NIGHTLY";
let maybe_nightly = !definitely_not_nightly();
if maybe_nightly || env::var_os(NO_RUN_NIGHTLY).is_some() {
return cargo_expand();
}
let mut nightly = Command::new("cargo");
nightly.arg("+nightly");
nightly.arg("expand");
let mut args = env::args_os().peekable();
args.next().unwrap(); // cargo
if args.peek().map_or(false, |arg| arg == "expand") {
args.next().unwrap(); // expand
}
nightly.args(args);
// Hopefully prevent infinite re-run loop.
nightly.env(NO_RUN_NIGHTLY, "");
let status = nightly.status()?;
Ok(match status.code() {
Some(code) => code,
None => {
if status.success() {
0
} else {
1
}
}
})
}
fn definitely_not_nightly() -> bool {
let mut cmd = Command::new(cargo_binary());
cmd.arg("--version");
let output = match cmd.output() {
Ok(output) => output,
Err(_) => return false,
};
let version = match String::from_utf8(output.stdout) {
Ok(version) => version,
Err(_) => return false,
};
version.starts_with("cargo 1") && !version.contains("nightly")
}
fn cargo_binary() -> OsString {
env::var_os("CARGO").unwrap_or_else(|| "cargo".to_owned().into())
}
fn cargo_expand() -> Result<i32> {
let Opts::Expand(args) = Opts::from_args();
let config = config::deserialize();
if args.themes {
for theme in PrettyPrinter::default()
.build()
.unwrap()
.get_themes()
.keys()
{
let _ = writeln!(io::stdout(), "{}", theme);
}
return Ok(0);
}
let rustfmt;
match (&args.item, args.ugly) {
(Some(item), true) => {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) in ugly mode.",
item,
);
return Ok(1);
}
(Some(item), false) => {
rustfmt = which_rustfmt();
if rustfmt.is_none() {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) without rustfmt.",
item,
);
let _ = writeln!(
io::stderr(),
"Install rustfmt by running `rustup component add rustfmt --toolchain nightly`.",
);
return Ok(1);
}
}
(None, true) => rustfmt = None,
(None, false) => rustfmt = which_rustfmt(),
}
let mut builder = tempfile::Builder::new();
builder.prefix("cargo-expand");
let outdir = builder.tempdir().expect("failed to create tmp file");
let outfile_path = outdir.path().join("expanded");
// Run cargo
let mut cmd = Command::new(cargo_binary());
apply_args(&mut cmd, &args, &outfile_path);
let code = filter_err(&mut cmd, ignore_cargo_err)?;
if !outfile_path.exists() {
return Ok(1);
}
let mut content = fs::read_to_string(&outfile_path)?;
if content.is_empty() {
let _ = writeln!(io::stderr(), "ERROR: rustc produced no expanded output",);
return Ok(if code == 0 { 1 } else { code });
}
// Run rustfmt
if let Some(rustfmt) = rustfmt {
// Work around rustfmt not being able to parse paths containing $crate.
// This placeholder should be the same width as $crate to preserve
// alignments.
const DOLLAR_CRATE_PLACEHOLDER: &str = "Ξcrate";
content = content.replace("$crate", DOLLAR_CRATE_PLACEHOLDER);
// Discard comments, which are misplaced by the compiler
if let Ok(mut syntax_tree) = syn::parse_file(&content) {
edit::remove_macro_rules(&mut syntax_tree);
if let Some(filter) = args.item {
syntax_tree.shebang = None;
syntax_tree.attrs.clear();
syntax_tree.items = filter.apply_to(&syntax_tree);
if syntax_tree.items.is_empty() {
let _ = writeln!(io::stderr(), "WARNING: no such item: {}", filter);
return Ok(1);
}
}
content = quote!(#syntax_tree).to_string();
}
fs::write(&outfile_path, content)?;
fmt::write_rustfmt_config(&outdir)?;
// Ignore any errors.
let _status = Command::new(rustfmt)
.arg("--edition=2018")
.arg(&outfile_path)
.stderr(Stdio::null())
.status();
content = fs::read_to_string(&outfile_path)?;
content = content.replace(DOLLAR_CRATE_PLACEHOLDER, "$crate");
}
// Run pretty printer
let theme = args.theme.or(config.theme);
let none_theme = theme.as_ref().map(String::as_str) == Some("none");
let do_color = match args.color {
Some(Always) => true,
Some(Never) => false,
None | Some(Auto) => !none_theme && atty::is(Stdout),
};
let _ = writeln!(io::stderr());
if do_color {
if content.ends_with('\n') {
// Pretty printer seems to print an extra trailing newline.
content.truncate(content.len() - 1);
}
let mut builder = PrettyPrinter::default();
builder.header(false);
builder.grid(false);
builder.line_numbers(false);
builder.language("rust");
builder.paging_mode(PagingMode::Never);
if let Some(theme) = theme {
builder.theme(theme);
}
let printer = builder.build().unwrap();
// Ignore any errors.
let _ = printer.string(content);
} else {
let _ = write!(io::stdout(), "{}", content);
}
Ok(0)
}
fn which_rustfmt() -> Option<PathBuf> {
match env::var_os("RUSTFMT") {
Some(which) => {
if which.is_empty() {
None
} else {
Some(PathBuf::from(which))
}
}
None => toolchain_find::find_installed_component("rustfmt"),
}
}
// Based on https://github.com/rsolomo/cargo-check
fn apply_args(cmd: &mut Command, args: &Args, outfile: &Path) {
let mut line = Line::new("cargo");
line.arg("rustc");
if args.tests && args.test.is_none() {
line.arg("--profile=test");
} else {
line.arg("--profile=check");
}
if let Some(features) = &args.features {
line.arg("--features");
line.arg(features);
}
if args.all_features {
line.arg("--all-features");
}
if args.no_default_features {
line.arg("--no-default-features");
}
if args.lib {
line.arg("--lib");
}
if let Some(bin) = &args.bin {
line.arg("--bin");
line.arg(bin);
}
if let Some(example) = &args.example {
line.arg("--example");
line.arg(example);
}
if let Some(test) = &args.test {
line.arg("--test");
line.arg(test);
}
if let Some(bench) = &args.bench {
line.arg("--bench");
line.arg(bench);
}
if let Some(target) = &args.target {
line.arg("--target");
line.arg(target);
}
if let Some(target_dir) = &args.target_dir {
line.arg("--target-dir");
line.arg(target_dir);
}
if let Some(manifest_path) = &args.manifest_path {
line.arg("--manifest-path");
line.arg(manifest_path);
}
if let Some(package) = &args.package {
line.arg("--package");
line.arg(package);
}
if let Some(jobs) = args.jobs {
line.arg("--jobs");
line.arg(jobs.to_string());
}
if args.verbose {
line.arg("--verbose");
}
line.arg("--color");
if let Some(color) = &args.color {
line.arg(color.to_string());
} else { |
if args.frozen {
line.arg("--frozen");
}
if args.locked {
line.arg("--locked");
}
for unstable_flag in &args.unstable_flags {
line.arg("-Z");
line.arg(unstable_flag);
}
line.arg("--");
line.arg("-o");
line.arg(outfile);
line.arg("-Zunstable-options");
line.arg("--pretty=expanded");
if args.verbose {
let mut display = line.clone();
display.insert(0, "+nightly");
print_command(display, args);
}
cmd.args(line);
}
fn print_command(line: Line, args: &Args) {
let color_choice = match args.color {
Some(Coloring::Auto) | None => ColorChoice::Auto,
Some(Coloring::Always) => ColorChoice::Always,
Some(Coloring::Never) => ColorChoice::Never,
};
let mut stream = StandardStream::stderr(color_choice);
let _ = stream.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Green)));
let _ = write!(stream, "{:>12}", "Running");
let _ = stream.reset();
let _ = writeln!(stream, " `{}`", line);
}
fn filter_err(cmd: &mut Command, ignore: fn(&str) -> bool) -> io::Result<i32> {
let mut child = cmd.stderr(Stdio::piped()).spawn()?;
let mut stderr = io::BufReader::new(child.stderr.take().unwrap());
let mut line = String::new();
while let Ok(n) = stderr.read_line(&mut line) {
if n == 0 {
break;
}
if !ignore(&line) {
let _ = write!(io::stderr(), "{}", line);
}
line.clear();
}
let code = child.wait()?.code().unwrap_or(1);
Ok(code)
}
fn ignore_cargo_err(line: &str) -> bool {
if line.trim().is_empty() {
return true;
}
let blacklist = [
"ignoring specified output filename because multiple outputs were \
requested",
"ignoring specified output filename for 'link' output because multiple \
outputs were requested",
"ignoring --out-dir flag due to -o flag",
"ignoring -C extra-filename flag due to -o flag",
"due to multiple output types requested, the explicitly specified \
output file name will be adapted for each output type",
];
for s in &blacklist {
if line.contains(s) {
return true;
}
}
false
}
|
line.arg(if atty::is(Stderr) { "always" } else { "never" });
}
| conditional_block |
miner.rs | use crate::network::message::{Message};
use crate::network::server::Handle as ServerHandle;
use std::sync::{Arc, Mutex};
use crate::crypto::hash::{H256, Hashable, H160};
use crate::blockchain::Blockchain;
use crate::block::{Block,Header,Content};
use crate::crypto::merkle::{MerkleTree};
use crate::transaction::{Transaction, Mempool, SignedTransaction, StateWitness};
use rand::{thread_rng, Rng};
use ring::{digest};
use log::{info,debug};
use crossbeam::channel::{unbounded, Receiver, Sender, TryRecvError};
use std::{time, fs};
use std::time::{SystemTime, UNIX_EPOCH};
use std::thread;
use ring::signature::Ed25519KeyPair;
enum ControlSignal {
Start(u64), // the number controls the lambda of interval between block generation
Exit,
}
enum OperatingState {
Paused,
Run(u64),
ShutDown,
}
pub struct Context {
/// Channel for receiving control signal
local_address: H160,
local_public_key: Vec<u8>,
mempool: Arc<Mutex<Mempool>>,
stateWitness: Arc<Mutex<StateWitness>>,
//stateSet: Arc<Mutex<StateSet>>,
blockchain: Arc<Mutex<Blockchain>>,
control_chan: Receiver<ControlSignal>,
operating_state: OperatingState,
server: ServerHandle,
ifArchival: bool,
}
#[derive(Clone)]
pub struct Handle {
/// Channel for sending signal to the miner thread
control_chan: Sender<ControlSignal>,
}
pub fn new(
server: &ServerHandle,
mempool: &Arc<Mutex<Mempool>>,
stateWitness: &Arc<Mutex<StateWitness>>,
//stateSet: &Arc<Mutex<StateSet>>,
blockchain: &Arc<Mutex<Blockchain>>,
local_public_key: &[u8],
local_address: &H160,
ifArchival: bool,
) -> (Context, Handle) {
let (signal_chan_sender, signal_chan_receiver) = unbounded();
let ctx = Context {
local_address: *local_address,
local_public_key: (*local_public_key).to_owned(),
mempool: Arc::clone(mempool),
stateWitness: Arc::clone(stateWitness),
//stateSet: Arc::clone(stateSet),
blockchain: Arc::clone(blockchain),
control_chan: signal_chan_receiver,
operating_state: OperatingState::Paused,
server: server.clone(),
ifArchival: ifArchival,
};
let handle = Handle {
control_chan: signal_chan_sender,
};
(ctx, handle)
}
impl Handle {
pub fn exit(&self) {
self.control_chan.send(ControlSignal::Exit).unwrap();
}
pub fn start(&self, lambda: u64) {
self.control_chan
.send(ControlSignal::Start(lambda))
.unwrap();
}
}
impl Context {
pub fn start(mut self) {
thread::Builder::new()
.name("miner".to_string())
.spawn(move || {
self.miner_loop();
})
.unwrap();
info!("Miner initialized into paused mode");
}
fn handle_control_signal(&mut self, signal: ControlSignal) {
match signal {
ControlSignal::Exit => {
info!("Miner shutting down");
self.operating_state = OperatingState::ShutDown;
}
ControlSignal::Start(i) => {
info!("Miner starting in continuous mode with lambda {}", i);
self.operating_state = OperatingState::Run(i);
}
}
}
fn miner_loop(&mut self) {
let mut miner_counter:i32 = 0;
//let mut readICO = false;
// main mining loop
loop {
// check and react to control signals
match self.operating_state {
OperatingState::Paused => {
let signal = self.control_chan.recv().unwrap();
self.handle_control_signal(signal);
continue;
}
OperatingState::ShutDown => {
return;
}
_ => match self.control_chan.try_recv() {
Ok(signal) => {
self.handle_control_signal(signal);
}
Err(TryRecvError::Empty) => {}
Err(TryRecvError::Disconnected) => panic!("Miner control channel detached"),
},
}
if let OperatingState::ShutDown = self.operating_state {
return;
}
//Read ICO & Update initial state
/*
if !readICO {
// Initialize State
//println!("local: {:?}", self.local_address);
let mut state = self.state.lock().unwrap();
println!("ICO: THE ICO IS WORKING ON PROCESSES: {:?}",self.local_address);
let data = fs::read("ICO.txt").expect("Unable to read file");
let data_len: usize = (data.len() / 20) as usize;
println!("data_length: {:?}", data.len());
for i in 0..data_len {
let mut start = i * 20;
let mut end = (i + 1) * 20;
let mut addr_u8: [u8; 20] = [0; 20];
addr_u8.clone_from_slice(&data[start..end]);
let mut address: H160 = <H160>::from(addr_u8);
//println!("all: {:?}", address);
state.Outputs.insert((<H256>::from(digest::digest(&digest::SHA256, &[0x00 as u8])), i as u32), (100.0 as f32, address));
}
readICO = true;
println!("LOCAL STATES: {:?}", state.Outputs);
println!("PROCESS {:?} CAN START TO MINE BLOCKS.",self.local_address);
std::mem::drop(state);
}
*/
// TODO: actual mining
if self.mempool.lock().unwrap().Transactions.keys().len() > 0 && !self.ifArchival {
//info!("MINER: STARTING...");
let nonce:u32 = thread_rng().gen();
let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_millis();
// difficulty
let mut bytes32 = [255u8;32];
bytes32[0]=1;
bytes32[1]=1;
let difficulty : H256 = bytes32.into();
// read transactions from mempool
let mut signedTransaction = Vec::<SignedTransaction>::new();
let block_size_limit = 5;
let mut tx_counter = 0;
//let mut state = self.state.lock().unwrap();
let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
let mut key_iter= mempool.Transactions.keys();
for key in mempool.Transactions.keys(){
//println!("MINER: MEMPOOL KEYS:{:?}, INPUT: {:?}, OUTPUT: {:?}", key, mempool.Transactions.get(key).unwrap().transaction.Input, mempool.Transactions.get(key).unwrap().transaction.Output);
}
while tx_counter < block_size_limit {
match key_iter.next() {
Some(hash) => {
//println!("Miner: tx: {:?}",hash);
//println!("Miner: preTx: {:?}, PreIndex: {:?}",mempool.getPreTxHash(hash), mempool.getPreIndex(hash));
//double spent check and verify signature
if stateWitness.ifNotDoubleSpent(&mempool.Transactions.get(&hash).unwrap().transaction.Input,&self.blockchain.lock().unwrap().tip.0 )// // NEW TODO Change it to state witness
&& mempool.Transactions.get(hash).unwrap().verifySignedTransaction() {
//info!("Miner: Adding to block HERE");
signedTransaction.push(mempool.Transactions.get(hash).unwrap().clone());
tx_counter = tx_counter + 1;
}
}
None => {
break;
}
}
}
std::mem::drop(mempool);
std::mem::drop(stateWitness);
if signedTransaction.capacity() > 0 {
//info!("MINER: ADDING...");
//info!("MINER: MERKLETREE CHECKING...");
let mut MerkleTree = MerkleTree::new(&signedTransaction);
//info!("MINER: MERKLETREE CHECKED");
let newContent = Content{
content: signedTransaction,
};
let newHeader = Header{
parent: self.blockchain.lock().unwrap().tip(),
nonce: nonce,
difficulty: difficulty,
timestamp: timestamp,
merkleRoot: MerkleTree.root(),
};
let newBlock = Block{
Header: newHeader,
Content: newContent,
};
//println!("1: {:?}", newBlock.hash() );
//println!("2: {:?}", difficulty );
//info!("MINER: BLOCK CREATED");
if newBlock.hash() <= difficulty {
let mut contents = newBlock.Content.content.clone();
//let mut state = self.state.lock().unwrap();
let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
//let mut stateSet = self.stateSet.lock().unwrap();
let mut check = true;
for content in contents.iter(){
if stateWitness.ifNotDoubleSpent(&content.transaction.Input,&self.blockchain.lock().unwrap().tip.0 )
&& content.verifySignedTransaction() {//state.ifNotDoubleSpent(content)
check = check && true;
}
else{
check = check && false;
break;
}
}
std::mem::drop(stateWitness);
std::mem::drop(mempool);
if check {
let mut blockchain = self.blockchain.lock().unwrap();
let tip_hash = blockchain.insert(&newBlock);
//info!("MINER: NEW BLOCK ADDED");
miner_counter += 1;
println!("MINER: CURRENT MINER COUNT: {:?}", miner_counter);
println!("MINER: CURRENT BLOCKCHAIN HEIGHT: {:?}", blockchain.tip.1);
//let mut state = self.state.lock().unwrap();
//let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
mempool.updateMempool(&contents);
/*for key in state.Outputs.keys() {
println!("MINER: RECP: {:?}, VALUE {:?}", state.Outputs.get(key).unwrap().1, state.Outputs.get(key).unwrap().0);
}*/
self.server.broadcast(Message::NewBlockHashes(blockchain.all_blocks_in_longest_chain()));
//info!("MINER: BLOCK MESSAGES SENT");
std::mem::drop(blockchain);
std::mem::drop(mempool);
}
}
}
}
if let OperatingState::Run(i) = self.operating_state {
if i != 0 {
let interval = time::Duration::from_micros(i as u64); | thread::sleep(interval);
}
}
} | thread::sleep(interval);
}
}
let interval = time::Duration::from_micros(1000 as u64); | random_line_split |
miner.rs | use crate::network::message::{Message};
use crate::network::server::Handle as ServerHandle;
use std::sync::{Arc, Mutex};
use crate::crypto::hash::{H256, Hashable, H160};
use crate::blockchain::Blockchain;
use crate::block::{Block,Header,Content};
use crate::crypto::merkle::{MerkleTree};
use crate::transaction::{Transaction, Mempool, SignedTransaction, StateWitness};
use rand::{thread_rng, Rng};
use ring::{digest};
use log::{info,debug};
use crossbeam::channel::{unbounded, Receiver, Sender, TryRecvError};
use std::{time, fs};
use std::time::{SystemTime, UNIX_EPOCH};
use std::thread;
use ring::signature::Ed25519KeyPair;
enum ControlSignal {
Start(u64), // the number controls the lambda of interval between block generation
Exit,
}
enum OperatingState {
Paused,
Run(u64),
ShutDown,
}
pub struct Context {
/// Channel for receiving control signal
local_address: H160,
local_public_key: Vec<u8>,
mempool: Arc<Mutex<Mempool>>,
stateWitness: Arc<Mutex<StateWitness>>,
//stateSet: Arc<Mutex<StateSet>>,
blockchain: Arc<Mutex<Blockchain>>,
control_chan: Receiver<ControlSignal>,
operating_state: OperatingState,
server: ServerHandle,
ifArchival: bool,
}
#[derive(Clone)]
pub struct Handle {
/// Channel for sending signal to the miner thread
control_chan: Sender<ControlSignal>,
}
pub fn new(
server: &ServerHandle,
mempool: &Arc<Mutex<Mempool>>,
stateWitness: &Arc<Mutex<StateWitness>>,
//stateSet: &Arc<Mutex<StateSet>>,
blockchain: &Arc<Mutex<Blockchain>>,
local_public_key: &[u8],
local_address: &H160,
ifArchival: bool,
) -> (Context, Handle) {
let (signal_chan_sender, signal_chan_receiver) = unbounded();
let ctx = Context {
local_address: *local_address,
local_public_key: (*local_public_key).to_owned(),
mempool: Arc::clone(mempool),
stateWitness: Arc::clone(stateWitness),
//stateSet: Arc::clone(stateSet),
blockchain: Arc::clone(blockchain),
control_chan: signal_chan_receiver,
operating_state: OperatingState::Paused,
server: server.clone(),
ifArchival: ifArchival,
};
let handle = Handle {
control_chan: signal_chan_sender,
};
(ctx, handle)
}
impl Handle {
pub fn | (&self) {
self.control_chan.send(ControlSignal::Exit).unwrap();
}
pub fn start(&self, lambda: u64) {
self.control_chan
.send(ControlSignal::Start(lambda))
.unwrap();
}
}
impl Context {
pub fn start(mut self) {
thread::Builder::new()
.name("miner".to_string())
.spawn(move || {
self.miner_loop();
})
.unwrap();
info!("Miner initialized into paused mode");
}
fn handle_control_signal(&mut self, signal: ControlSignal) {
match signal {
ControlSignal::Exit => {
info!("Miner shutting down");
self.operating_state = OperatingState::ShutDown;
}
ControlSignal::Start(i) => {
info!("Miner starting in continuous mode with lambda {}", i);
self.operating_state = OperatingState::Run(i);
}
}
}
fn miner_loop(&mut self) {
let mut miner_counter:i32 = 0;
//let mut readICO = false;
// main mining loop
loop {
// check and react to control signals
match self.operating_state {
OperatingState::Paused => {
let signal = self.control_chan.recv().unwrap();
self.handle_control_signal(signal);
continue;
}
OperatingState::ShutDown => {
return;
}
_ => match self.control_chan.try_recv() {
Ok(signal) => {
self.handle_control_signal(signal);
}
Err(TryRecvError::Empty) => {}
Err(TryRecvError::Disconnected) => panic!("Miner control channel detached"),
},
}
if let OperatingState::ShutDown = self.operating_state {
return;
}
//Read ICO & Update initial state
/*
if !readICO {
// Initialize State
//println!("local: {:?}", self.local_address);
let mut state = self.state.lock().unwrap();
println!("ICO: THE ICO IS WORKING ON PROCESSES: {:?}",self.local_address);
let data = fs::read("ICO.txt").expect("Unable to read file");
let data_len: usize = (data.len() / 20) as usize;
println!("data_length: {:?}", data.len());
for i in 0..data_len {
let mut start = i * 20;
let mut end = (i + 1) * 20;
let mut addr_u8: [u8; 20] = [0; 20];
addr_u8.clone_from_slice(&data[start..end]);
let mut address: H160 = <H160>::from(addr_u8);
//println!("all: {:?}", address);
state.Outputs.insert((<H256>::from(digest::digest(&digest::SHA256, &[0x00 as u8])), i as u32), (100.0 as f32, address));
}
readICO = true;
println!("LOCAL STATES: {:?}", state.Outputs);
println!("PROCESS {:?} CAN START TO MINE BLOCKS.",self.local_address);
std::mem::drop(state);
}
*/
// TODO: actual mining
if self.mempool.lock().unwrap().Transactions.keys().len() > 0 && !self.ifArchival {
//info!("MINER: STARTING...");
let nonce:u32 = thread_rng().gen();
let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_millis();
// difficulty
let mut bytes32 = [255u8;32];
bytes32[0]=1;
bytes32[1]=1;
let difficulty : H256 = bytes32.into();
// read transactions from mempool
let mut signedTransaction = Vec::<SignedTransaction>::new();
let block_size_limit = 5;
let mut tx_counter = 0;
//let mut state = self.state.lock().unwrap();
let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
let mut key_iter= mempool.Transactions.keys();
for key in mempool.Transactions.keys(){
//println!("MINER: MEMPOOL KEYS:{:?}, INPUT: {:?}, OUTPUT: {:?}", key, mempool.Transactions.get(key).unwrap().transaction.Input, mempool.Transactions.get(key).unwrap().transaction.Output);
}
while tx_counter < block_size_limit {
match key_iter.next() {
Some(hash) => {
//println!("Miner: tx: {:?}",hash);
//println!("Miner: preTx: {:?}, PreIndex: {:?}",mempool.getPreTxHash(hash), mempool.getPreIndex(hash));
//double spent check and verify signature
if stateWitness.ifNotDoubleSpent(&mempool.Transactions.get(&hash).unwrap().transaction.Input,&self.blockchain.lock().unwrap().tip.0 )// // NEW TODO Change it to state witness
&& mempool.Transactions.get(hash).unwrap().verifySignedTransaction() {
//info!("Miner: Adding to block HERE");
signedTransaction.push(mempool.Transactions.get(hash).unwrap().clone());
tx_counter = tx_counter + 1;
}
}
None => {
break;
}
}
}
std::mem::drop(mempool);
std::mem::drop(stateWitness);
if signedTransaction.capacity() > 0 {
//info!("MINER: ADDING...");
//info!("MINER: MERKLETREE CHECKING...");
let mut MerkleTree = MerkleTree::new(&signedTransaction);
//info!("MINER: MERKLETREE CHECKED");
let newContent = Content{
content: signedTransaction,
};
let newHeader = Header{
parent: self.blockchain.lock().unwrap().tip(),
nonce: nonce,
difficulty: difficulty,
timestamp: timestamp,
merkleRoot: MerkleTree.root(),
};
let newBlock = Block{
Header: newHeader,
Content: newContent,
};
//println!("1: {:?}", newBlock.hash() );
//println!("2: {:?}", difficulty );
//info!("MINER: BLOCK CREATED");
if newBlock.hash() <= difficulty {
let mut contents = newBlock.Content.content.clone();
//let mut state = self.state.lock().unwrap();
let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
//let mut stateSet = self.stateSet.lock().unwrap();
let mut check = true;
for content in contents.iter(){
if stateWitness.ifNotDoubleSpent(&content.transaction.Input,&self.blockchain.lock().unwrap().tip.0 )
&& content.verifySignedTransaction() {//state.ifNotDoubleSpent(content)
check = check && true;
}
else{
check = check && false;
break;
}
}
std::mem::drop(stateWitness);
std::mem::drop(mempool);
if check {
let mut blockchain = self.blockchain.lock().unwrap();
let tip_hash = blockchain.insert(&newBlock);
//info!("MINER: NEW BLOCK ADDED");
miner_counter += 1;
println!("MINER: CURRENT MINER COUNT: {:?}", miner_counter);
println!("MINER: CURRENT BLOCKCHAIN HEIGHT: {:?}", blockchain.tip.1);
//let mut state = self.state.lock().unwrap();
//let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
mempool.updateMempool(&contents);
/*for key in state.Outputs.keys() {
println!("MINER: RECP: {:?}, VALUE {:?}", state.Outputs.get(key).unwrap().1, state.Outputs.get(key).unwrap().0);
}*/
self.server.broadcast(Message::NewBlockHashes(blockchain.all_blocks_in_longest_chain()));
//info!("MINER: BLOCK MESSAGES SENT");
std::mem::drop(blockchain);
std::mem::drop(mempool);
}
}
}
}
if let OperatingState::Run(i) = self.operating_state {
if i != 0 {
let interval = time::Duration::from_micros(i as u64);
thread::sleep(interval);
}
}
let interval = time::Duration::from_micros(1000 as u64);
thread::sleep(interval);
}
}
}
| exit | identifier_name |
miner.rs | use crate::network::message::{Message};
use crate::network::server::Handle as ServerHandle;
use std::sync::{Arc, Mutex};
use crate::crypto::hash::{H256, Hashable, H160};
use crate::blockchain::Blockchain;
use crate::block::{Block,Header,Content};
use crate::crypto::merkle::{MerkleTree};
use crate::transaction::{Transaction, Mempool, SignedTransaction, StateWitness};
use rand::{thread_rng, Rng};
use ring::{digest};
use log::{info,debug};
use crossbeam::channel::{unbounded, Receiver, Sender, TryRecvError};
use std::{time, fs};
use std::time::{SystemTime, UNIX_EPOCH};
use std::thread;
use ring::signature::Ed25519KeyPair;
enum ControlSignal {
Start(u64), // the number controls the lambda of interval between block generation
Exit,
}
enum OperatingState {
Paused,
Run(u64),
ShutDown,
}
pub struct Context {
/// Channel for receiving control signal
local_address: H160,
local_public_key: Vec<u8>,
mempool: Arc<Mutex<Mempool>>,
stateWitness: Arc<Mutex<StateWitness>>,
//stateSet: Arc<Mutex<StateSet>>,
blockchain: Arc<Mutex<Blockchain>>,
control_chan: Receiver<ControlSignal>,
operating_state: OperatingState,
server: ServerHandle,
ifArchival: bool,
}
#[derive(Clone)]
pub struct Handle {
/// Channel for sending signal to the miner thread
control_chan: Sender<ControlSignal>,
}
pub fn new(
server: &ServerHandle,
mempool: &Arc<Mutex<Mempool>>,
stateWitness: &Arc<Mutex<StateWitness>>,
//stateSet: &Arc<Mutex<StateSet>>,
blockchain: &Arc<Mutex<Blockchain>>,
local_public_key: &[u8],
local_address: &H160,
ifArchival: bool,
) -> (Context, Handle) {
let (signal_chan_sender, signal_chan_receiver) = unbounded();
let ctx = Context {
local_address: *local_address,
local_public_key: (*local_public_key).to_owned(),
mempool: Arc::clone(mempool),
stateWitness: Arc::clone(stateWitness),
//stateSet: Arc::clone(stateSet),
blockchain: Arc::clone(blockchain),
control_chan: signal_chan_receiver,
operating_state: OperatingState::Paused,
server: server.clone(),
ifArchival: ifArchival,
};
let handle = Handle {
control_chan: signal_chan_sender,
};
(ctx, handle)
}
impl Handle {
pub fn exit(&self) {
self.control_chan.send(ControlSignal::Exit).unwrap();
}
pub fn start(&self, lambda: u64) {
self.control_chan
.send(ControlSignal::Start(lambda))
.unwrap();
}
}
impl Context {
pub fn start(mut self) {
thread::Builder::new()
.name("miner".to_string())
.spawn(move || {
self.miner_loop();
})
.unwrap();
info!("Miner initialized into paused mode");
}
fn handle_control_signal(&mut self, signal: ControlSignal) {
match signal {
ControlSignal::Exit => {
info!("Miner shutting down");
self.operating_state = OperatingState::ShutDown;
}
ControlSignal::Start(i) => {
info!("Miner starting in continuous mode with lambda {}", i);
self.operating_state = OperatingState::Run(i);
}
}
}
fn miner_loop(&mut self) {
let mut miner_counter:i32 = 0;
//let mut readICO = false;
// main mining loop
loop {
// check and react to control signals
match self.operating_state {
OperatingState::Paused => {
let signal = self.control_chan.recv().unwrap();
self.handle_control_signal(signal);
continue;
}
OperatingState::ShutDown => {
return;
}
_ => match self.control_chan.try_recv() {
Ok(signal) => {
self.handle_control_signal(signal);
}
Err(TryRecvError::Empty) => {}
Err(TryRecvError::Disconnected) => panic!("Miner control channel detached"),
},
}
if let OperatingState::ShutDown = self.operating_state {
return;
}
//Read ICO & Update initial state
/*
if !readICO {
// Initialize State
//println!("local: {:?}", self.local_address);
let mut state = self.state.lock().unwrap();
println!("ICO: THE ICO IS WORKING ON PROCESSES: {:?}",self.local_address);
let data = fs::read("ICO.txt").expect("Unable to read file");
let data_len: usize = (data.len() / 20) as usize;
println!("data_length: {:?}", data.len());
for i in 0..data_len {
let mut start = i * 20;
let mut end = (i + 1) * 20;
let mut addr_u8: [u8; 20] = [0; 20];
addr_u8.clone_from_slice(&data[start..end]);
let mut address: H160 = <H160>::from(addr_u8);
//println!("all: {:?}", address);
state.Outputs.insert((<H256>::from(digest::digest(&digest::SHA256, &[0x00 as u8])), i as u32), (100.0 as f32, address));
}
readICO = true;
println!("LOCAL STATES: {:?}", state.Outputs);
println!("PROCESS {:?} CAN START TO MINE BLOCKS.",self.local_address);
std::mem::drop(state);
}
*/
// TODO: actual mining
if self.mempool.lock().unwrap().Transactions.keys().len() > 0 && !self.ifArchival {
//info!("MINER: STARTING...");
let nonce:u32 = thread_rng().gen();
let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_millis();
// difficulty
let mut bytes32 = [255u8;32];
bytes32[0]=1;
bytes32[1]=1;
let difficulty : H256 = bytes32.into();
// read transactions from mempool
let mut signedTransaction = Vec::<SignedTransaction>::new();
let block_size_limit = 5;
let mut tx_counter = 0;
//let mut state = self.state.lock().unwrap();
let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
let mut key_iter= mempool.Transactions.keys();
for key in mempool.Transactions.keys(){
//println!("MINER: MEMPOOL KEYS:{:?}, INPUT: {:?}, OUTPUT: {:?}", key, mempool.Transactions.get(key).unwrap().transaction.Input, mempool.Transactions.get(key).unwrap().transaction.Output);
}
while tx_counter < block_size_limit {
match key_iter.next() {
Some(hash) => {
//println!("Miner: tx: {:?}",hash);
//println!("Miner: preTx: {:?}, PreIndex: {:?}",mempool.getPreTxHash(hash), mempool.getPreIndex(hash));
//double spent check and verify signature
if stateWitness.ifNotDoubleSpent(&mempool.Transactions.get(&hash).unwrap().transaction.Input,&self.blockchain.lock().unwrap().tip.0 )// // NEW TODO Change it to state witness
&& mempool.Transactions.get(hash).unwrap().verifySignedTransaction() {
//info!("Miner: Adding to block HERE");
signedTransaction.push(mempool.Transactions.get(hash).unwrap().clone());
tx_counter = tx_counter + 1;
}
}
None => {
break;
}
}
}
std::mem::drop(mempool);
std::mem::drop(stateWitness);
if signedTransaction.capacity() > 0 {
//info!("MINER: ADDING...");
//info!("MINER: MERKLETREE CHECKING...");
let mut MerkleTree = MerkleTree::new(&signedTransaction);
//info!("MINER: MERKLETREE CHECKED");
let newContent = Content{
content: signedTransaction,
};
let newHeader = Header{
parent: self.blockchain.lock().unwrap().tip(),
nonce: nonce,
difficulty: difficulty,
timestamp: timestamp,
merkleRoot: MerkleTree.root(),
};
let newBlock = Block{
Header: newHeader,
Content: newContent,
};
//println!("1: {:?}", newBlock.hash() );
//println!("2: {:?}", difficulty );
//info!("MINER: BLOCK CREATED");
if newBlock.hash() <= difficulty |
}
}
if let OperatingState::Run(i) = self.operating_state {
if i != 0 {
let interval = time::Duration::from_micros(i as u64);
thread::sleep(interval);
}
}
let interval = time::Duration::from_micros(1000 as u64);
thread::sleep(interval);
}
}
}
| {
let mut contents = newBlock.Content.content.clone();
//let mut state = self.state.lock().unwrap();
let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
//let mut stateSet = self.stateSet.lock().unwrap();
let mut check = true;
for content in contents.iter(){
if stateWitness.ifNotDoubleSpent(&content.transaction.Input,&self.blockchain.lock().unwrap().tip.0 )
&& content.verifySignedTransaction() {//state.ifNotDoubleSpent(content)
check = check && true;
}
else{
check = check && false;
break;
}
}
std::mem::drop(stateWitness);
std::mem::drop(mempool);
if check {
let mut blockchain = self.blockchain.lock().unwrap();
let tip_hash = blockchain.insert(&newBlock);
//info!("MINER: NEW BLOCK ADDED");
miner_counter += 1;
println!("MINER: CURRENT MINER COUNT: {:?}", miner_counter);
println!("MINER: CURRENT BLOCKCHAIN HEIGHT: {:?}", blockchain.tip.1);
//let mut state = self.state.lock().unwrap();
//let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
mempool.updateMempool(&contents);
/*for key in state.Outputs.keys() {
println!("MINER: RECP: {:?}, VALUE {:?}", state.Outputs.get(key).unwrap().1, state.Outputs.get(key).unwrap().0);
}*/
self.server.broadcast(Message::NewBlockHashes(blockchain.all_blocks_in_longest_chain()));
//info!("MINER: BLOCK MESSAGES SENT");
std::mem::drop(blockchain);
std::mem::drop(mempool);
}
} | conditional_block |
graphics.rs | use crate::mthelper::SharedRef;
use bedrock as br;
use br::{
CommandBuffer, CommandPool, Device, Instance, InstanceChild, PhysicalDevice, Queue,
SubmissionBatch,
};
use log::{debug, info, warn};
use std::ops::Deref;
pub type InstanceObject = SharedRef<br::InstanceObject>;
pub type DeviceObject = SharedRef<br::DeviceObject<InstanceObject>>;
/// Queue object with family index
pub struct QueueSet<Device: br::Device> {
pub(crate) q: parking_lot::Mutex<br::QueueObject<Device>>,
pub(crate) family: u32,
}
mod command_bundle;
pub use self::command_bundle::*;
#[cfg(feature = "mt")]
mod async_fence_driver;
#[cfg(feature = "mt")]
pub use self::async_fence_driver::*;
#[derive(Debug)]
pub enum GraphicsInitializationError {
LayerEnumerationFailed(br::VkResultBox),
VulkanError(br::VkResultBox),
NoPhysicalDevices,
NoSuitableGraphicsQueue,
}
impl From<br::VkResultBox> for GraphicsInitializationError {
fn from(value: br::VkResultBox) -> Self {
Self::VulkanError(value)
}
}
impl std::fmt::Display for GraphicsInitializationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::LayerEnumerationFailed(r) => write!(f, "vk layer enumeration failed: {r}"),
Self::VulkanError(r) => std::fmt::Display::fmt(r, f),
Self::NoPhysicalDevices => write!(f, "no physical devices available on this machine"),
Self::NoSuitableGraphicsQueue => {
write!(f, "no suitable graphics queue found on device")
}
}
}
}
impl std::error::Error for GraphicsInitializationError {}
/// Graphics manager
pub struct Graphics {
pub(crate) adapter: br::PhysicalDeviceObject<InstanceObject>,
pub(crate) device: DeviceObject,
pub(crate) graphics_queue: QueueSet<DeviceObject>,
cp_onetime_submit: br::CommandPoolObject<DeviceObject>,
pub memory_type_manager: MemoryTypeManager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread<DeviceObject>,
#[cfg(feature = "debug")]
_debug_instance: br::DebugUtilsMessengerObject<InstanceObject>,
}
impl Graphics {
pub(crate) fn new(
app_name: &str,
app_version: (u32, u32, u32),
instance_extensions: Vec<&str>,
device_extensions: Vec<&str>,
features: br::vk::VkPhysicalDeviceFeatures,
) -> Result<Self, GraphicsInitializationError> {
info!("Supported Layers: ");
let mut validation_layer_available = false;
#[cfg(debug_assertions)]
for l in br::enumerate_layer_properties()
.map_err(GraphicsInitializationError::LayerEnumerationFailed)?
{
let name_str = l
.layerName
.as_cstr()
.expect("Failed to decode")
.to_str()
.expect("invalid sequence in layer name");
info!(
"* {name_str} :: {}/{}",
l.specVersion, l.implementationVersion
);
if name_str == "VK_LAYER_KHRONOS_validation" {
validation_layer_available = true;
}
}
let mut ib =
br::InstanceBuilder::new(app_name, app_version, "Interlude2:Peridot", (0, 1, 0));
ib.add_extensions(instance_extensions);
#[cfg(debug_assertions)]
ib.add_extension("VK_EXT_debug_report");
if validation_layer_available {
ib.add_layer("VK_LAYER_KHRONOS_validation");
} else {
| #[cfg(feature = "debug")]
{
ib.add_extension("VK_EXT_debug_utils");
debug!("Debug reporting activated");
}
let instance = SharedRef::new(ib.create()?);
#[cfg(feature = "debug")]
let _debug_instance = br::DebugUtilsMessengerCreateInfo::new(crate::debug::debug_utils_out)
.filter_severity(br::DebugUtilsMessageSeverityFlags::ERROR.and_warning())
.create(instance.clone())?;
let adapter = instance
.iter_physical_devices()?
.next()
.ok_or(GraphicsInitializationError::NoPhysicalDevices)?;
let memory_type_manager = MemoryTypeManager::new(&adapter);
MemoryTypeManager::diagnose_heaps(&adapter);
memory_type_manager.diagnose_types();
let gqf_index = adapter
.queue_family_properties()
.find_matching_index(br::QueueFlags::GRAPHICS)
.ok_or(GraphicsInitializationError::NoSuitableGraphicsQueue)?;
let qci = br::DeviceQueueCreateInfo(gqf_index, vec![0.0]);
let device = {
let mut db = br::DeviceBuilder::new(&adapter);
db.add_extensions(device_extensions).add_queue(qci);
if validation_layer_available {
db.add_layer("VK_LAYER_KHRONOS_validation");
}
*db.mod_features() = features;
SharedRef::new(db.create()?.clone_parent())
};
Ok(Self {
cp_onetime_submit: device.clone().new_command_pool(gqf_index, true, false)?,
graphics_queue: QueueSet {
q: parking_lot::Mutex::new(device.clone().queue(gqf_index, 0)),
family: gqf_index,
},
adapter: adapter.clone_parent(),
device,
memory_type_manager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread::new(),
#[cfg(feature = "debug")]
_debug_instance,
})
}
/// Submits any commands as transient commands.
pub fn submit_commands(
&mut self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<()> {
let mut cb = LocalCommandBundle(
self.cp_onetime_submit.alloc(1, true)?,
&mut self.cp_onetime_submit,
);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.get_mut().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
None::<&mut br::FenceObject<DeviceObject>>,
)?;
self.graphics_queue.q.get_mut().wait()
}
pub fn submit_buffered_commands(
&mut self,
batches: &[impl br::SubmissionBatch],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue.q.get_mut().submit(batches, Some(fence))
}
pub fn submit_buffered_commands_raw(
&mut self,
batches: &[br::vk::VkSubmitInfo],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue
.q
.get_mut()
.submit_raw(batches, Some(fence))
}
/// Submits any commands as transient commands.
/// ## Note
/// Unlike other futures, commands are submitted **immediately**(even if not awaiting the returned future).
#[cfg(feature = "mt")]
pub fn submit_commands_async<'s>(
&'s self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<impl std::future::Future<Output = br::Result<()>> + 's> {
let mut fence = std::sync::Arc::new(self.device.clone().new_fence(false)?);
let mut pool = self.device.clone().new_command_pool(
self.graphics_queue_family_index(),
true,
false,
)?;
let mut cb = CommandBundle(pool.alloc(1, true)?, pool);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.lock().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
Some(unsafe { std::sync::Arc::get_mut(&mut fence).unwrap_unchecked() }),
)?;
Ok(async move {
self.await_fence(fence).await?;
// keep alive command buffers while execution
drop(cb);
Ok(())
})
}
/// Awaits fence on background thread
#[cfg(feature = "mt")]
pub const fn await_fence<'s>(
&'s self,
fence: std::sync::Arc<
impl br::Fence<ConcreteDevice = DeviceObject> + Send + Sync + 'static,
>,
) -> impl std::future::Future<Output = br::Result<()>> + 's {
FenceWaitFuture {
reactor: &self.fence_reactor,
object: fence,
registered: false,
}
}
pub fn instance(&self) -> &InstanceObject {
self.device.instance()
}
pub const fn adapter(&self) -> &br::PhysicalDeviceObject<InstanceObject> {
&self.adapter
}
pub const fn device(&self) -> &DeviceObject {
&self.device
}
pub const fn graphics_queue_family_index(&self) -> u32 {
self.graphics_queue.family
}
}
impl Deref for Graphics {
type Target = DeviceObject;
fn deref(&self) -> &DeviceObject {
&self.device
}
}
#[derive(Clone)]
pub struct MemoryType(u32, br::vk::VkMemoryType);
impl MemoryType {
pub const fn index(&self) -> u32 {
self.0
}
pub const fn corresponding_mask(&self) -> u32 {
0x01 << self.0
}
pub const fn has_covered_by_mask(&self, mask: u32) -> bool {
(mask & self.corresponding_mask()) != 0
}
pub const fn has_property_flags(&self, other: br::MemoryPropertyFlags) -> bool {
(self.1.propertyFlags & other.bits()) != 0
}
pub const fn is_device_local(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::DEVICE_LOCAL)
}
pub const fn visible_from_host(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_VISIBLE)
}
pub const fn is_host_coherent(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_COHERENT)
}
pub const fn is_host_cached(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_CACHED)
}
}
impl std::fmt::Debug for MemoryType {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
let mut flags = Vec::with_capacity(6);
if self.is_device_local() {
flags.push("DEVICE LOCAL");
}
if self.visible_from_host() {
flags.push("HOST VISIBLE");
}
if self.is_host_cached() {
flags.push("CACHED");
}
if self.is_host_coherent() {
flags.push("COHERENT");
}
if (self.1.propertyFlags & br::vk::VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0 {
flags.push("PROTECTED");
}
if self.has_property_flags(br::MemoryPropertyFlags::LAZILY_ALLOCATED) {
flags.push("LAZILY ALLOCATED");
}
write!(
fmt,
"{}: [{}] in heap #{}",
self.index(),
flags.join("/"),
self.1.heapIndex
)
}
}
pub struct MemoryTypeManager {
device_memory_types: Vec<MemoryType>,
host_memory_types: Vec<MemoryType>,
}
impl MemoryTypeManager {
fn new(pd: &impl br::PhysicalDevice) -> Self {
let mem = pd.memory_properties();
let (mut device_memory_types, mut host_memory_types) = (Vec::new(), Vec::new());
for mt in mem
.types()
.enumerate()
.map(|(n, mt)| MemoryType(n as _, mt.clone()))
{
if mt.is_device_local() {
device_memory_types.push(mt.clone());
}
if mt.visible_from_host() {
host_memory_types.push(mt.clone());
}
}
Self {
device_memory_types,
host_memory_types,
}
}
pub fn exact_host_visible_index(
&self,
mask: u32,
required: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask) && mt.has_property_flags(required))
}
pub fn host_visible_index(
&self,
mask: u32,
preference: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.exact_host_visible_index(mask, preference).or_else(|| {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
})
}
pub fn device_local_index(&self, mask: u32) -> Option<&MemoryType> {
self.device_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
}
fn diagnose_heaps(p: &impl br::PhysicalDevice) {
info!("Memory Heaps: ");
for (n, h) in p.memory_properties().heaps().enumerate() {
let (mut nb, mut unit) = (h.size as f32, "bytes");
if nb >= 10000.0 {
nb /= 1024.0;
unit = "KB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "MB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "GB";
}
let is_device_local = (h.flags & br::vk::VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
info!(
" #{n}: {nb} {unit} {}",
if is_device_local {
"[DEVICE_LOCAL]"
} else {
""
}
);
}
}
fn diagnose_types(&self) {
info!("Device Memory Types: ");
for mt in &self.device_memory_types {
info!(" {:?}", mt);
}
info!("Host Visible Memory Types: ");
for mt in &self.host_memory_types {
info!(" {:?}", mt);
}
}
} | warn!("Validation Layer is not found!");
}
| random_line_split |
graphics.rs | use crate::mthelper::SharedRef;
use bedrock as br;
use br::{
CommandBuffer, CommandPool, Device, Instance, InstanceChild, PhysicalDevice, Queue,
SubmissionBatch,
};
use log::{debug, info, warn};
use std::ops::Deref;
pub type InstanceObject = SharedRef<br::InstanceObject>;
pub type DeviceObject = SharedRef<br::DeviceObject<InstanceObject>>;
/// Queue object with family index
pub struct QueueSet<Device: br::Device> {
pub(crate) q: parking_lot::Mutex<br::QueueObject<Device>>,
pub(crate) family: u32,
}
mod command_bundle;
pub use self::command_bundle::*;
#[cfg(feature = "mt")]
mod async_fence_driver;
#[cfg(feature = "mt")]
pub use self::async_fence_driver::*;
#[derive(Debug)]
pub enum GraphicsInitializationError {
LayerEnumerationFailed(br::VkResultBox),
VulkanError(br::VkResultBox),
NoPhysicalDevices,
NoSuitableGraphicsQueue,
}
impl From<br::VkResultBox> for GraphicsInitializationError {
fn from(value: br::VkResultBox) -> Self {
Self::VulkanError(value)
}
}
impl std::fmt::Display for GraphicsInitializationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::LayerEnumerationFailed(r) => write!(f, "vk layer enumeration failed: {r}"),
Self::VulkanError(r) => std::fmt::Display::fmt(r, f),
Self::NoPhysicalDevices => write!(f, "no physical devices available on this machine"),
Self::NoSuitableGraphicsQueue => {
write!(f, "no suitable graphics queue found on device")
}
}
}
}
impl std::error::Error for GraphicsInitializationError {}
/// Graphics manager
pub struct Graphics {
pub(crate) adapter: br::PhysicalDeviceObject<InstanceObject>,
pub(crate) device: DeviceObject,
pub(crate) graphics_queue: QueueSet<DeviceObject>,
cp_onetime_submit: br::CommandPoolObject<DeviceObject>,
pub memory_type_manager: MemoryTypeManager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread<DeviceObject>,
#[cfg(feature = "debug")]
_debug_instance: br::DebugUtilsMessengerObject<InstanceObject>,
}
impl Graphics {
pub(crate) fn new(
app_name: &str,
app_version: (u32, u32, u32),
instance_extensions: Vec<&str>,
device_extensions: Vec<&str>,
features: br::vk::VkPhysicalDeviceFeatures,
) -> Result<Self, GraphicsInitializationError> {
info!("Supported Layers: ");
let mut validation_layer_available = false;
#[cfg(debug_assertions)]
for l in br::enumerate_layer_properties()
.map_err(GraphicsInitializationError::LayerEnumerationFailed)?
{
let name_str = l
.layerName
.as_cstr()
.expect("Failed to decode")
.to_str()
.expect("invalid sequence in layer name");
info!(
"* {name_str} :: {}/{}",
l.specVersion, l.implementationVersion
);
if name_str == "VK_LAYER_KHRONOS_validation" {
validation_layer_available = true;
}
}
let mut ib =
br::InstanceBuilder::new(app_name, app_version, "Interlude2:Peridot", (0, 1, 0));
ib.add_extensions(instance_extensions);
#[cfg(debug_assertions)]
ib.add_extension("VK_EXT_debug_report");
if validation_layer_available {
ib.add_layer("VK_LAYER_KHRONOS_validation");
} else {
warn!("Validation Layer is not found!");
}
#[cfg(feature = "debug")]
{
ib.add_extension("VK_EXT_debug_utils");
debug!("Debug reporting activated");
}
let instance = SharedRef::new(ib.create()?);
#[cfg(feature = "debug")]
let _debug_instance = br::DebugUtilsMessengerCreateInfo::new(crate::debug::debug_utils_out)
.filter_severity(br::DebugUtilsMessageSeverityFlags::ERROR.and_warning())
.create(instance.clone())?;
let adapter = instance
.iter_physical_devices()?
.next()
.ok_or(GraphicsInitializationError::NoPhysicalDevices)?;
let memory_type_manager = MemoryTypeManager::new(&adapter);
MemoryTypeManager::diagnose_heaps(&adapter);
memory_type_manager.diagnose_types();
let gqf_index = adapter
.queue_family_properties()
.find_matching_index(br::QueueFlags::GRAPHICS)
.ok_or(GraphicsInitializationError::NoSuitableGraphicsQueue)?;
let qci = br::DeviceQueueCreateInfo(gqf_index, vec![0.0]);
let device = {
let mut db = br::DeviceBuilder::new(&adapter);
db.add_extensions(device_extensions).add_queue(qci);
if validation_layer_available {
db.add_layer("VK_LAYER_KHRONOS_validation");
}
*db.mod_features() = features;
SharedRef::new(db.create()?.clone_parent())
};
Ok(Self {
cp_onetime_submit: device.clone().new_command_pool(gqf_index, true, false)?,
graphics_queue: QueueSet {
q: parking_lot::Mutex::new(device.clone().queue(gqf_index, 0)),
family: gqf_index,
},
adapter: adapter.clone_parent(),
device,
memory_type_manager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread::new(),
#[cfg(feature = "debug")]
_debug_instance,
})
}
/// Submits any commands as transient commands.
pub fn submit_commands(
&mut self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<()> {
let mut cb = LocalCommandBundle(
self.cp_onetime_submit.alloc(1, true)?,
&mut self.cp_onetime_submit,
);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.get_mut().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
None::<&mut br::FenceObject<DeviceObject>>,
)?;
self.graphics_queue.q.get_mut().wait()
}
pub fn submit_buffered_commands(
&mut self,
batches: &[impl br::SubmissionBatch],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue.q.get_mut().submit(batches, Some(fence))
}
pub fn | (
&mut self,
batches: &[br::vk::VkSubmitInfo],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue
.q
.get_mut()
.submit_raw(batches, Some(fence))
}
/// Submits any commands as transient commands.
/// ## Note
/// Unlike other futures, commands are submitted **immediately**(even if not awaiting the returned future).
#[cfg(feature = "mt")]
pub fn submit_commands_async<'s>(
&'s self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<impl std::future::Future<Output = br::Result<()>> + 's> {
let mut fence = std::sync::Arc::new(self.device.clone().new_fence(false)?);
let mut pool = self.device.clone().new_command_pool(
self.graphics_queue_family_index(),
true,
false,
)?;
let mut cb = CommandBundle(pool.alloc(1, true)?, pool);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.lock().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
Some(unsafe { std::sync::Arc::get_mut(&mut fence).unwrap_unchecked() }),
)?;
Ok(async move {
self.await_fence(fence).await?;
// keep alive command buffers while execution
drop(cb);
Ok(())
})
}
/// Awaits fence on background thread
#[cfg(feature = "mt")]
pub const fn await_fence<'s>(
&'s self,
fence: std::sync::Arc<
impl br::Fence<ConcreteDevice = DeviceObject> + Send + Sync + 'static,
>,
) -> impl std::future::Future<Output = br::Result<()>> + 's {
FenceWaitFuture {
reactor: &self.fence_reactor,
object: fence,
registered: false,
}
}
pub fn instance(&self) -> &InstanceObject {
self.device.instance()
}
pub const fn adapter(&self) -> &br::PhysicalDeviceObject<InstanceObject> {
&self.adapter
}
pub const fn device(&self) -> &DeviceObject {
&self.device
}
pub const fn graphics_queue_family_index(&self) -> u32 {
self.graphics_queue.family
}
}
impl Deref for Graphics {
type Target = DeviceObject;
fn deref(&self) -> &DeviceObject {
&self.device
}
}
#[derive(Clone)]
pub struct MemoryType(u32, br::vk::VkMemoryType);
impl MemoryType {
pub const fn index(&self) -> u32 {
self.0
}
pub const fn corresponding_mask(&self) -> u32 {
0x01 << self.0
}
pub const fn has_covered_by_mask(&self, mask: u32) -> bool {
(mask & self.corresponding_mask()) != 0
}
pub const fn has_property_flags(&self, other: br::MemoryPropertyFlags) -> bool {
(self.1.propertyFlags & other.bits()) != 0
}
pub const fn is_device_local(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::DEVICE_LOCAL)
}
pub const fn visible_from_host(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_VISIBLE)
}
pub const fn is_host_coherent(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_COHERENT)
}
pub const fn is_host_cached(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_CACHED)
}
}
impl std::fmt::Debug for MemoryType {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
let mut flags = Vec::with_capacity(6);
if self.is_device_local() {
flags.push("DEVICE LOCAL");
}
if self.visible_from_host() {
flags.push("HOST VISIBLE");
}
if self.is_host_cached() {
flags.push("CACHED");
}
if self.is_host_coherent() {
flags.push("COHERENT");
}
if (self.1.propertyFlags & br::vk::VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0 {
flags.push("PROTECTED");
}
if self.has_property_flags(br::MemoryPropertyFlags::LAZILY_ALLOCATED) {
flags.push("LAZILY ALLOCATED");
}
write!(
fmt,
"{}: [{}] in heap #{}",
self.index(),
flags.join("/"),
self.1.heapIndex
)
}
}
pub struct MemoryTypeManager {
device_memory_types: Vec<MemoryType>,
host_memory_types: Vec<MemoryType>,
}
impl MemoryTypeManager {
fn new(pd: &impl br::PhysicalDevice) -> Self {
let mem = pd.memory_properties();
let (mut device_memory_types, mut host_memory_types) = (Vec::new(), Vec::new());
for mt in mem
.types()
.enumerate()
.map(|(n, mt)| MemoryType(n as _, mt.clone()))
{
if mt.is_device_local() {
device_memory_types.push(mt.clone());
}
if mt.visible_from_host() {
host_memory_types.push(mt.clone());
}
}
Self {
device_memory_types,
host_memory_types,
}
}
pub fn exact_host_visible_index(
&self,
mask: u32,
required: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask) && mt.has_property_flags(required))
}
pub fn host_visible_index(
&self,
mask: u32,
preference: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.exact_host_visible_index(mask, preference).or_else(|| {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
})
}
pub fn device_local_index(&self, mask: u32) -> Option<&MemoryType> {
self.device_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
}
fn diagnose_heaps(p: &impl br::PhysicalDevice) {
info!("Memory Heaps: ");
for (n, h) in p.memory_properties().heaps().enumerate() {
let (mut nb, mut unit) = (h.size as f32, "bytes");
if nb >= 10000.0 {
nb /= 1024.0;
unit = "KB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "MB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "GB";
}
let is_device_local = (h.flags & br::vk::VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
info!(
" #{n}: {nb} {unit} {}",
if is_device_local {
"[DEVICE_LOCAL]"
} else {
""
}
);
}
}
fn diagnose_types(&self) {
info!("Device Memory Types: ");
for mt in &self.device_memory_types {
info!(" {:?}", mt);
}
info!("Host Visible Memory Types: ");
for mt in &self.host_memory_types {
info!(" {:?}", mt);
}
}
}
| submit_buffered_commands_raw | identifier_name |
graphics.rs | use crate::mthelper::SharedRef;
use bedrock as br;
use br::{
CommandBuffer, CommandPool, Device, Instance, InstanceChild, PhysicalDevice, Queue,
SubmissionBatch,
};
use log::{debug, info, warn};
use std::ops::Deref;
pub type InstanceObject = SharedRef<br::InstanceObject>;
pub type DeviceObject = SharedRef<br::DeviceObject<InstanceObject>>;
/// Queue object with family index
pub struct QueueSet<Device: br::Device> {
pub(crate) q: parking_lot::Mutex<br::QueueObject<Device>>,
pub(crate) family: u32,
}
mod command_bundle;
pub use self::command_bundle::*;
#[cfg(feature = "mt")]
mod async_fence_driver;
#[cfg(feature = "mt")]
pub use self::async_fence_driver::*;
#[derive(Debug)]
pub enum GraphicsInitializationError {
LayerEnumerationFailed(br::VkResultBox),
VulkanError(br::VkResultBox),
NoPhysicalDevices,
NoSuitableGraphicsQueue,
}
impl From<br::VkResultBox> for GraphicsInitializationError {
fn from(value: br::VkResultBox) -> Self {
Self::VulkanError(value)
}
}
impl std::fmt::Display for GraphicsInitializationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::LayerEnumerationFailed(r) => write!(f, "vk layer enumeration failed: {r}"),
Self::VulkanError(r) => std::fmt::Display::fmt(r, f),
Self::NoPhysicalDevices => write!(f, "no physical devices available on this machine"),
Self::NoSuitableGraphicsQueue => {
write!(f, "no suitable graphics queue found on device")
}
}
}
}
impl std::error::Error for GraphicsInitializationError {}
/// Graphics manager
pub struct Graphics {
pub(crate) adapter: br::PhysicalDeviceObject<InstanceObject>,
pub(crate) device: DeviceObject,
pub(crate) graphics_queue: QueueSet<DeviceObject>,
cp_onetime_submit: br::CommandPoolObject<DeviceObject>,
pub memory_type_manager: MemoryTypeManager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread<DeviceObject>,
#[cfg(feature = "debug")]
_debug_instance: br::DebugUtilsMessengerObject<InstanceObject>,
}
impl Graphics {
pub(crate) fn new(
app_name: &str,
app_version: (u32, u32, u32),
instance_extensions: Vec<&str>,
device_extensions: Vec<&str>,
features: br::vk::VkPhysicalDeviceFeatures,
) -> Result<Self, GraphicsInitializationError> {
info!("Supported Layers: ");
let mut validation_layer_available = false;
#[cfg(debug_assertions)]
for l in br::enumerate_layer_properties()
.map_err(GraphicsInitializationError::LayerEnumerationFailed)?
{
let name_str = l
.layerName
.as_cstr()
.expect("Failed to decode")
.to_str()
.expect("invalid sequence in layer name");
info!(
"* {name_str} :: {}/{}",
l.specVersion, l.implementationVersion
);
if name_str == "VK_LAYER_KHRONOS_validation" {
validation_layer_available = true;
}
}
let mut ib =
br::InstanceBuilder::new(app_name, app_version, "Interlude2:Peridot", (0, 1, 0));
ib.add_extensions(instance_extensions);
#[cfg(debug_assertions)]
ib.add_extension("VK_EXT_debug_report");
if validation_layer_available {
ib.add_layer("VK_LAYER_KHRONOS_validation");
} else {
warn!("Validation Layer is not found!");
}
#[cfg(feature = "debug")]
{
ib.add_extension("VK_EXT_debug_utils");
debug!("Debug reporting activated");
}
let instance = SharedRef::new(ib.create()?);
#[cfg(feature = "debug")]
let _debug_instance = br::DebugUtilsMessengerCreateInfo::new(crate::debug::debug_utils_out)
.filter_severity(br::DebugUtilsMessageSeverityFlags::ERROR.and_warning())
.create(instance.clone())?;
let adapter = instance
.iter_physical_devices()?
.next()
.ok_or(GraphicsInitializationError::NoPhysicalDevices)?;
let memory_type_manager = MemoryTypeManager::new(&adapter);
MemoryTypeManager::diagnose_heaps(&adapter);
memory_type_manager.diagnose_types();
let gqf_index = adapter
.queue_family_properties()
.find_matching_index(br::QueueFlags::GRAPHICS)
.ok_or(GraphicsInitializationError::NoSuitableGraphicsQueue)?;
let qci = br::DeviceQueueCreateInfo(gqf_index, vec![0.0]);
let device = {
let mut db = br::DeviceBuilder::new(&adapter);
db.add_extensions(device_extensions).add_queue(qci);
if validation_layer_available {
db.add_layer("VK_LAYER_KHRONOS_validation");
}
*db.mod_features() = features;
SharedRef::new(db.create()?.clone_parent())
};
Ok(Self {
cp_onetime_submit: device.clone().new_command_pool(gqf_index, true, false)?,
graphics_queue: QueueSet {
q: parking_lot::Mutex::new(device.clone().queue(gqf_index, 0)),
family: gqf_index,
},
adapter: adapter.clone_parent(),
device,
memory_type_manager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread::new(),
#[cfg(feature = "debug")]
_debug_instance,
})
}
/// Submits any commands as transient commands.
pub fn submit_commands(
&mut self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<()> {
let mut cb = LocalCommandBundle(
self.cp_onetime_submit.alloc(1, true)?,
&mut self.cp_onetime_submit,
);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.get_mut().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
None::<&mut br::FenceObject<DeviceObject>>,
)?;
self.graphics_queue.q.get_mut().wait()
}
pub fn submit_buffered_commands(
&mut self,
batches: &[impl br::SubmissionBatch],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue.q.get_mut().submit(batches, Some(fence))
}
pub fn submit_buffered_commands_raw(
&mut self,
batches: &[br::vk::VkSubmitInfo],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue
.q
.get_mut()
.submit_raw(batches, Some(fence))
}
/// Submits any commands as transient commands.
/// ## Note
/// Unlike other futures, commands are submitted **immediately**(even if not awaiting the returned future).
#[cfg(feature = "mt")]
pub fn submit_commands_async<'s>(
&'s self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<impl std::future::Future<Output = br::Result<()>> + 's> {
let mut fence = std::sync::Arc::new(self.device.clone().new_fence(false)?);
let mut pool = self.device.clone().new_command_pool(
self.graphics_queue_family_index(),
true,
false,
)?;
let mut cb = CommandBundle(pool.alloc(1, true)?, pool);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.lock().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
Some(unsafe { std::sync::Arc::get_mut(&mut fence).unwrap_unchecked() }),
)?;
Ok(async move {
self.await_fence(fence).await?;
// keep alive command buffers while execution
drop(cb);
Ok(())
})
}
/// Awaits fence on background thread
#[cfg(feature = "mt")]
pub const fn await_fence<'s>(
&'s self,
fence: std::sync::Arc<
impl br::Fence<ConcreteDevice = DeviceObject> + Send + Sync + 'static,
>,
) -> impl std::future::Future<Output = br::Result<()>> + 's {
FenceWaitFuture {
reactor: &self.fence_reactor,
object: fence,
registered: false,
}
}
pub fn instance(&self) -> &InstanceObject {
self.device.instance()
}
pub const fn adapter(&self) -> &br::PhysicalDeviceObject<InstanceObject> {
&self.adapter
}
pub const fn device(&self) -> &DeviceObject {
&self.device
}
pub const fn graphics_queue_family_index(&self) -> u32 {
self.graphics_queue.family
}
}
impl Deref for Graphics {
type Target = DeviceObject;
fn deref(&self) -> &DeviceObject {
&self.device
}
}
#[derive(Clone)]
pub struct MemoryType(u32, br::vk::VkMemoryType);
impl MemoryType {
pub const fn index(&self) -> u32 {
self.0
}
pub const fn corresponding_mask(&self) -> u32 {
0x01 << self.0
}
pub const fn has_covered_by_mask(&self, mask: u32) -> bool {
(mask & self.corresponding_mask()) != 0
}
pub const fn has_property_flags(&self, other: br::MemoryPropertyFlags) -> bool {
(self.1.propertyFlags & other.bits()) != 0
}
pub const fn is_device_local(&self) -> bool |
pub const fn visible_from_host(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_VISIBLE)
}
pub const fn is_host_coherent(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_COHERENT)
}
pub const fn is_host_cached(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_CACHED)
}
}
impl std::fmt::Debug for MemoryType {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
let mut flags = Vec::with_capacity(6);
if self.is_device_local() {
flags.push("DEVICE LOCAL");
}
if self.visible_from_host() {
flags.push("HOST VISIBLE");
}
if self.is_host_cached() {
flags.push("CACHED");
}
if self.is_host_coherent() {
flags.push("COHERENT");
}
if (self.1.propertyFlags & br::vk::VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0 {
flags.push("PROTECTED");
}
if self.has_property_flags(br::MemoryPropertyFlags::LAZILY_ALLOCATED) {
flags.push("LAZILY ALLOCATED");
}
write!(
fmt,
"{}: [{}] in heap #{}",
self.index(),
flags.join("/"),
self.1.heapIndex
)
}
}
pub struct MemoryTypeManager {
device_memory_types: Vec<MemoryType>,
host_memory_types: Vec<MemoryType>,
}
impl MemoryTypeManager {
fn new(pd: &impl br::PhysicalDevice) -> Self {
let mem = pd.memory_properties();
let (mut device_memory_types, mut host_memory_types) = (Vec::new(), Vec::new());
for mt in mem
.types()
.enumerate()
.map(|(n, mt)| MemoryType(n as _, mt.clone()))
{
if mt.is_device_local() {
device_memory_types.push(mt.clone());
}
if mt.visible_from_host() {
host_memory_types.push(mt.clone());
}
}
Self {
device_memory_types,
host_memory_types,
}
}
pub fn exact_host_visible_index(
&self,
mask: u32,
required: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask) && mt.has_property_flags(required))
}
pub fn host_visible_index(
&self,
mask: u32,
preference: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.exact_host_visible_index(mask, preference).or_else(|| {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
})
}
pub fn device_local_index(&self, mask: u32) -> Option<&MemoryType> {
self.device_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
}
fn diagnose_heaps(p: &impl br::PhysicalDevice) {
info!("Memory Heaps: ");
for (n, h) in p.memory_properties().heaps().enumerate() {
let (mut nb, mut unit) = (h.size as f32, "bytes");
if nb >= 10000.0 {
nb /= 1024.0;
unit = "KB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "MB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "GB";
}
let is_device_local = (h.flags & br::vk::VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
info!(
" #{n}: {nb} {unit} {}",
if is_device_local {
"[DEVICE_LOCAL]"
} else {
""
}
);
}
}
fn diagnose_types(&self) {
info!("Device Memory Types: ");
for mt in &self.device_memory_types {
info!(" {:?}", mt);
}
info!("Host Visible Memory Types: ");
for mt in &self.host_memory_types {
info!(" {:?}", mt);
}
}
}
| {
self.has_property_flags(br::MemoryPropertyFlags::DEVICE_LOCAL)
} | identifier_body |
graphics.rs | use crate::mthelper::SharedRef;
use bedrock as br;
use br::{
CommandBuffer, CommandPool, Device, Instance, InstanceChild, PhysicalDevice, Queue,
SubmissionBatch,
};
use log::{debug, info, warn};
use std::ops::Deref;
pub type InstanceObject = SharedRef<br::InstanceObject>;
pub type DeviceObject = SharedRef<br::DeviceObject<InstanceObject>>;
/// Queue object with family index
pub struct QueueSet<Device: br::Device> {
pub(crate) q: parking_lot::Mutex<br::QueueObject<Device>>,
pub(crate) family: u32,
}
mod command_bundle;
pub use self::command_bundle::*;
#[cfg(feature = "mt")]
mod async_fence_driver;
#[cfg(feature = "mt")]
pub use self::async_fence_driver::*;
#[derive(Debug)]
pub enum GraphicsInitializationError {
LayerEnumerationFailed(br::VkResultBox),
VulkanError(br::VkResultBox),
NoPhysicalDevices,
NoSuitableGraphicsQueue,
}
impl From<br::VkResultBox> for GraphicsInitializationError {
fn from(value: br::VkResultBox) -> Self {
Self::VulkanError(value)
}
}
impl std::fmt::Display for GraphicsInitializationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::LayerEnumerationFailed(r) => write!(f, "vk layer enumeration failed: {r}"),
Self::VulkanError(r) => std::fmt::Display::fmt(r, f),
Self::NoPhysicalDevices => write!(f, "no physical devices available on this machine"),
Self::NoSuitableGraphicsQueue => {
write!(f, "no suitable graphics queue found on device")
}
}
}
}
impl std::error::Error for GraphicsInitializationError {}
/// Graphics manager
pub struct Graphics {
pub(crate) adapter: br::PhysicalDeviceObject<InstanceObject>,
pub(crate) device: DeviceObject,
pub(crate) graphics_queue: QueueSet<DeviceObject>,
cp_onetime_submit: br::CommandPoolObject<DeviceObject>,
pub memory_type_manager: MemoryTypeManager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread<DeviceObject>,
#[cfg(feature = "debug")]
_debug_instance: br::DebugUtilsMessengerObject<InstanceObject>,
}
impl Graphics {
pub(crate) fn new(
app_name: &str,
app_version: (u32, u32, u32),
instance_extensions: Vec<&str>,
device_extensions: Vec<&str>,
features: br::vk::VkPhysicalDeviceFeatures,
) -> Result<Self, GraphicsInitializationError> {
info!("Supported Layers: ");
let mut validation_layer_available = false;
#[cfg(debug_assertions)]
for l in br::enumerate_layer_properties()
.map_err(GraphicsInitializationError::LayerEnumerationFailed)?
{
let name_str = l
.layerName
.as_cstr()
.expect("Failed to decode")
.to_str()
.expect("invalid sequence in layer name");
info!(
"* {name_str} :: {}/{}",
l.specVersion, l.implementationVersion
);
if name_str == "VK_LAYER_KHRONOS_validation" {
validation_layer_available = true;
}
}
let mut ib =
br::InstanceBuilder::new(app_name, app_version, "Interlude2:Peridot", (0, 1, 0));
ib.add_extensions(instance_extensions);
#[cfg(debug_assertions)]
ib.add_extension("VK_EXT_debug_report");
if validation_layer_available {
ib.add_layer("VK_LAYER_KHRONOS_validation");
} else {
warn!("Validation Layer is not found!");
}
#[cfg(feature = "debug")]
{
ib.add_extension("VK_EXT_debug_utils");
debug!("Debug reporting activated");
}
let instance = SharedRef::new(ib.create()?);
#[cfg(feature = "debug")]
let _debug_instance = br::DebugUtilsMessengerCreateInfo::new(crate::debug::debug_utils_out)
.filter_severity(br::DebugUtilsMessageSeverityFlags::ERROR.and_warning())
.create(instance.clone())?;
let adapter = instance
.iter_physical_devices()?
.next()
.ok_or(GraphicsInitializationError::NoPhysicalDevices)?;
let memory_type_manager = MemoryTypeManager::new(&adapter);
MemoryTypeManager::diagnose_heaps(&adapter);
memory_type_manager.diagnose_types();
let gqf_index = adapter
.queue_family_properties()
.find_matching_index(br::QueueFlags::GRAPHICS)
.ok_or(GraphicsInitializationError::NoSuitableGraphicsQueue)?;
let qci = br::DeviceQueueCreateInfo(gqf_index, vec![0.0]);
let device = {
let mut db = br::DeviceBuilder::new(&adapter);
db.add_extensions(device_extensions).add_queue(qci);
if validation_layer_available {
db.add_layer("VK_LAYER_KHRONOS_validation");
}
*db.mod_features() = features;
SharedRef::new(db.create()?.clone_parent())
};
Ok(Self {
cp_onetime_submit: device.clone().new_command_pool(gqf_index, true, false)?,
graphics_queue: QueueSet {
q: parking_lot::Mutex::new(device.clone().queue(gqf_index, 0)),
family: gqf_index,
},
adapter: adapter.clone_parent(),
device,
memory_type_manager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread::new(),
#[cfg(feature = "debug")]
_debug_instance,
})
}
/// Submits any commands as transient commands.
pub fn submit_commands(
&mut self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<()> {
let mut cb = LocalCommandBundle(
self.cp_onetime_submit.alloc(1, true)?,
&mut self.cp_onetime_submit,
);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.get_mut().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
None::<&mut br::FenceObject<DeviceObject>>,
)?;
self.graphics_queue.q.get_mut().wait()
}
pub fn submit_buffered_commands(
&mut self,
batches: &[impl br::SubmissionBatch],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue.q.get_mut().submit(batches, Some(fence))
}
pub fn submit_buffered_commands_raw(
&mut self,
batches: &[br::vk::VkSubmitInfo],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue
.q
.get_mut()
.submit_raw(batches, Some(fence))
}
/// Submits any commands as transient commands.
/// ## Note
/// Unlike other futures, commands are submitted **immediately**(even if not awaiting the returned future).
#[cfg(feature = "mt")]
pub fn submit_commands_async<'s>(
&'s self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<impl std::future::Future<Output = br::Result<()>> + 's> {
let mut fence = std::sync::Arc::new(self.device.clone().new_fence(false)?);
let mut pool = self.device.clone().new_command_pool(
self.graphics_queue_family_index(),
true,
false,
)?;
let mut cb = CommandBundle(pool.alloc(1, true)?, pool);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.lock().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
Some(unsafe { std::sync::Arc::get_mut(&mut fence).unwrap_unchecked() }),
)?;
Ok(async move {
self.await_fence(fence).await?;
// keep alive command buffers while execution
drop(cb);
Ok(())
})
}
/// Awaits fence on background thread
#[cfg(feature = "mt")]
pub const fn await_fence<'s>(
&'s self,
fence: std::sync::Arc<
impl br::Fence<ConcreteDevice = DeviceObject> + Send + Sync + 'static,
>,
) -> impl std::future::Future<Output = br::Result<()>> + 's {
FenceWaitFuture {
reactor: &self.fence_reactor,
object: fence,
registered: false,
}
}
pub fn instance(&self) -> &InstanceObject {
self.device.instance()
}
pub const fn adapter(&self) -> &br::PhysicalDeviceObject<InstanceObject> {
&self.adapter
}
pub const fn device(&self) -> &DeviceObject {
&self.device
}
pub const fn graphics_queue_family_index(&self) -> u32 {
self.graphics_queue.family
}
}
impl Deref for Graphics {
type Target = DeviceObject;
fn deref(&self) -> &DeviceObject {
&self.device
}
}
#[derive(Clone)]
pub struct MemoryType(u32, br::vk::VkMemoryType);
impl MemoryType {
pub const fn index(&self) -> u32 {
self.0
}
pub const fn corresponding_mask(&self) -> u32 {
0x01 << self.0
}
pub const fn has_covered_by_mask(&self, mask: u32) -> bool {
(mask & self.corresponding_mask()) != 0
}
pub const fn has_property_flags(&self, other: br::MemoryPropertyFlags) -> bool {
(self.1.propertyFlags & other.bits()) != 0
}
pub const fn is_device_local(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::DEVICE_LOCAL)
}
pub const fn visible_from_host(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_VISIBLE)
}
pub const fn is_host_coherent(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_COHERENT)
}
pub const fn is_host_cached(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_CACHED)
}
}
impl std::fmt::Debug for MemoryType {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
let mut flags = Vec::with_capacity(6);
if self.is_device_local() |
if self.visible_from_host() {
flags.push("HOST VISIBLE");
}
if self.is_host_cached() {
flags.push("CACHED");
}
if self.is_host_coherent() {
flags.push("COHERENT");
}
if (self.1.propertyFlags & br::vk::VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0 {
flags.push("PROTECTED");
}
if self.has_property_flags(br::MemoryPropertyFlags::LAZILY_ALLOCATED) {
flags.push("LAZILY ALLOCATED");
}
write!(
fmt,
"{}: [{}] in heap #{}",
self.index(),
flags.join("/"),
self.1.heapIndex
)
}
}
pub struct MemoryTypeManager {
device_memory_types: Vec<MemoryType>,
host_memory_types: Vec<MemoryType>,
}
impl MemoryTypeManager {
fn new(pd: &impl br::PhysicalDevice) -> Self {
let mem = pd.memory_properties();
let (mut device_memory_types, mut host_memory_types) = (Vec::new(), Vec::new());
for mt in mem
.types()
.enumerate()
.map(|(n, mt)| MemoryType(n as _, mt.clone()))
{
if mt.is_device_local() {
device_memory_types.push(mt.clone());
}
if mt.visible_from_host() {
host_memory_types.push(mt.clone());
}
}
Self {
device_memory_types,
host_memory_types,
}
}
pub fn exact_host_visible_index(
&self,
mask: u32,
required: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask) && mt.has_property_flags(required))
}
pub fn host_visible_index(
&self,
mask: u32,
preference: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.exact_host_visible_index(mask, preference).or_else(|| {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
})
}
pub fn device_local_index(&self, mask: u32) -> Option<&MemoryType> {
self.device_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
}
fn diagnose_heaps(p: &impl br::PhysicalDevice) {
info!("Memory Heaps: ");
for (n, h) in p.memory_properties().heaps().enumerate() {
let (mut nb, mut unit) = (h.size as f32, "bytes");
if nb >= 10000.0 {
nb /= 1024.0;
unit = "KB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "MB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "GB";
}
let is_device_local = (h.flags & br::vk::VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
info!(
" #{n}: {nb} {unit} {}",
if is_device_local {
"[DEVICE_LOCAL]"
} else {
""
}
);
}
}
fn diagnose_types(&self) {
info!("Device Memory Types: ");
for mt in &self.device_memory_types {
info!(" {:?}", mt);
}
info!("Host Visible Memory Types: ");
for mt in &self.host_memory_types {
info!(" {:?}", mt);
}
}
}
| {
flags.push("DEVICE LOCAL");
} | conditional_block |
screen_block.rs | use euclid::*;
use std::cmp;
use std::iter::FusedIterator;
use crate::geometry::*;
/// Coordinates of chunks in the image. The scaling factor is potentially different for every chunk
/// iterator.
struct ChunkSpace;
pub trait ScreenBlockExt {
fn internal_points(&self) -> InternalPoints;
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks;
}
impl ScreenBlockExt for ScreenBlock {
/// Create an iterator over coordinates (x, y) pairs inside the block,
/// in C order (x changes first, then y)
fn internal_points(&self) -> InternalPoints {
if self.is_empty_or_negative() {
InternalPoints::empty()
} else {
InternalPoints {
min_x: self.min.x,
max: self.max,
cursor: self.min,
}
}
}
/// Create an iterator over sub blocks in (roughly) spiral order, starting in the middle of the block.
/// Chunks are chunk_size * chunk_size large, except on the bottom and right side of the
/// block, where they may be clipped if chunk size doesn't evenly divide block size.
/// Chunk size must be larger than zero. May panic if chunk size is small (1 or 2) and block
/// size is very large.
/// Chunk size must be non zero.
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks {
assert!(chunk_size > 0);
if self.is_empty_or_negative() {
return SpiralChunks::empty();
}
let chunk_scale = Scale::new(chunk_size);
let size = divide_round_up(self.size(), chunk_scale).cast::<i32>();
let cursor = Box2D::from(size).center();
let dx = 2 * cursor.y - size.height;
debug_assert!(dx == 0 || dx == -1);
let direction = Vector2D::new(dx, -1 - dx);
SpiralChunks {
block: *self,
chunk_scale,
size,
cursor,
direction,
segment: 2,
segment_remaining: 1,
remaining: size.area() as u32,
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct InternalPoints {
min_x: u32, // Unfortunately this can't easily be Length :-( TODO: Fix this in euclid?
max: ScreenPoint,
cursor: ScreenPoint,
}
impl InternalPoints {
// Construct an iterator over internal points that returns no points
fn empty() -> Self {
InternalPoints {
min_x: 1,
max: Point2D::zero(),
cursor: Point2D::zero(),
}
}
}
impl Iterator for InternalPoints {
type Item = ScreenPoint;
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.y >= self.max.y {
return None;
}
let ret = self.cursor;
debug_assert!(self.cursor.x < self.max.x);
self.cursor.x += 1;
if self.cursor.x >= self.max.x {
self.cursor.x = self.min_x;
self.cursor.y += 1;
}
Some(ret)
}
}
impl ExactSizeIterator for InternalPoints {
fn len(&self) -> usize {
if self.cursor.y >= self.max.y {
0
} else {
let whole_rows = Box2D::new(point2(self.min_x, self.cursor.y + 1), self.max);
let current_row = Box2D::new(self.cursor, point2(self.max.x, self.cursor.y + 1));
(whole_rows.area() + current_row.area()) as usize
}
}
}
impl FusedIterator for InternalPoints {}
/// Iterator over (mostly) square blocks within a rectangular box in spiral order.
#[derive(Copy, Clone, Debug)]
pub struct SpiralChunks {
block: ScreenBlock,
chunk_scale: Scale<u32, ChunkSpace, ScreenSpace>,
size: Size2D<i32, ChunkSpace>,
cursor: Point2D<i32, ChunkSpace>,
direction: Vector2D<i32, ChunkSpace>,
segment: u32,
segment_remaining: i32,
remaining: u32,
}
impl SpiralChunks {
/// Constructs an iterator that returns no blocks.
fn empty() -> SpiralChunks {
SpiralChunks {
block: Box2D::zero(),
chunk_scale: Scale::new(0),
size: Size2D::zero(),
cursor: Point2D::zero(),
direction: vec2(1, 0),
segment: 0,
segment_remaining: 0,
remaining: 0,
}
}
/// Moves to next segment of the spiral (turns 90 degrees and calculates new segment legnth).
fn next_segment(&mut self) {
self.direction = vec2(self.direction.y, -self.direction.x);
self.segment += 1;
self.segment_remaining = (self.segment / 2) as i32;
}
/// Returns a new screen block that corresponds to the current iterator position.
fn current_block(&self) -> ScreenBlock {
let min = self.block.min + self.cursor.to_vector().cast::<u32>() * self.chunk_scale;
let max = min + vec2(1, 1) * self.chunk_scale;
let ret = ScreenBlock {
min,
max: point2(
cmp::min(self.block.max.x, max.x),
cmp::min(self.block.max.y, max.y),
),
};
debug_assert!(self.block.contains_box(&ret));
debug_assert!(!ret.is_empty_or_negative());
ret
}
}
impl Iterator for SpiralChunks {
type Item = ScreenBlock;
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.remaining as usize;
(remaining, Some(remaining))
}
fn next(&mut self) -> Option<Self::Item> {
if self.remaining == 0 {
return None;
}
let ret = self.current_block();
if self.segment_remaining == 0 {
self.next_segment();
}
let new_cursor = self.cursor + self.direction;
self.segment_remaining -= 1;
if Box2D::from(self.size).contains(new_cursor) {
// We're inside boundaries and can continue
self.cursor = new_cursor;
} else {
// Got outside of the area.
// In this case we don't move the cursor (don't use new_x and new_y) and instead
// turn to new segment immediately.
self.next_segment();
// Then we skip the whole next segment (it would be outside the area anyway)
self.cursor += self.direction * self.segment_remaining;
// And finally we turn to the next segment which is inside the area
// Note that segment_remaining for this one is wrong (since we skipped
// its part outside of the screen, but we will terminate through this branch
// of the iterator again, so it's not a problem and we don't need to fix it.
self.next_segment();
}
self.remaining -= 1;
Some(ret)
}
}
impl ExactSizeIterator for SpiralChunks {
fn len(&self) -> usize {
self.remaining as usize
}
}
impl FusedIterator for SpiralChunks {}
fn divide_round_up(
a: ScreenSize,
b: Scale<u32, ChunkSpace, ScreenSpace>,
) -> Size2D<u32, ChunkSpace> {
let div: Size2D<u32, ChunkSpace> = a / b;
let need_round_up = a.not_equal(div * b);
div + need_round_up.select_size(Size2D::new(1, 1), Size2D::zero())
}
#[cfg(test)]
mod test {
use super::*;
use crate::geometry::test::*;
use assert2::assert;
use proptest_attr_macro::proptest;
fn abs_difference(x: u32, y: u32) -> u32 {
if x < y {
y - x
} else {
x - y
}
}
fn safe_area(block: ScreenBlock) -> u32 {
if block.is_empty_or_negative() {
0
} else {
block.area()
}
}
fn check_exact_length_internal<T: Iterator + ExactSizeIterator>(
iterator: &T,
expected_length: usize,
) {
assert!(iterator.len() == expected_length);
let (min, max) = iterator.size_hint();
assert!(min == expected_length);
assert!(max.unwrap() == expected_length);
}
/// Goes through the whole iterator and checks that at every step iterator's size hint is equal
/// to its reported length and equal to the expected number of elements.
fn check_exact_length<T: Iterator + ExactSizeIterator>(
mut iterator: T,
expected_length: usize,
) {
check_exact_length_internal(&iterator, expected_length);
let mut count = 0usize;
while let Some(_) = iterator.next() {
count += 1;
check_exact_length_internal(&iterator, expected_length - count);
}
}
/// Check that all pixels in the block are covered by a pixel iterator
fn check_pixel_iterator_covers_block<T: Iterator<Item = ScreenPoint>>(
mut pixel_iterator: T,
block: ScreenBlock,
) {
let area = safe_area(block);
let mut vec = vec![false; area as usize];
while let Some(p) = pixel_iterator.next() {
assert!(block.contains(p));
let index = (p.x - block.min.x) + (p.y - block.min.y) * block.width();
assert!(!vec[index as usize]);
vec[index as usize] = true;
}
assert!(vec.into_iter().all(|v| v));
}
/// Tests that pixel iterator covers all pixels in a block
#[proptest]
fn pixel_iterator_covers_all(block: ScreenBlockWrapper) {
check_pixel_iterator_covers_block(block.internal_points(), *block);
}
/// Tests that pixel iterator is a well behaved exact length iterator
#[proptest]
fn pixel_iterator_exact_length(block: ScreenBlockWrapper) {
check_exact_length(block.internal_points(), safe_area(*block) as usize);
}
/// Tests that sub blocks of a spiral chunk iterator when iterated over cover all pixels in
/// a block
#[proptest]
fn spiral_iterator_covers_all(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
check_pixel_iterator_covers_block(
block
.spiral_chunks(chunk_size_minus_one as u32 + 1)
.flat_map(|chunk| chunk.internal_points()),
*block,
);
}
/// Tests that the spiral iterator actually goes in a spiral.
/// This test is not 100% robust, it only checs that we are going through the picture in
/// squares of increasing size. The order hovewer is just a visual feature and if it looks
/// good enough, then it's good enough.
#[proptest]
fn spiral_iterator_is_spiral(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let mut it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
if let Some(first) = it.next() |
}
/// Tests that pixel iterator is a well behaved exact length iterator
#[proptest]
fn spiral_iterator_exact_length(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
check_exact_length(it, it.len()); // Using first reported length as a baseline, because it's easy
}
#[proptest]
#[should_panic]
fn zero_sized_chunks(block: ScreenBlockWrapper) {
block.spiral_chunks(0);
}
}
| {
let mut prev_distance = 0;
for subblock in it {
let distance = cmp::max(
abs_difference(first.min.x, subblock.min.x),
abs_difference(first.min.y, subblock.min.y),
);
assert!(distance >= prev_distance);
prev_distance = distance;
}
} | conditional_block |
screen_block.rs | use euclid::*;
use std::cmp;
use std::iter::FusedIterator;
use crate::geometry::*;
/// Coordinates of chunks in the image. The scaling factor is potentially different for every chunk
/// iterator.
struct ChunkSpace;
pub trait ScreenBlockExt {
fn internal_points(&self) -> InternalPoints;
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks;
}
impl ScreenBlockExt for ScreenBlock {
/// Create an iterator over coordinates (x, y) pairs inside the block,
/// in C order (x changes first, then y)
fn internal_points(&self) -> InternalPoints {
if self.is_empty_or_negative() {
InternalPoints::empty()
} else {
InternalPoints {
min_x: self.min.x,
max: self.max,
cursor: self.min,
}
}
}
/// Create an iterator over sub blocks in (roughly) spiral order, starting in the middle of the block.
/// Chunks are chunk_size * chunk_size large, except on the bottom and right side of the
/// block, where they may be clipped if chunk size doesn't evenly divide block size.
/// Chunk size must be larger than zero. May panic if chunk size is small (1 or 2) and block
/// size is very large.
/// Chunk size must be non zero.
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks {
assert!(chunk_size > 0);
if self.is_empty_or_negative() {
return SpiralChunks::empty();
}
let chunk_scale = Scale::new(chunk_size);
let size = divide_round_up(self.size(), chunk_scale).cast::<i32>();
let cursor = Box2D::from(size).center();
let dx = 2 * cursor.y - size.height;
debug_assert!(dx == 0 || dx == -1);
let direction = Vector2D::new(dx, -1 - dx);
SpiralChunks {
block: *self,
chunk_scale,
size,
cursor,
direction,
segment: 2,
segment_remaining: 1,
remaining: size.area() as u32,
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct InternalPoints {
min_x: u32, // Unfortunately this can't easily be Length :-( TODO: Fix this in euclid?
max: ScreenPoint,
cursor: ScreenPoint,
}
impl InternalPoints {
// Construct an iterator over internal points that returns no points
fn empty() -> Self {
InternalPoints {
min_x: 1,
max: Point2D::zero(),
cursor: Point2D::zero(),
}
}
}
impl Iterator for InternalPoints {
type Item = ScreenPoint;
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.y >= self.max.y {
return None;
}
let ret = self.cursor;
debug_assert!(self.cursor.x < self.max.x);
self.cursor.x += 1;
if self.cursor.x >= self.max.x {
self.cursor.x = self.min_x;
self.cursor.y += 1;
}
Some(ret)
}
}
impl ExactSizeIterator for InternalPoints {
fn len(&self) -> usize {
if self.cursor.y >= self.max.y {
0
} else {
let whole_rows = Box2D::new(point2(self.min_x, self.cursor.y + 1), self.max);
let current_row = Box2D::new(self.cursor, point2(self.max.x, self.cursor.y + 1));
(whole_rows.area() + current_row.area()) as usize
}
}
}
impl FusedIterator for InternalPoints {}
/// Iterator over (mostly) square blocks within a rectangular box in spiral order.
#[derive(Copy, Clone, Debug)]
pub struct SpiralChunks {
block: ScreenBlock,
chunk_scale: Scale<u32, ChunkSpace, ScreenSpace>,
size: Size2D<i32, ChunkSpace>,
cursor: Point2D<i32, ChunkSpace>,
direction: Vector2D<i32, ChunkSpace>,
segment: u32,
segment_remaining: i32,
remaining: u32,
}
impl SpiralChunks {
/// Constructs an iterator that returns no blocks.
fn empty() -> SpiralChunks {
SpiralChunks {
block: Box2D::zero(),
chunk_scale: Scale::new(0),
size: Size2D::zero(),
cursor: Point2D::zero(),
direction: vec2(1, 0),
segment: 0,
segment_remaining: 0,
remaining: 0,
}
}
/// Moves to next segment of the spiral (turns 90 degrees and calculates new segment legnth).
fn next_segment(&mut self) {
self.direction = vec2(self.direction.y, -self.direction.x);
self.segment += 1;
self.segment_remaining = (self.segment / 2) as i32;
}
/// Returns a new screen block that corresponds to the current iterator position.
fn current_block(&self) -> ScreenBlock {
let min = self.block.min + self.cursor.to_vector().cast::<u32>() * self.chunk_scale;
let max = min + vec2(1, 1) * self.chunk_scale;
let ret = ScreenBlock {
min,
max: point2(
cmp::min(self.block.max.x, max.x),
cmp::min(self.block.max.y, max.y),
),
};
debug_assert!(self.block.contains_box(&ret));
debug_assert!(!ret.is_empty_or_negative());
ret
}
}
impl Iterator for SpiralChunks {
type Item = ScreenBlock;
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.remaining as usize;
(remaining, Some(remaining))
}
fn next(&mut self) -> Option<Self::Item> {
if self.remaining == 0 {
return None;
}
let ret = self.current_block();
if self.segment_remaining == 0 {
self.next_segment();
}
let new_cursor = self.cursor + self.direction;
self.segment_remaining -= 1;
if Box2D::from(self.size).contains(new_cursor) {
// We're inside boundaries and can continue
self.cursor = new_cursor;
} else {
// Got outside of the area.
// In this case we don't move the cursor (don't use new_x and new_y) and instead
// turn to new segment immediately.
self.next_segment();
// Then we skip the whole next segment (it would be outside the area anyway)
self.cursor += self.direction * self.segment_remaining;
// And finally we turn to the next segment which is inside the area
// Note that segment_remaining for this one is wrong (since we skipped
// its part outside of the screen, but we will terminate through this branch
// of the iterator again, so it's not a problem and we don't need to fix it.
self.next_segment();
}
self.remaining -= 1;
Some(ret)
}
}
impl ExactSizeIterator for SpiralChunks {
fn len(&self) -> usize {
self.remaining as usize
}
}
impl FusedIterator for SpiralChunks {}
fn divide_round_up(
a: ScreenSize,
b: Scale<u32, ChunkSpace, ScreenSpace>,
) -> Size2D<u32, ChunkSpace> {
let div: Size2D<u32, ChunkSpace> = a / b;
let need_round_up = a.not_equal(div * b);
div + need_round_up.select_size(Size2D::new(1, 1), Size2D::zero())
}
#[cfg(test)]
mod test {
use super::*;
use crate::geometry::test::*;
use assert2::assert;
use proptest_attr_macro::proptest;
fn abs_difference(x: u32, y: u32) -> u32 {
if x < y {
y - x
} else {
x - y
}
}
fn safe_area(block: ScreenBlock) -> u32 {
if block.is_empty_or_negative() {
0
} else {
block.area()
}
}
fn check_exact_length_internal<T: Iterator + ExactSizeIterator>(
iterator: &T,
expected_length: usize,
) {
assert!(iterator.len() == expected_length);
let (min, max) = iterator.size_hint();
assert!(min == expected_length);
assert!(max.unwrap() == expected_length);
}
/// Goes through the whole iterator and checks that at every step iterator's size hint is equal
/// to its reported length and equal to the expected number of elements.
fn check_exact_length<T: Iterator + ExactSizeIterator>(
mut iterator: T,
expected_length: usize,
) {
check_exact_length_internal(&iterator, expected_length);
let mut count = 0usize;
while let Some(_) = iterator.next() {
count += 1;
check_exact_length_internal(&iterator, expected_length - count);
}
}
/// Check that all pixels in the block are covered by a pixel iterator
fn check_pixel_iterator_covers_block<T: Iterator<Item = ScreenPoint>>(
mut pixel_iterator: T,
block: ScreenBlock,
) {
let area = safe_area(block);
let mut vec = vec![false; area as usize];
while let Some(p) = pixel_iterator.next() {
assert!(block.contains(p));
let index = (p.x - block.min.x) + (p.y - block.min.y) * block.width();
assert!(!vec[index as usize]);
vec[index as usize] = true;
}
assert!(vec.into_iter().all(|v| v));
}
/// Tests that pixel iterator covers all pixels in a block
#[proptest]
fn pixel_iterator_covers_all(block: ScreenBlockWrapper) {
check_pixel_iterator_covers_block(block.internal_points(), *block);
}
/// Tests that pixel iterator is a well behaved exact length iterator
#[proptest]
fn pixel_iterator_exact_length(block: ScreenBlockWrapper) {
check_exact_length(block.internal_points(), safe_area(*block) as usize);
}
/// Tests that sub blocks of a spiral chunk iterator when iterated over cover all pixels in
/// a block
#[proptest]
fn spiral_iterator_covers_all(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
check_pixel_iterator_covers_block(
block
.spiral_chunks(chunk_size_minus_one as u32 + 1)
.flat_map(|chunk| chunk.internal_points()),
*block,
);
}
/// Tests that the spiral iterator actually goes in a spiral.
/// This test is not 100% robust, it only checs that we are going through the picture in
/// squares of increasing size. The order hovewer is just a visual feature and if it looks
/// good enough, then it's good enough.
#[proptest]
fn | (block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let mut it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
if let Some(first) = it.next() {
let mut prev_distance = 0;
for subblock in it {
let distance = cmp::max(
abs_difference(first.min.x, subblock.min.x),
abs_difference(first.min.y, subblock.min.y),
);
assert!(distance >= prev_distance);
prev_distance = distance;
}
}
}
/// Tests that pixel iterator is a well behaved exact length iterator
#[proptest]
fn spiral_iterator_exact_length(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
check_exact_length(it, it.len()); // Using first reported length as a baseline, because it's easy
}
#[proptest]
#[should_panic]
fn zero_sized_chunks(block: ScreenBlockWrapper) {
block.spiral_chunks(0);
}
}
| spiral_iterator_is_spiral | identifier_name |
screen_block.rs | use euclid::*;
use std::cmp;
use std::iter::FusedIterator;
use crate::geometry::*;
/// Coordinates of chunks in the image. The scaling factor is potentially different for every chunk
/// iterator.
struct ChunkSpace;
pub trait ScreenBlockExt {
fn internal_points(&self) -> InternalPoints;
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks;
}
impl ScreenBlockExt for ScreenBlock {
/// Create an iterator over coordinates (x, y) pairs inside the block,
/// in C order (x changes first, then y)
fn internal_points(&self) -> InternalPoints {
if self.is_empty_or_negative() {
InternalPoints::empty()
} else {
InternalPoints {
min_x: self.min.x,
max: self.max,
cursor: self.min,
}
}
}
/// Create an iterator over sub blocks in (roughly) spiral order, starting in the middle of the block.
/// Chunks are chunk_size * chunk_size large, except on the bottom and right side of the
/// block, where they may be clipped if chunk size doesn't evenly divide block size.
/// Chunk size must be larger than zero. May panic if chunk size is small (1 or 2) and block
/// size is very large.
/// Chunk size must be non zero.
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks {
assert!(chunk_size > 0);
if self.is_empty_or_negative() {
return SpiralChunks::empty();
}
let chunk_scale = Scale::new(chunk_size);
let size = divide_round_up(self.size(), chunk_scale).cast::<i32>();
let cursor = Box2D::from(size).center();
let dx = 2 * cursor.y - size.height;
debug_assert!(dx == 0 || dx == -1);
let direction = Vector2D::new(dx, -1 - dx);
SpiralChunks {
block: *self,
chunk_scale,
size,
cursor,
direction,
segment: 2,
segment_remaining: 1,
remaining: size.area() as u32,
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct InternalPoints {
min_x: u32, // Unfortunately this can't easily be Length :-( TODO: Fix this in euclid?
max: ScreenPoint,
cursor: ScreenPoint,
}
impl InternalPoints {
// Construct an iterator over internal points that returns no points
fn empty() -> Self {
InternalPoints {
min_x: 1,
max: Point2D::zero(),
cursor: Point2D::zero(),
}
}
}
impl Iterator for InternalPoints {
type Item = ScreenPoint;
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.y >= self.max.y {
return None;
}
let ret = self.cursor;
debug_assert!(self.cursor.x < self.max.x);
self.cursor.x += 1;
if self.cursor.x >= self.max.x {
self.cursor.x = self.min_x;
self.cursor.y += 1;
}
Some(ret)
}
}
impl ExactSizeIterator for InternalPoints {
fn len(&self) -> usize {
if self.cursor.y >= self.max.y {
0
} else {
let whole_rows = Box2D::new(point2(self.min_x, self.cursor.y + 1), self.max);
let current_row = Box2D::new(self.cursor, point2(self.max.x, self.cursor.y + 1));
(whole_rows.area() + current_row.area()) as usize
}
}
}
impl FusedIterator for InternalPoints {}
/// Iterator over (mostly) square blocks within a rectangular box in spiral order.
#[derive(Copy, Clone, Debug)]
pub struct SpiralChunks {
block: ScreenBlock,
chunk_scale: Scale<u32, ChunkSpace, ScreenSpace>,
size: Size2D<i32, ChunkSpace>,
cursor: Point2D<i32, ChunkSpace>,
direction: Vector2D<i32, ChunkSpace>,
segment: u32,
segment_remaining: i32,
remaining: u32,
}
impl SpiralChunks {
/// Constructs an iterator that returns no blocks.
fn empty() -> SpiralChunks {
SpiralChunks {
block: Box2D::zero(),
chunk_scale: Scale::new(0),
size: Size2D::zero(),
cursor: Point2D::zero(),
direction: vec2(1, 0),
segment: 0,
segment_remaining: 0,
remaining: 0,
}
}
/// Moves to next segment of the spiral (turns 90 degrees and calculates new segment legnth).
fn next_segment(&mut self) {
self.direction = vec2(self.direction.y, -self.direction.x);
self.segment += 1;
self.segment_remaining = (self.segment / 2) as i32;
}
/// Returns a new screen block that corresponds to the current iterator position.
fn current_block(&self) -> ScreenBlock {
let min = self.block.min + self.cursor.to_vector().cast::<u32>() * self.chunk_scale;
let max = min + vec2(1, 1) * self.chunk_scale;
let ret = ScreenBlock {
min,
max: point2(
cmp::min(self.block.max.x, max.x),
cmp::min(self.block.max.y, max.y),
),
};
debug_assert!(self.block.contains_box(&ret));
debug_assert!(!ret.is_empty_or_negative());
ret
}
}
impl Iterator for SpiralChunks {
type Item = ScreenBlock;
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.remaining as usize;
(remaining, Some(remaining))
}
fn next(&mut self) -> Option<Self::Item> {
if self.remaining == 0 {
return None;
}
let ret = self.current_block();
if self.segment_remaining == 0 {
self.next_segment();
}
let new_cursor = self.cursor + self.direction;
self.segment_remaining -= 1;
if Box2D::from(self.size).contains(new_cursor) {
// We're inside boundaries and can continue
self.cursor = new_cursor;
} else {
// Got outside of the area.
// In this case we don't move the cursor (don't use new_x and new_y) and instead
// turn to new segment immediately.
self.next_segment();
// Then we skip the whole next segment (it would be outside the area anyway)
self.cursor += self.direction * self.segment_remaining;
// And finally we turn to the next segment which is inside the area
// Note that segment_remaining for this one is wrong (since we skipped
// its part outside of the screen, but we will terminate through this branch
// of the iterator again, so it's not a problem and we don't need to fix it.
self.next_segment();
}
self.remaining -= 1;
Some(ret)
}
}
impl ExactSizeIterator for SpiralChunks {
fn len(&self) -> usize {
self.remaining as usize
}
}
impl FusedIterator for SpiralChunks {}
fn divide_round_up(
a: ScreenSize,
b: Scale<u32, ChunkSpace, ScreenSpace>,
) -> Size2D<u32, ChunkSpace> {
let div: Size2D<u32, ChunkSpace> = a / b;
let need_round_up = a.not_equal(div * b);
div + need_round_up.select_size(Size2D::new(1, 1), Size2D::zero())
}
#[cfg(test)]
mod test {
use super::*;
use crate::geometry::test::*;
use assert2::assert;
use proptest_attr_macro::proptest;
fn abs_difference(x: u32, y: u32) -> u32 {
if x < y {
y - x
} else {
x - y
}
}
fn safe_area(block: ScreenBlock) -> u32 {
if block.is_empty_or_negative() {
0
} else {
block.area()
}
}
fn check_exact_length_internal<T: Iterator + ExactSizeIterator>(
iterator: &T,
expected_length: usize,
) {
assert!(iterator.len() == expected_length);
let (min, max) = iterator.size_hint();
assert!(min == expected_length);
assert!(max.unwrap() == expected_length);
}
/// Goes through the whole iterator and checks that at every step iterator's size hint is equal
/// to its reported length and equal to the expected number of elements.
fn check_exact_length<T: Iterator + ExactSizeIterator>(
mut iterator: T,
expected_length: usize,
) {
check_exact_length_internal(&iterator, expected_length);
let mut count = 0usize;
while let Some(_) = iterator.next() {
count += 1;
check_exact_length_internal(&iterator, expected_length - count);
}
}
/// Check that all pixels in the block are covered by a pixel iterator
fn check_pixel_iterator_covers_block<T: Iterator<Item = ScreenPoint>>(
mut pixel_iterator: T,
block: ScreenBlock,
) {
let area = safe_area(block);
let mut vec = vec![false; area as usize];
while let Some(p) = pixel_iterator.next() {
assert!(block.contains(p));
let index = (p.x - block.min.x) + (p.y - block.min.y) * block.width();
assert!(!vec[index as usize]);
vec[index as usize] = true;
}
assert!(vec.into_iter().all(|v| v));
}
/// Tests that pixel iterator covers all pixels in a block
#[proptest]
fn pixel_iterator_covers_all(block: ScreenBlockWrapper) |
/// Tests that pixel iterator is a well behaved exact length iterator
#[proptest]
fn pixel_iterator_exact_length(block: ScreenBlockWrapper) {
check_exact_length(block.internal_points(), safe_area(*block) as usize);
}
/// Tests that sub blocks of a spiral chunk iterator when iterated over cover all pixels in
/// a block
#[proptest]
fn spiral_iterator_covers_all(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
check_pixel_iterator_covers_block(
block
.spiral_chunks(chunk_size_minus_one as u32 + 1)
.flat_map(|chunk| chunk.internal_points()),
*block,
);
}
/// Tests that the spiral iterator actually goes in a spiral.
/// This test is not 100% robust, it only checs that we are going through the picture in
/// squares of increasing size. The order hovewer is just a visual feature and if it looks
/// good enough, then it's good enough.
#[proptest]
fn spiral_iterator_is_spiral(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let mut it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
if let Some(first) = it.next() {
let mut prev_distance = 0;
for subblock in it {
let distance = cmp::max(
abs_difference(first.min.x, subblock.min.x),
abs_difference(first.min.y, subblock.min.y),
);
assert!(distance >= prev_distance);
prev_distance = distance;
}
}
}
/// Tests that pixel iterator is a well behaved exact length iterator
#[proptest]
fn spiral_iterator_exact_length(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
check_exact_length(it, it.len()); // Using first reported length as a baseline, because it's easy
}
#[proptest]
#[should_panic]
fn zero_sized_chunks(block: ScreenBlockWrapper) {
block.spiral_chunks(0);
}
}
| {
check_pixel_iterator_covers_block(block.internal_points(), *block);
} | identifier_body |
screen_block.rs | use euclid::*;
use std::cmp;
use std::iter::FusedIterator;
use crate::geometry::*;
/// Coordinates of chunks in the image. The scaling factor is potentially different for every chunk
/// iterator.
struct ChunkSpace;
pub trait ScreenBlockExt {
fn internal_points(&self) -> InternalPoints;
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks;
}
impl ScreenBlockExt for ScreenBlock {
/// Create an iterator over coordinates (x, y) pairs inside the block,
/// in C order (x changes first, then y)
fn internal_points(&self) -> InternalPoints {
if self.is_empty_or_negative() {
InternalPoints::empty()
} else {
InternalPoints {
min_x: self.min.x,
max: self.max,
cursor: self.min,
}
}
}
/// Create an iterator over sub blocks in (roughly) spiral order, starting in the middle of the block.
/// Chunks are chunk_size * chunk_size large, except on the bottom and right side of the
/// block, where they may be clipped if chunk size doesn't evenly divide block size.
/// Chunk size must be larger than zero. May panic if chunk size is small (1 or 2) and block
/// size is very large.
/// Chunk size must be non zero.
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks {
assert!(chunk_size > 0);
if self.is_empty_or_negative() {
return SpiralChunks::empty();
}
let chunk_scale = Scale::new(chunk_size);
let size = divide_round_up(self.size(), chunk_scale).cast::<i32>();
let cursor = Box2D::from(size).center();
let dx = 2 * cursor.y - size.height;
debug_assert!(dx == 0 || dx == -1);
let direction = Vector2D::new(dx, -1 - dx);
SpiralChunks {
block: *self,
chunk_scale,
size,
cursor,
direction,
segment: 2,
segment_remaining: 1,
remaining: size.area() as u32,
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct InternalPoints {
min_x: u32, // Unfortunately this can't easily be Length :-( TODO: Fix this in euclid?
max: ScreenPoint,
cursor: ScreenPoint,
}
impl InternalPoints {
// Construct an iterator over internal points that returns no points
fn empty() -> Self {
InternalPoints {
min_x: 1,
max: Point2D::zero(),
cursor: Point2D::zero(),
}
}
}
impl Iterator for InternalPoints {
type Item = ScreenPoint;
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.y >= self.max.y {
return None;
}
let ret = self.cursor;
debug_assert!(self.cursor.x < self.max.x);
self.cursor.x += 1;
if self.cursor.x >= self.max.x {
self.cursor.x = self.min_x;
self.cursor.y += 1;
}
Some(ret)
}
}
impl ExactSizeIterator for InternalPoints {
fn len(&self) -> usize {
if self.cursor.y >= self.max.y {
0
} else {
let whole_rows = Box2D::new(point2(self.min_x, self.cursor.y + 1), self.max);
let current_row = Box2D::new(self.cursor, point2(self.max.x, self.cursor.y + 1));
(whole_rows.area() + current_row.area()) as usize
}
}
}
impl FusedIterator for InternalPoints {}
/// Iterator over (mostly) square blocks within a rectangular box in spiral order.
#[derive(Copy, Clone, Debug)]
pub struct SpiralChunks {
block: ScreenBlock,
chunk_scale: Scale<u32, ChunkSpace, ScreenSpace>,
size: Size2D<i32, ChunkSpace>,
cursor: Point2D<i32, ChunkSpace>,
direction: Vector2D<i32, ChunkSpace>,
segment: u32,
segment_remaining: i32,
remaining: u32,
}
impl SpiralChunks {
/// Constructs an iterator that returns no blocks.
fn empty() -> SpiralChunks {
SpiralChunks {
block: Box2D::zero(),
chunk_scale: Scale::new(0),
size: Size2D::zero(),
cursor: Point2D::zero(),
direction: vec2(1, 0),
segment: 0,
segment_remaining: 0,
remaining: 0,
}
}
/// Moves to next segment of the spiral (turns 90 degrees and calculates new segment legnth).
fn next_segment(&mut self) {
self.direction = vec2(self.direction.y, -self.direction.x);
self.segment += 1;
self.segment_remaining = (self.segment / 2) as i32;
}
/// Returns a new screen block that corresponds to the current iterator position.
fn current_block(&self) -> ScreenBlock {
let min = self.block.min + self.cursor.to_vector().cast::<u32>() * self.chunk_scale;
let max = min + vec2(1, 1) * self.chunk_scale;
let ret = ScreenBlock {
min,
max: point2(
cmp::min(self.block.max.x, max.x),
cmp::min(self.block.max.y, max.y),
),
};
debug_assert!(self.block.contains_box(&ret));
debug_assert!(!ret.is_empty_or_negative());
ret
}
}
impl Iterator for SpiralChunks {
type Item = ScreenBlock;
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.remaining as usize;
(remaining, Some(remaining))
}
fn next(&mut self) -> Option<Self::Item> {
if self.remaining == 0 {
return None;
}
let ret = self.current_block();
if self.segment_remaining == 0 {
self.next_segment();
}
let new_cursor = self.cursor + self.direction;
self.segment_remaining -= 1;
if Box2D::from(self.size).contains(new_cursor) {
// We're inside boundaries and can continue
self.cursor = new_cursor;
} else {
// Got outside of the area.
// In this case we don't move the cursor (don't use new_x and new_y) and instead
// turn to new segment immediately.
self.next_segment();
// Then we skip the whole next segment (it would be outside the area anyway)
self.cursor += self.direction * self.segment_remaining;
// And finally we turn to the next segment which is inside the area
// Note that segment_remaining for this one is wrong (since we skipped
// its part outside of the screen, but we will terminate through this branch
// of the iterator again, so it's not a problem and we don't need to fix it.
self.next_segment();
}
self.remaining -= 1;
Some(ret)
}
}
impl ExactSizeIterator for SpiralChunks {
fn len(&self) -> usize {
self.remaining as usize
}
}
impl FusedIterator for SpiralChunks {}
fn divide_round_up(
a: ScreenSize,
b: Scale<u32, ChunkSpace, ScreenSpace>,
) -> Size2D<u32, ChunkSpace> {
let div: Size2D<u32, ChunkSpace> = a / b;
let need_round_up = a.not_equal(div * b);
div + need_round_up.select_size(Size2D::new(1, 1), Size2D::zero())
}
#[cfg(test)]
mod test {
use super::*;
use crate::geometry::test::*;
use assert2::assert;
use proptest_attr_macro::proptest;
fn abs_difference(x: u32, y: u32) -> u32 {
if x < y {
y - x
} else {
x - y
}
}
fn safe_area(block: ScreenBlock) -> u32 {
if block.is_empty_or_negative() {
0
} else {
block.area()
}
}
fn check_exact_length_internal<T: Iterator + ExactSizeIterator>(
iterator: &T,
expected_length: usize,
) {
assert!(iterator.len() == expected_length);
let (min, max) = iterator.size_hint();
assert!(min == expected_length);
assert!(max.unwrap() == expected_length);
}
/// Goes through the whole iterator and checks that at every step iterator's size hint is equal
/// to its reported length and equal to the expected number of elements.
fn check_exact_length<T: Iterator + ExactSizeIterator>(
mut iterator: T,
expected_length: usize,
) {
check_exact_length_internal(&iterator, expected_length);
let mut count = 0usize;
while let Some(_) = iterator.next() {
count += 1;
check_exact_length_internal(&iterator, expected_length - count);
}
}
/// Check that all pixels in the block are covered by a pixel iterator
fn check_pixel_iterator_covers_block<T: Iterator<Item = ScreenPoint>>(
mut pixel_iterator: T,
block: ScreenBlock,
) {
let area = safe_area(block);
let mut vec = vec![false; area as usize];
while let Some(p) = pixel_iterator.next() {
assert!(block.contains(p));
let index = (p.x - block.min.x) + (p.y - block.min.y) * block.width();
assert!(!vec[index as usize]);
vec[index as usize] = true;
}
assert!(vec.into_iter().all(|v| v));
}
/// Tests that pixel iterator covers all pixels in a block
#[proptest]
fn pixel_iterator_covers_all(block: ScreenBlockWrapper) {
check_pixel_iterator_covers_block(block.internal_points(), *block);
}
/// Tests that pixel iterator is a well behaved exact length iterator
#[proptest]
fn pixel_iterator_exact_length(block: ScreenBlockWrapper) {
check_exact_length(block.internal_points(), safe_area(*block) as usize);
}
/// Tests that sub blocks of a spiral chunk iterator when iterated over cover all pixels in
/// a block
#[proptest]
fn spiral_iterator_covers_all(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
check_pixel_iterator_covers_block(
block
.spiral_chunks(chunk_size_minus_one as u32 + 1)
.flat_map(|chunk| chunk.internal_points()),
*block,
);
}
/// Tests that the spiral iterator actually goes in a spiral.
/// This test is not 100% robust, it only checs that we are going through the picture in
/// squares of increasing size. The order hovewer is just a visual feature and if it looks
/// good enough, then it's good enough.
#[proptest]
fn spiral_iterator_is_spiral(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let mut it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
if let Some(first) = it.next() {
let mut prev_distance = 0;
for subblock in it {
let distance = cmp::max(
abs_difference(first.min.x, subblock.min.x),
abs_difference(first.min.y, subblock.min.y),
);
assert!(distance >= prev_distance);
prev_distance = distance;
}
}
}
/// Tests that pixel iterator is a well behaved exact length iterator
#[proptest]
fn spiral_iterator_exact_length(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
check_exact_length(it, it.len()); // Using first reported length as a baseline, because it's easy
}
#[proptest] | #[should_panic]
fn zero_sized_chunks(block: ScreenBlockWrapper) {
block.spiral_chunks(0);
}
} | random_line_split | |
importer.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package srcimporter implements importing directly
// from source files rather than installed packages.
package imports
import (
"fmt"
"github.com/JohnWall2016/gogetdef/parser"
"github.com/JohnWall2016/gogetdef/types"
"go/ast"
"go/build"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
)
// An Importer provides the context for importing packages from source code.
type Importer struct {
ctxt *build.Context
fset *token.FileSet
sizes types.Sizes
typPkgs map[string]*types.Package
astPkgs *astPkgCache
info *types.Info
IncludeTests func(pkg string) bool
mode parser.Mode
}
type astPkgCache struct {
sync.RWMutex
packages map[string]*ast.Package
}
func (c *astPkgCache) cachedFile(name string) (*ast.File, bool) {
c.RLock()
defer c.RUnlock()
for _, pkg := range c.packages {
f, cached := pkg.Files[name]
if cached {
return f, cached
}
}
return nil, false
}
func (c *astPkgCache) cacheFile(name string, file *ast.File) {
c.Lock()
defer c.Unlock()
pkgName := file.Name.Name
if pkg, ok := c.packages[pkgName]; ok {
pkg.Files[name] = file
} else {
pkg = &ast.Package{
Name: pkgName,
Files: map[string]*ast.File{
name: file,
},
}
c.packages[pkgName] = pkg
}
}
func (c *astPkgCache) cachedPackage(pkgName string) (pkg *ast.Package, ok bool) {
c.RLock()
defer c.RUnlock()
pkg, ok = c.packages[pkgName]
return
}
// NewImporter returns a new Importer for the given context, file set, and map
// of packages. The context is used to resolve import paths to package paths,
// and identifying the files belonging to the package. If the context provides
// non-nil file system functions, they are used instead of the regular package
// os functions. The file set is used to track position information of package
// files; and imported packages are added to the packages map.
func NewImporter(ctxt *build.Context, fset *token.FileSet, info *types.Info, mode parser.Mode) *Importer {
return &Importer{
ctxt: ctxt,
fset: fset,
sizes: types.SizesFor(ctxt.Compiler, ctxt.GOARCH), // uses go/types default if GOARCH not found
typPkgs: make(map[string]*types.Package),
astPkgs: &astPkgCache{packages: make(map[string]*ast.Package)},
info: info,
mode: mode,
}
}
// Importing is a sentinel taking the place in Importer.packages
// for a package that is in the process of being imported.
var importing types.Package
// Import(path) is a shortcut for ImportFrom(path, "", 0).
func (p *Importer) Import(path string) (*types.Package, error) {
return p.ImportFrom(path, "", types.NoCheckCycleInDecl|types.NoCheckUsage)
}
// ImportFrom imports the package with the given import path resolved from the given srcDir,
// adds the new package to the set of packages maintained by the importer, and returns the
// package. Package path resolution and file system operations are controlled by the context
// maintained with the importer. The import mode must be zero but is otherwise ignored.
// Packages that are not comprised entirely of pure Go files may fail to import because the
// type checker may not be able to determine all exported entities (e.g. due to cgo dependencies).
func (p *Importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*types.Package, error) {
// determine package path (do vendor resolution)
var bp *build.Package
var err error
switch {
default:
if abs, err := p.absPath(srcDir); err == nil { // see issue #14282
srcDir = abs
}
bp, err = p.ctxt.Import(path, srcDir, build.FindOnly)
case build.IsLocalImport(path):
// "./x" -> "srcDir/x"
bp, err = p.ctxt.ImportDir(filepath.Join(srcDir, path), build.FindOnly)
case p.isAbsPath(path):
return nil, fmt.Errorf("invalid absolute import path %q", path)
}
if err != nil {
return nil, err // err may be *build.NoGoError - return as is
}
// package unsafe is known to the type checker
if bp.ImportPath == "unsafe" {
return types.Unsafe, nil
}
// no need to re-import if the package was imported completely before
pkg := p.typPkgs[bp.ImportPath]
if pkg != nil {
if pkg == &importing {
return nil, fmt.Errorf("import cycle through package %q", bp.ImportPath)
}
if !pkg.Complete() {
// Package exists but is not complete - we cannot handle this
// at the moment since the source importer replaces the package
// wholesale rather than augmenting it (see #19337 for details).
// Return incomplete package with error (see #16088).
return pkg, fmt.Errorf("reimported partially imported package %q", bp.ImportPath)
}
return pkg, nil
}
p.typPkgs[bp.ImportPath] = &importing
defer func() {
// clean up in case of error
// TODO(gri) Eventually we may want to leave a (possibly empty)
// package in the map in all cases (and use that package to
// identify cycles). See also issue 16088.
if p.typPkgs[bp.ImportPath] == &importing {
p.typPkgs[bp.ImportPath] = nil
}
}()
// collect package files
bp, err = p.ctxt.ImportDir(bp.Dir, 0)
if err != nil {
return nil, err // err may be *build.NoGoError - return as is
}
var filenames []string
filenames = append(filenames, bp.GoFiles...)
filenames = append(filenames, bp.CgoFiles...)
if p.IncludeTests != nil && p.IncludeTests(bp.ImportPath) {
filenames = append(filenames, bp.TestGoFiles...)
}
files, err := p.parseFiles(bp.Dir, filenames, p.mode, nil)
if err != nil {
return nil, err
}
// type-check package files
var firstHardErr error
conf := types.Config{
CheckFuncBodies: nil,
FakeImportC: true,
// continue type-checking after the first error
Error: func(err error) {
if firstHardErr == nil && !err.(types.Error).Soft {
firstHardErr = err
}
},
Importer: p,
Sizes: p.sizes,
}
pkg, err = conf.Check(bp.ImportPath, p.fset, files, p.info, mode)
if err != nil {
// If there was a hard error it is possibly unsafe
// to use the package as it may not be fully populated.
// Do not return it (see also #20837, #20855).
if firstHardErr != nil {
pkg = nil
err = firstHardErr // give preference to first hard error over any soft error
}
return pkg, fmt.Errorf("type-checking package %q failed (%v)", bp.ImportPath, err)
}
if firstHardErr != nil {
// this can only happen if we have a bug in go/types
panic("package is not safe yet no error was returned")
}
p.typPkgs[bp.ImportPath] = pkg
return pkg, nil
}
func (p *Importer) parseFiles(dir string, filenames []string, mode parser.Mode, parseFuncBodies parser.InFuncBodies) ([]*ast.File, error) |
// context-controlled file system operations
func (p *Importer) absPath(path string) (string, error) {
// TODO(gri) This should be using p.ctxt.AbsPath which doesn't
// exist but probably should. See also issue #14282.
return filepath.Abs(path)
}
func (p *Importer) isAbsPath(path string) bool {
if f := p.ctxt.IsAbsPath; f != nil {
return f(path)
}
return filepath.IsAbs(path)
}
func (p *Importer) joinPath(elem ...string) string {
if f := p.ctxt.JoinPath; f != nil {
return f(elem...)
}
return filepath.Join(elem...)
}
func (p *Importer) readDir(path string) ([]os.FileInfo, error) {
if f := p.ctxt.ReadDir; f != nil {
return f(path)
}
return ioutil.ReadDir(path)
}
func (p *Importer) openFile(path string) ([]byte, error) {
if f := p.ctxt.OpenFile; f != nil {
file, err := f(path)
if err == nil {
defer file.Close()
buf, err := ioutil.ReadAll(file)
if err == nil {
return buf, nil
}
}
}
return ioutil.ReadFile(path)
}
func (p *Importer) ParseFile(fileName string, parseFuncBodies parser.InFuncBodies) (*ast.File, error) {
astFiles, err := p.parseFiles("", []string{fileName}, p.mode, parseFuncBodies)
if err != nil {
return nil, err
}
return astFiles[0], nil
}
func (p *Importer) ParseDir(dir string) ([]*ast.File, error) {
list, err := p.readDir(dir)
if err != nil {
return nil, err
}
fileNames := make([]string, 0, len(list))
for _, f := range list {
if !f.IsDir() && strings.HasSuffix(f.Name(), ".go") && !strings.HasPrefix(f.Name(), ".") {
fileNames = append(fileNames, f.Name())
}
}
return p.parseFiles(dir, fileNames, p.mode, nil)
}
func (p *Importer) PathEnclosingInterval(fileName string, start, end token.Pos) []ast.Node {
if f, ok := p.astPkgs.cachedFile(fileName); ok {
nodes, _ := PathEnclosingInterval(f, start, end)
return nodes
}
return []ast.Node{}
}
func (p *Importer) GetCachedPackage(pkgName string) (*ast.Package, bool) {
return p.astPkgs.cachedPackage(pkgName)
}
| {
open := p.ctxt.OpenFile // possibly nil
files := make([]*ast.File, len(filenames))
errors := make([]error, len(filenames))
var wg sync.WaitGroup
wg.Add(len(filenames))
for i, filename := range filenames {
go func(i int, filepath string) {
defer wg.Done()
file, cached := p.astPkgs.cachedFile(filepath)
if cached {
files[i], errors[i] = file, nil
} else {
if open != nil {
src, err := open(filepath)
if err != nil {
errors[i] = fmt.Errorf("opening package file %s failed (%v)", filepath, err)
return
}
files[i], errors[i] = parser.ParseFile(p.fset, filepath, src, mode, parseFuncBodies)
src.Close() // ignore Close error - parsing may have succeeded which is all we need
} else {
// Special-case when ctxt doesn't provide a custom OpenFile and use the
// parser's file reading mechanism directly. This appears to be quite a
// bit faster than opening the file and providing an io.ReaderCloser in
// both cases.
// TODO(gri) investigate performance difference (issue #19281)
files[i], errors[i] = parser.ParseFile(p.fset, filepath, nil, mode, parseFuncBodies)
}
if errors[i] == nil {
p.astPkgs.cacheFile(filepath, files[i])
}
}
}(i, p.joinPath(dir, filename))
}
wg.Wait()
// if there are errors, return the first one for deterministic results
for _, err := range errors {
if err != nil {
return nil, err
}
}
return files, nil
} | identifier_body |
importer.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package srcimporter implements importing directly
// from source files rather than installed packages.
package imports
import (
"fmt"
"github.com/JohnWall2016/gogetdef/parser"
"github.com/JohnWall2016/gogetdef/types"
"go/ast"
"go/build"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
)
// An Importer provides the context for importing packages from source code.
type Importer struct {
ctxt *build.Context
fset *token.FileSet
sizes types.Sizes | astPkgs *astPkgCache
info *types.Info
IncludeTests func(pkg string) bool
mode parser.Mode
}
type astPkgCache struct {
sync.RWMutex
packages map[string]*ast.Package
}
func (c *astPkgCache) cachedFile(name string) (*ast.File, bool) {
c.RLock()
defer c.RUnlock()
for _, pkg := range c.packages {
f, cached := pkg.Files[name]
if cached {
return f, cached
}
}
return nil, false
}
func (c *astPkgCache) cacheFile(name string, file *ast.File) {
c.Lock()
defer c.Unlock()
pkgName := file.Name.Name
if pkg, ok := c.packages[pkgName]; ok {
pkg.Files[name] = file
} else {
pkg = &ast.Package{
Name: pkgName,
Files: map[string]*ast.File{
name: file,
},
}
c.packages[pkgName] = pkg
}
}
func (c *astPkgCache) cachedPackage(pkgName string) (pkg *ast.Package, ok bool) {
c.RLock()
defer c.RUnlock()
pkg, ok = c.packages[pkgName]
return
}
// NewImporter returns a new Importer for the given context, file set, and map
// of packages. The context is used to resolve import paths to package paths,
// and identifying the files belonging to the package. If the context provides
// non-nil file system functions, they are used instead of the regular package
// os functions. The file set is used to track position information of package
// files; and imported packages are added to the packages map.
func NewImporter(ctxt *build.Context, fset *token.FileSet, info *types.Info, mode parser.Mode) *Importer {
return &Importer{
ctxt: ctxt,
fset: fset,
sizes: types.SizesFor(ctxt.Compiler, ctxt.GOARCH), // uses go/types default if GOARCH not found
typPkgs: make(map[string]*types.Package),
astPkgs: &astPkgCache{packages: make(map[string]*ast.Package)},
info: info,
mode: mode,
}
}
// Importing is a sentinel taking the place in Importer.packages
// for a package that is in the process of being imported.
var importing types.Package
// Import(path) is a shortcut for ImportFrom(path, "", 0).
func (p *Importer) Import(path string) (*types.Package, error) {
return p.ImportFrom(path, "", types.NoCheckCycleInDecl|types.NoCheckUsage)
}
// ImportFrom imports the package with the given import path resolved from the given srcDir,
// adds the new package to the set of packages maintained by the importer, and returns the
// package. Package path resolution and file system operations are controlled by the context
// maintained with the importer. The import mode must be zero but is otherwise ignored.
// Packages that are not comprised entirely of pure Go files may fail to import because the
// type checker may not be able to determine all exported entities (e.g. due to cgo dependencies).
func (p *Importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*types.Package, error) {
// determine package path (do vendor resolution)
var bp *build.Package
var err error
switch {
default:
if abs, err := p.absPath(srcDir); err == nil { // see issue #14282
srcDir = abs
}
bp, err = p.ctxt.Import(path, srcDir, build.FindOnly)
case build.IsLocalImport(path):
// "./x" -> "srcDir/x"
bp, err = p.ctxt.ImportDir(filepath.Join(srcDir, path), build.FindOnly)
case p.isAbsPath(path):
return nil, fmt.Errorf("invalid absolute import path %q", path)
}
if err != nil {
return nil, err // err may be *build.NoGoError - return as is
}
// package unsafe is known to the type checker
if bp.ImportPath == "unsafe" {
return types.Unsafe, nil
}
// no need to re-import if the package was imported completely before
pkg := p.typPkgs[bp.ImportPath]
if pkg != nil {
if pkg == &importing {
return nil, fmt.Errorf("import cycle through package %q", bp.ImportPath)
}
if !pkg.Complete() {
// Package exists but is not complete - we cannot handle this
// at the moment since the source importer replaces the package
// wholesale rather than augmenting it (see #19337 for details).
// Return incomplete package with error (see #16088).
return pkg, fmt.Errorf("reimported partially imported package %q", bp.ImportPath)
}
return pkg, nil
}
p.typPkgs[bp.ImportPath] = &importing
defer func() {
// clean up in case of error
// TODO(gri) Eventually we may want to leave a (possibly empty)
// package in the map in all cases (and use that package to
// identify cycles). See also issue 16088.
if p.typPkgs[bp.ImportPath] == &importing {
p.typPkgs[bp.ImportPath] = nil
}
}()
// collect package files
bp, err = p.ctxt.ImportDir(bp.Dir, 0)
if err != nil {
return nil, err // err may be *build.NoGoError - return as is
}
var filenames []string
filenames = append(filenames, bp.GoFiles...)
filenames = append(filenames, bp.CgoFiles...)
if p.IncludeTests != nil && p.IncludeTests(bp.ImportPath) {
filenames = append(filenames, bp.TestGoFiles...)
}
files, err := p.parseFiles(bp.Dir, filenames, p.mode, nil)
if err != nil {
return nil, err
}
// type-check package files
var firstHardErr error
conf := types.Config{
CheckFuncBodies: nil,
FakeImportC: true,
// continue type-checking after the first error
Error: func(err error) {
if firstHardErr == nil && !err.(types.Error).Soft {
firstHardErr = err
}
},
Importer: p,
Sizes: p.sizes,
}
pkg, err = conf.Check(bp.ImportPath, p.fset, files, p.info, mode)
if err != nil {
// If there was a hard error it is possibly unsafe
// to use the package as it may not be fully populated.
// Do not return it (see also #20837, #20855).
if firstHardErr != nil {
pkg = nil
err = firstHardErr // give preference to first hard error over any soft error
}
return pkg, fmt.Errorf("type-checking package %q failed (%v)", bp.ImportPath, err)
}
if firstHardErr != nil {
// this can only happen if we have a bug in go/types
panic("package is not safe yet no error was returned")
}
p.typPkgs[bp.ImportPath] = pkg
return pkg, nil
}
func (p *Importer) parseFiles(dir string, filenames []string, mode parser.Mode, parseFuncBodies parser.InFuncBodies) ([]*ast.File, error) {
open := p.ctxt.OpenFile // possibly nil
files := make([]*ast.File, len(filenames))
errors := make([]error, len(filenames))
var wg sync.WaitGroup
wg.Add(len(filenames))
for i, filename := range filenames {
go func(i int, filepath string) {
defer wg.Done()
file, cached := p.astPkgs.cachedFile(filepath)
if cached {
files[i], errors[i] = file, nil
} else {
if open != nil {
src, err := open(filepath)
if err != nil {
errors[i] = fmt.Errorf("opening package file %s failed (%v)", filepath, err)
return
}
files[i], errors[i] = parser.ParseFile(p.fset, filepath, src, mode, parseFuncBodies)
src.Close() // ignore Close error - parsing may have succeeded which is all we need
} else {
// Special-case when ctxt doesn't provide a custom OpenFile and use the
// parser's file reading mechanism directly. This appears to be quite a
// bit faster than opening the file and providing an io.ReaderCloser in
// both cases.
// TODO(gri) investigate performance difference (issue #19281)
files[i], errors[i] = parser.ParseFile(p.fset, filepath, nil, mode, parseFuncBodies)
}
if errors[i] == nil {
p.astPkgs.cacheFile(filepath, files[i])
}
}
}(i, p.joinPath(dir, filename))
}
wg.Wait()
// if there are errors, return the first one for deterministic results
for _, err := range errors {
if err != nil {
return nil, err
}
}
return files, nil
}
// context-controlled file system operations
func (p *Importer) absPath(path string) (string, error) {
// TODO(gri) This should be using p.ctxt.AbsPath which doesn't
// exist but probably should. See also issue #14282.
return filepath.Abs(path)
}
func (p *Importer) isAbsPath(path string) bool {
if f := p.ctxt.IsAbsPath; f != nil {
return f(path)
}
return filepath.IsAbs(path)
}
func (p *Importer) joinPath(elem ...string) string {
if f := p.ctxt.JoinPath; f != nil {
return f(elem...)
}
return filepath.Join(elem...)
}
func (p *Importer) readDir(path string) ([]os.FileInfo, error) {
if f := p.ctxt.ReadDir; f != nil {
return f(path)
}
return ioutil.ReadDir(path)
}
func (p *Importer) openFile(path string) ([]byte, error) {
if f := p.ctxt.OpenFile; f != nil {
file, err := f(path)
if err == nil {
defer file.Close()
buf, err := ioutil.ReadAll(file)
if err == nil {
return buf, nil
}
}
}
return ioutil.ReadFile(path)
}
func (p *Importer) ParseFile(fileName string, parseFuncBodies parser.InFuncBodies) (*ast.File, error) {
astFiles, err := p.parseFiles("", []string{fileName}, p.mode, parseFuncBodies)
if err != nil {
return nil, err
}
return astFiles[0], nil
}
func (p *Importer) ParseDir(dir string) ([]*ast.File, error) {
list, err := p.readDir(dir)
if err != nil {
return nil, err
}
fileNames := make([]string, 0, len(list))
for _, f := range list {
if !f.IsDir() && strings.HasSuffix(f.Name(), ".go") && !strings.HasPrefix(f.Name(), ".") {
fileNames = append(fileNames, f.Name())
}
}
return p.parseFiles(dir, fileNames, p.mode, nil)
}
func (p *Importer) PathEnclosingInterval(fileName string, start, end token.Pos) []ast.Node {
if f, ok := p.astPkgs.cachedFile(fileName); ok {
nodes, _ := PathEnclosingInterval(f, start, end)
return nodes
}
return []ast.Node{}
}
func (p *Importer) GetCachedPackage(pkgName string) (*ast.Package, bool) {
return p.astPkgs.cachedPackage(pkgName)
} | typPkgs map[string]*types.Package | random_line_split |
importer.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package srcimporter implements importing directly
// from source files rather than installed packages.
package imports
import (
"fmt"
"github.com/JohnWall2016/gogetdef/parser"
"github.com/JohnWall2016/gogetdef/types"
"go/ast"
"go/build"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
)
// An Importer provides the context for importing packages from source code.
type Importer struct {
ctxt *build.Context
fset *token.FileSet
sizes types.Sizes
typPkgs map[string]*types.Package
astPkgs *astPkgCache
info *types.Info
IncludeTests func(pkg string) bool
mode parser.Mode
}
type astPkgCache struct {
sync.RWMutex
packages map[string]*ast.Package
}
func (c *astPkgCache) cachedFile(name string) (*ast.File, bool) {
c.RLock()
defer c.RUnlock()
for _, pkg := range c.packages {
f, cached := pkg.Files[name]
if cached {
return f, cached
}
}
return nil, false
}
func (c *astPkgCache) cacheFile(name string, file *ast.File) {
c.Lock()
defer c.Unlock()
pkgName := file.Name.Name
if pkg, ok := c.packages[pkgName]; ok {
pkg.Files[name] = file
} else {
pkg = &ast.Package{
Name: pkgName,
Files: map[string]*ast.File{
name: file,
},
}
c.packages[pkgName] = pkg
}
}
func (c *astPkgCache) cachedPackage(pkgName string) (pkg *ast.Package, ok bool) {
c.RLock()
defer c.RUnlock()
pkg, ok = c.packages[pkgName]
return
}
// NewImporter returns a new Importer for the given context, file set, and map
// of packages. The context is used to resolve import paths to package paths,
// and identifying the files belonging to the package. If the context provides
// non-nil file system functions, they are used instead of the regular package
// os functions. The file set is used to track position information of package
// files; and imported packages are added to the packages map.
func NewImporter(ctxt *build.Context, fset *token.FileSet, info *types.Info, mode parser.Mode) *Importer {
return &Importer{
ctxt: ctxt,
fset: fset,
sizes: types.SizesFor(ctxt.Compiler, ctxt.GOARCH), // uses go/types default if GOARCH not found
typPkgs: make(map[string]*types.Package),
astPkgs: &astPkgCache{packages: make(map[string]*ast.Package)},
info: info,
mode: mode,
}
}
// Importing is a sentinel taking the place in Importer.packages
// for a package that is in the process of being imported.
var importing types.Package
// Import(path) is a shortcut for ImportFrom(path, "", 0).
func (p *Importer) Import(path string) (*types.Package, error) {
return p.ImportFrom(path, "", types.NoCheckCycleInDecl|types.NoCheckUsage)
}
// ImportFrom imports the package with the given import path resolved from the given srcDir,
// adds the new package to the set of packages maintained by the importer, and returns the
// package. Package path resolution and file system operations are controlled by the context
// maintained with the importer. The import mode must be zero but is otherwise ignored.
// Packages that are not comprised entirely of pure Go files may fail to import because the
// type checker may not be able to determine all exported entities (e.g. due to cgo dependencies).
func (p *Importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*types.Package, error) {
// determine package path (do vendor resolution)
var bp *build.Package
var err error
switch {
default:
if abs, err := p.absPath(srcDir); err == nil { // see issue #14282
srcDir = abs
}
bp, err = p.ctxt.Import(path, srcDir, build.FindOnly)
case build.IsLocalImport(path):
// "./x" -> "srcDir/x"
bp, err = p.ctxt.ImportDir(filepath.Join(srcDir, path), build.FindOnly)
case p.isAbsPath(path):
return nil, fmt.Errorf("invalid absolute import path %q", path)
}
if err != nil {
return nil, err // err may be *build.NoGoError - return as is
}
// package unsafe is known to the type checker
if bp.ImportPath == "unsafe" {
return types.Unsafe, nil
}
// no need to re-import if the package was imported completely before
pkg := p.typPkgs[bp.ImportPath]
if pkg != nil {
if pkg == &importing {
return nil, fmt.Errorf("import cycle through package %q", bp.ImportPath)
}
if !pkg.Complete() {
// Package exists but is not complete - we cannot handle this
// at the moment since the source importer replaces the package
// wholesale rather than augmenting it (see #19337 for details).
// Return incomplete package with error (see #16088).
return pkg, fmt.Errorf("reimported partially imported package %q", bp.ImportPath)
}
return pkg, nil
}
p.typPkgs[bp.ImportPath] = &importing
defer func() {
// clean up in case of error
// TODO(gri) Eventually we may want to leave a (possibly empty)
// package in the map in all cases (and use that package to
// identify cycles). See also issue 16088.
if p.typPkgs[bp.ImportPath] == &importing {
p.typPkgs[bp.ImportPath] = nil
}
}()
// collect package files
bp, err = p.ctxt.ImportDir(bp.Dir, 0)
if err != nil {
return nil, err // err may be *build.NoGoError - return as is
}
var filenames []string
filenames = append(filenames, bp.GoFiles...)
filenames = append(filenames, bp.CgoFiles...)
if p.IncludeTests != nil && p.IncludeTests(bp.ImportPath) {
filenames = append(filenames, bp.TestGoFiles...)
}
files, err := p.parseFiles(bp.Dir, filenames, p.mode, nil)
if err != nil {
return nil, err
}
// type-check package files
var firstHardErr error
conf := types.Config{
CheckFuncBodies: nil,
FakeImportC: true,
// continue type-checking after the first error
Error: func(err error) {
if firstHardErr == nil && !err.(types.Error).Soft {
firstHardErr = err
}
},
Importer: p,
Sizes: p.sizes,
}
pkg, err = conf.Check(bp.ImportPath, p.fset, files, p.info, mode)
if err != nil {
// If there was a hard error it is possibly unsafe
// to use the package as it may not be fully populated.
// Do not return it (see also #20837, #20855).
if firstHardErr != nil {
pkg = nil
err = firstHardErr // give preference to first hard error over any soft error
}
return pkg, fmt.Errorf("type-checking package %q failed (%v)", bp.ImportPath, err)
}
if firstHardErr != nil {
// this can only happen if we have a bug in go/types
panic("package is not safe yet no error was returned")
}
p.typPkgs[bp.ImportPath] = pkg
return pkg, nil
}
func (p *Importer) parseFiles(dir string, filenames []string, mode parser.Mode, parseFuncBodies parser.InFuncBodies) ([]*ast.File, error) {
open := p.ctxt.OpenFile // possibly nil
files := make([]*ast.File, len(filenames))
errors := make([]error, len(filenames))
var wg sync.WaitGroup
wg.Add(len(filenames))
for i, filename := range filenames {
go func(i int, filepath string) {
defer wg.Done()
file, cached := p.astPkgs.cachedFile(filepath)
if cached {
files[i], errors[i] = file, nil
} else {
if open != nil {
src, err := open(filepath)
if err != nil {
errors[i] = fmt.Errorf("opening package file %s failed (%v)", filepath, err)
return
}
files[i], errors[i] = parser.ParseFile(p.fset, filepath, src, mode, parseFuncBodies)
src.Close() // ignore Close error - parsing may have succeeded which is all we need
} else {
// Special-case when ctxt doesn't provide a custom OpenFile and use the
// parser's file reading mechanism directly. This appears to be quite a
// bit faster than opening the file and providing an io.ReaderCloser in
// both cases.
// TODO(gri) investigate performance difference (issue #19281)
files[i], errors[i] = parser.ParseFile(p.fset, filepath, nil, mode, parseFuncBodies)
}
if errors[i] == nil {
p.astPkgs.cacheFile(filepath, files[i])
}
}
}(i, p.joinPath(dir, filename))
}
wg.Wait()
// if there are errors, return the first one for deterministic results
for _, err := range errors {
if err != nil {
return nil, err
}
}
return files, nil
}
// context-controlled file system operations
func (p *Importer) | (path string) (string, error) {
// TODO(gri) This should be using p.ctxt.AbsPath which doesn't
// exist but probably should. See also issue #14282.
return filepath.Abs(path)
}
func (p *Importer) isAbsPath(path string) bool {
if f := p.ctxt.IsAbsPath; f != nil {
return f(path)
}
return filepath.IsAbs(path)
}
func (p *Importer) joinPath(elem ...string) string {
if f := p.ctxt.JoinPath; f != nil {
return f(elem...)
}
return filepath.Join(elem...)
}
func (p *Importer) readDir(path string) ([]os.FileInfo, error) {
if f := p.ctxt.ReadDir; f != nil {
return f(path)
}
return ioutil.ReadDir(path)
}
func (p *Importer) openFile(path string) ([]byte, error) {
if f := p.ctxt.OpenFile; f != nil {
file, err := f(path)
if err == nil {
defer file.Close()
buf, err := ioutil.ReadAll(file)
if err == nil {
return buf, nil
}
}
}
return ioutil.ReadFile(path)
}
func (p *Importer) ParseFile(fileName string, parseFuncBodies parser.InFuncBodies) (*ast.File, error) {
astFiles, err := p.parseFiles("", []string{fileName}, p.mode, parseFuncBodies)
if err != nil {
return nil, err
}
return astFiles[0], nil
}
func (p *Importer) ParseDir(dir string) ([]*ast.File, error) {
list, err := p.readDir(dir)
if err != nil {
return nil, err
}
fileNames := make([]string, 0, len(list))
for _, f := range list {
if !f.IsDir() && strings.HasSuffix(f.Name(), ".go") && !strings.HasPrefix(f.Name(), ".") {
fileNames = append(fileNames, f.Name())
}
}
return p.parseFiles(dir, fileNames, p.mode, nil)
}
func (p *Importer) PathEnclosingInterval(fileName string, start, end token.Pos) []ast.Node {
if f, ok := p.astPkgs.cachedFile(fileName); ok {
nodes, _ := PathEnclosingInterval(f, start, end)
return nodes
}
return []ast.Node{}
}
func (p *Importer) GetCachedPackage(pkgName string) (*ast.Package, bool) {
return p.astPkgs.cachedPackage(pkgName)
}
| absPath | identifier_name |
importer.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package srcimporter implements importing directly
// from source files rather than installed packages.
package imports
import (
"fmt"
"github.com/JohnWall2016/gogetdef/parser"
"github.com/JohnWall2016/gogetdef/types"
"go/ast"
"go/build"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
)
// An Importer provides the context for importing packages from source code.
type Importer struct {
ctxt *build.Context
fset *token.FileSet
sizes types.Sizes
typPkgs map[string]*types.Package
astPkgs *astPkgCache
info *types.Info
IncludeTests func(pkg string) bool
mode parser.Mode
}
type astPkgCache struct {
sync.RWMutex
packages map[string]*ast.Package
}
func (c *astPkgCache) cachedFile(name string) (*ast.File, bool) {
c.RLock()
defer c.RUnlock()
for _, pkg := range c.packages {
f, cached := pkg.Files[name]
if cached {
return f, cached
}
}
return nil, false
}
func (c *astPkgCache) cacheFile(name string, file *ast.File) {
c.Lock()
defer c.Unlock()
pkgName := file.Name.Name
if pkg, ok := c.packages[pkgName]; ok {
pkg.Files[name] = file
} else {
pkg = &ast.Package{
Name: pkgName,
Files: map[string]*ast.File{
name: file,
},
}
c.packages[pkgName] = pkg
}
}
func (c *astPkgCache) cachedPackage(pkgName string) (pkg *ast.Package, ok bool) {
c.RLock()
defer c.RUnlock()
pkg, ok = c.packages[pkgName]
return
}
// NewImporter returns a new Importer for the given context, file set, and map
// of packages. The context is used to resolve import paths to package paths,
// and identifying the files belonging to the package. If the context provides
// non-nil file system functions, they are used instead of the regular package
// os functions. The file set is used to track position information of package
// files; and imported packages are added to the packages map.
func NewImporter(ctxt *build.Context, fset *token.FileSet, info *types.Info, mode parser.Mode) *Importer {
return &Importer{
ctxt: ctxt,
fset: fset,
sizes: types.SizesFor(ctxt.Compiler, ctxt.GOARCH), // uses go/types default if GOARCH not found
typPkgs: make(map[string]*types.Package),
astPkgs: &astPkgCache{packages: make(map[string]*ast.Package)},
info: info,
mode: mode,
}
}
// Importing is a sentinel taking the place in Importer.packages
// for a package that is in the process of being imported.
var importing types.Package
// Import(path) is a shortcut for ImportFrom(path, "", 0).
func (p *Importer) Import(path string) (*types.Package, error) {
return p.ImportFrom(path, "", types.NoCheckCycleInDecl|types.NoCheckUsage)
}
// ImportFrom imports the package with the given import path resolved from the given srcDir,
// adds the new package to the set of packages maintained by the importer, and returns the
// package. Package path resolution and file system operations are controlled by the context
// maintained with the importer. The import mode must be zero but is otherwise ignored.
// Packages that are not comprised entirely of pure Go files may fail to import because the
// type checker may not be able to determine all exported entities (e.g. due to cgo dependencies).
func (p *Importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*types.Package, error) {
// determine package path (do vendor resolution)
var bp *build.Package
var err error
switch {
default:
if abs, err := p.absPath(srcDir); err == nil { // see issue #14282
srcDir = abs
}
bp, err = p.ctxt.Import(path, srcDir, build.FindOnly)
case build.IsLocalImport(path):
// "./x" -> "srcDir/x"
bp, err = p.ctxt.ImportDir(filepath.Join(srcDir, path), build.FindOnly)
case p.isAbsPath(path):
return nil, fmt.Errorf("invalid absolute import path %q", path)
}
if err != nil {
return nil, err // err may be *build.NoGoError - return as is
}
// package unsafe is known to the type checker
if bp.ImportPath == "unsafe" {
return types.Unsafe, nil
}
// no need to re-import if the package was imported completely before
pkg := p.typPkgs[bp.ImportPath]
if pkg != nil {
if pkg == &importing {
return nil, fmt.Errorf("import cycle through package %q", bp.ImportPath)
}
if !pkg.Complete() {
// Package exists but is not complete - we cannot handle this
// at the moment since the source importer replaces the package
// wholesale rather than augmenting it (see #19337 for details).
// Return incomplete package with error (see #16088).
return pkg, fmt.Errorf("reimported partially imported package %q", bp.ImportPath)
}
return pkg, nil
}
p.typPkgs[bp.ImportPath] = &importing
defer func() {
// clean up in case of error
// TODO(gri) Eventually we may want to leave a (possibly empty)
// package in the map in all cases (and use that package to
// identify cycles). See also issue 16088.
if p.typPkgs[bp.ImportPath] == &importing {
p.typPkgs[bp.ImportPath] = nil
}
}()
// collect package files
bp, err = p.ctxt.ImportDir(bp.Dir, 0)
if err != nil {
return nil, err // err may be *build.NoGoError - return as is
}
var filenames []string
filenames = append(filenames, bp.GoFiles...)
filenames = append(filenames, bp.CgoFiles...)
if p.IncludeTests != nil && p.IncludeTests(bp.ImportPath) {
filenames = append(filenames, bp.TestGoFiles...)
}
files, err := p.parseFiles(bp.Dir, filenames, p.mode, nil)
if err != nil {
return nil, err
}
// type-check package files
var firstHardErr error
conf := types.Config{
CheckFuncBodies: nil,
FakeImportC: true,
// continue type-checking after the first error
Error: func(err error) {
if firstHardErr == nil && !err.(types.Error).Soft {
firstHardErr = err
}
},
Importer: p,
Sizes: p.sizes,
}
pkg, err = conf.Check(bp.ImportPath, p.fset, files, p.info, mode)
if err != nil {
// If there was a hard error it is possibly unsafe
// to use the package as it may not be fully populated.
// Do not return it (see also #20837, #20855).
if firstHardErr != nil {
pkg = nil
err = firstHardErr // give preference to first hard error over any soft error
}
return pkg, fmt.Errorf("type-checking package %q failed (%v)", bp.ImportPath, err)
}
if firstHardErr != nil |
p.typPkgs[bp.ImportPath] = pkg
return pkg, nil
}
func (p *Importer) parseFiles(dir string, filenames []string, mode parser.Mode, parseFuncBodies parser.InFuncBodies) ([]*ast.File, error) {
open := p.ctxt.OpenFile // possibly nil
files := make([]*ast.File, len(filenames))
errors := make([]error, len(filenames))
var wg sync.WaitGroup
wg.Add(len(filenames))
for i, filename := range filenames {
go func(i int, filepath string) {
defer wg.Done()
file, cached := p.astPkgs.cachedFile(filepath)
if cached {
files[i], errors[i] = file, nil
} else {
if open != nil {
src, err := open(filepath)
if err != nil {
errors[i] = fmt.Errorf("opening package file %s failed (%v)", filepath, err)
return
}
files[i], errors[i] = parser.ParseFile(p.fset, filepath, src, mode, parseFuncBodies)
src.Close() // ignore Close error - parsing may have succeeded which is all we need
} else {
// Special-case when ctxt doesn't provide a custom OpenFile and use the
// parser's file reading mechanism directly. This appears to be quite a
// bit faster than opening the file and providing an io.ReaderCloser in
// both cases.
// TODO(gri) investigate performance difference (issue #19281)
files[i], errors[i] = parser.ParseFile(p.fset, filepath, nil, mode, parseFuncBodies)
}
if errors[i] == nil {
p.astPkgs.cacheFile(filepath, files[i])
}
}
}(i, p.joinPath(dir, filename))
}
wg.Wait()
// if there are errors, return the first one for deterministic results
for _, err := range errors {
if err != nil {
return nil, err
}
}
return files, nil
}
// context-controlled file system operations
func (p *Importer) absPath(path string) (string, error) {
// TODO(gri) This should be using p.ctxt.AbsPath which doesn't
// exist but probably should. See also issue #14282.
return filepath.Abs(path)
}
func (p *Importer) isAbsPath(path string) bool {
if f := p.ctxt.IsAbsPath; f != nil {
return f(path)
}
return filepath.IsAbs(path)
}
func (p *Importer) joinPath(elem ...string) string {
if f := p.ctxt.JoinPath; f != nil {
return f(elem...)
}
return filepath.Join(elem...)
}
func (p *Importer) readDir(path string) ([]os.FileInfo, error) {
if f := p.ctxt.ReadDir; f != nil {
return f(path)
}
return ioutil.ReadDir(path)
}
func (p *Importer) openFile(path string) ([]byte, error) {
if f := p.ctxt.OpenFile; f != nil {
file, err := f(path)
if err == nil {
defer file.Close()
buf, err := ioutil.ReadAll(file)
if err == nil {
return buf, nil
}
}
}
return ioutil.ReadFile(path)
}
func (p *Importer) ParseFile(fileName string, parseFuncBodies parser.InFuncBodies) (*ast.File, error) {
astFiles, err := p.parseFiles("", []string{fileName}, p.mode, parseFuncBodies)
if err != nil {
return nil, err
}
return astFiles[0], nil
}
func (p *Importer) ParseDir(dir string) ([]*ast.File, error) {
list, err := p.readDir(dir)
if err != nil {
return nil, err
}
fileNames := make([]string, 0, len(list))
for _, f := range list {
if !f.IsDir() && strings.HasSuffix(f.Name(), ".go") && !strings.HasPrefix(f.Name(), ".") {
fileNames = append(fileNames, f.Name())
}
}
return p.parseFiles(dir, fileNames, p.mode, nil)
}
func (p *Importer) PathEnclosingInterval(fileName string, start, end token.Pos) []ast.Node {
if f, ok := p.astPkgs.cachedFile(fileName); ok {
nodes, _ := PathEnclosingInterval(f, start, end)
return nodes
}
return []ast.Node{}
}
func (p *Importer) GetCachedPackage(pkgName string) (*ast.Package, bool) {
return p.astPkgs.cachedPackage(pkgName)
}
| {
// this can only happen if we have a bug in go/types
panic("package is not safe yet no error was returned")
} | conditional_block |
springbootapp_controller.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/prometheus/common/log"
autoscalingv1 "k8s.io/api/autoscaling/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"reflect"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
appsv1 "k8s.io/api/apps/v1"
appv1 "github.com/cy18cn/spring-boot-operator/api/v1"
)
// SpringBootAppReconciler reconciles a SpringBootApp object
type SpringBootAppReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
}
const (
DefaultConfigVolumeName = "app-config"
DefaultMinReplicas int32 = 2
DefaultMaxReplicas int32 = 6
)
// +kubebuilder:rbac:groups=app.k8s.airparking.cn,resources=springbootapps,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=app.k8s.airparking.cn,resources=springbootapps/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=services/status,verbs=get
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers/status,verbs=get
func (r *SpringBootAppReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
_ = r.Log.WithValues("springbootapp", req.NamespacedName)
springBootApp := &appv1.SpringBootApp{}
if err := r.Get(ctx, req.NamespacedName, springBootApp); err != nil {
if errors.IsNotFound(err) {
log.Error(err, "unable to found SpringBootAPP")
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return ctrl.Result{}, nil
}
// Error request get springBootApp, requeue the request
return ctrl.Result{}, err
}
labels := map[string]string{
"app": springBootApp.Name,
"version": springBootApp.Spec.Version,
"deployment": fmt.Sprintf("%s-deployment", springBootApp.Name),
}
err := r.createOrUpdateDeployment(ctx, springBootApp, labels)
if err != nil {
return ctrl.Result{}, err
}
err = r.createOrUpdaterService(ctx, springBootApp, labels)
if err != nil {
return ctrl.Result{}, err
}
err = r.createOrUpdateHPA(ctx, springBootApp, labels)
if err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *SpringBootAppReconciler) | (mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&appv1.SpringBootApp{}).
Complete(r)
}
func (r *SpringBootAppReconciler) createOrUpdateDeployment(
ctx context.Context,
springBootApp *appv1.SpringBootApp,
labels map[string]string,
) error {
// Define the desired Deployment object
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", springBootApp.Name, springBootApp.Spec.Version),
Namespace: springBootApp.Namespace,
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Replicas: springBootApp.Spec.MinReplicas,
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
springBootContainer(springBootApp),
},
Volumes: []corev1.Volume{
{
Name: DefaultConfigVolumeName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: springBootApp.Spec.ConfigMap,
},
},
},
},
},
},
},
},
}
imagePullSecrets := imagePullSecrets(springBootApp.Spec.ImagePullSecrets)
if imagePullSecrets != nil {
deployment.Spec.Template.Spec.ImagePullSecrets = imagePullSecrets
}
affinity := podAffinity(springBootApp.Spec.PodAffinity, springBootApp.Spec.PodAntiAffinity)
if affinity != nil {
deployment.Spec.Template.Spec.Affinity = affinity
}
if err := controllerutil.SetControllerReference(springBootApp, deployment, r.Scheme); err != nil {
return err
}
// check if deployment is existed
foundedDep := &appsv1.Deployment{}
if err := r.Get(
ctx,
types.NamespacedName{Name: deployment.Name, Namespace: deployment.Namespace},
foundedDep,
); err != nil {
if errors.IsNotFound(err) {
log.Info("Creating Deployment", "namespace", deployment.Namespace, "name", deployment.Name)
err = r.Create(ctx, deployment)
}
return err
}
// update the found object and write result back if it is changed
if !reflect.DeepEqual(deployment, foundedDep) {
foundedDep.Spec = deployment.Spec
log.Info("Updating deployment", "namespace", foundedDep.Namespace, "name", foundedDep.Name)
err := r.Update(ctx, foundedDep)
if err != nil {
return err
}
}
return nil
}
func (r *SpringBootAppReconciler) createOrUpdaterService(
ctx context.Context,
springBootApp *appv1.SpringBootApp,
labels map[string]string,
) error {
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: springBootApp.Name,
Namespace: springBootApp.Namespace,
Labels: labels,
},
Spec: corev1.ServiceSpec{
Selector: map[string]string{
"app": springBootApp.Name,
},
},
}
if springBootApp.Spec.Ports != nil {
var ports []corev1.ServicePort
for _, port := range springBootApp.Spec.Ports {
ports = append(ports, corev1.ServicePort{
Name: "http",
Port: port.ContainerPort,
TargetPort: intstr.IntOrString{IntVal: port.ContainerPort},
})
}
svc.Spec.Ports = ports
} else {
svc.Spec.Ports = []corev1.ServicePort{
{
Name: "http",
Port: 8080,
TargetPort: intstr.IntOrString{IntVal: 8080},
},
}
}
if err := controllerutil.SetControllerReference(springBootApp, svc, r.Scheme); err != nil {
return err
}
foundedSvc := &corev1.Service{}
if err := r.Get(
ctx,
types.NamespacedName{Name: springBootApp.Name, Namespace: springBootApp.Namespace},
foundedSvc,
); err != nil {
if errors.IsNotFound(err) {
log.Info("Creating Service", "namespace", springBootApp.Namespace, "name", springBootApp.Name)
err = r.Create(ctx, svc)
}
return err
}
if !reflect.DeepEqual(foundedSvc.Spec.Ports, svc.Spec.Ports) {
foundedSvc.Spec.Ports = svc.Spec.Ports
log.Info("Updating Service", "namespace", foundedSvc.Namespace, "name", foundedSvc.Name)
err := r.Update(ctx, foundedSvc)
if err != nil {
return err
}
}
return nil
}
func (r *SpringBootAppReconciler) createOrUpdateHPA(
ctx context.Context,
springBootApp *appv1.SpringBootApp,
labels map[string]string,
) error {
// hpa
hpa := &autoscalingv1.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: springBootApp.Name,
Labels: labels,
Namespace: springBootApp.Namespace,
},
Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
Kind: "Deployment",
Name: springBootApp.Name,
APIVersion: "apps/v1",
},
},
}
if springBootApp.Spec.MinReplicas != nil {
hpa.Spec.MinReplicas = springBootApp.Spec.MinReplicas
} else {
minReplicas := DefaultMinReplicas
hpa.Spec.MinReplicas = &minReplicas
}
if springBootApp.Spec.MaxReplicas != nil {
hpa.Spec.MaxReplicas = *springBootApp.Spec.MaxReplicas
} else {
hpa.Spec.MaxReplicas = DefaultMaxReplicas
}
if springBootApp.Spec.TargetCPUUtilizationPercentage != nil {
hpa.Spec.TargetCPUUtilizationPercentage = springBootApp.Spec.TargetCPUUtilizationPercentage
}
// SetControllerReference sets owner as a Controller OwnerReference on controlled.
if err := controllerutil.SetControllerReference(springBootApp, hpa, r.Scheme); err != nil {
return err
}
foundedHPA := &autoscalingv1.HorizontalPodAutoscaler{}
if err := r.Get(
ctx,
types.NamespacedName{Name: hpa.Name, Namespace: hpa.Namespace},
foundedHPA,
); err != nil {
if errors.IsNotFound(err) {
log.Info("Creating HorizontalPodAutoscaler", "namespace", hpa.Namespace, "name", hpa.Name)
err = r.Create(ctx, hpa)
}
return err
}
if foundedHPA.Spec.MinReplicas != hpa.Spec.MinReplicas ||
foundedHPA.Spec.MaxReplicas != hpa.Spec.MaxReplicas ||
foundedHPA.Spec.TargetCPUUtilizationPercentage != hpa.Spec.TargetCPUUtilizationPercentage {
foundedHPA.Spec.MinReplicas = hpa.Spec.MinReplicas
foundedHPA.Spec.MaxReplicas = hpa.Spec.MaxReplicas
foundedHPA.Spec.TargetCPUUtilizationPercentage = hpa.Spec.TargetCPUUtilizationPercentage
log.Info("Updating Service", "namespace", hpa.Namespace, "name", hpa.Name)
err := r.Update(ctx, foundedHPA)
if err != nil {
return err
}
}
return nil
}
func springBootContainer(springBootApp *appv1.SpringBootApp) corev1.Container {
container := corev1.Container{
Name: springBootApp.Name,
Image: fmt.Sprintf("%s:%s", springBootApp.Spec.ImageRepo, springBootApp.Spec.AppImage),
Ports: springBootApp.Spec.Ports,
Env: springBootApp.Spec.Env,
}
if len(springBootApp.Spec.LivenessProbePath) > 0 {
container.LivenessProbe = buildProbe(springBootApp.Spec.LivenessProbePath, 300, 30)
}
if len(springBootApp.Spec.ReadinessProbePath) > 0 {
container.ReadinessProbe = buildProbe(springBootApp.Spec.ReadinessProbePath, 30, 5)
}
if len(springBootApp.Spec.ConfigMap) > 0 {
container.VolumeMounts = []corev1.VolumeMount{
{
Name: DefaultConfigVolumeName,
ReadOnly: true,
MountPath: "/usr/local/app/config/application.yml",
SubPath: "application.yml",
},
}
}
if springBootApp.Spec.Resources != nil {
container.Resources = *springBootApp.Spec.Resources
} else {
container.Resources = corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceCPU: *resource.NewQuantity(1000, resource.DecimalSI),
corev1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
},
Requests: corev1.ResourceList{
corev1.ResourceCPU: *resource.NewQuantity(100, resource.DecimalSI),
corev1.ResourceMemory: *resource.NewQuantity(512, resource.BinarySI),
},
}
}
if springBootApp.Spec.ImagePullPolicy != nil {
container.ImagePullPolicy = *springBootApp.Spec.ImagePullPolicy
} else {
container.ImagePullPolicy = corev1.PullIfNotPresent
}
return container
}
const (
DefaultFailureThreshold = 5
DefaultSuccessThreshold = 1
DefaultTimeoutSeconds = 5
)
func buildProbe(path string, delay, timeout int32) *corev1.Probe {
return &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: path,
Port: intstr.IntOrString{IntVal: 8080},
Scheme: corev1.URISchemeHTTP,
},
},
FailureThreshold: DefaultFailureThreshold,
InitialDelaySeconds: delay,
SuccessThreshold: DefaultSuccessThreshold,
PeriodSeconds: timeout,
TimeoutSeconds: DefaultTimeoutSeconds,
}
}
func podAffinity(podAffinity *corev1.PodAffinity, podAntiAffinity *corev1.PodAntiAffinity) *corev1.Affinity {
if podAffinity == nil && podAntiAffinity == nil {
return nil
}
affinity := &corev1.Affinity{}
if podAffinity != nil {
affinity.PodAffinity = podAffinity
}
if podAntiAffinity != nil {
affinity.PodAntiAffinity = podAntiAffinity
}
return affinity
}
func imagePullSecrets(imagePullSecrets string) []corev1.LocalObjectReference {
if len(imagePullSecrets) == 0 {
return nil
}
return []corev1.LocalObjectReference{
{Name: imagePullSecrets},
}
}
| SetupWithManager | identifier_name |
springbootapp_controller.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/prometheus/common/log"
autoscalingv1 "k8s.io/api/autoscaling/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"reflect"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
appsv1 "k8s.io/api/apps/v1"
appv1 "github.com/cy18cn/spring-boot-operator/api/v1"
)
// SpringBootAppReconciler reconciles a SpringBootApp object
type SpringBootAppReconciler struct {
	client.Client
	// Log is the base logger; Reconcile derives a per-request logger from it.
	Log logr.Logger
	// Scheme is used to set controller owner references on created children.
	Scheme *runtime.Scheme
}
const (
	// DefaultConfigVolumeName names the volume that mounts the application
	// ConfigMap (application.yml) into the container.
	DefaultConfigVolumeName = "app-config"
	// DefaultMinReplicas / DefaultMaxReplicas bound the HPA when the spec
	// does not set explicit replica limits.
	DefaultMinReplicas int32 = 2
	DefaultMaxReplicas int32 = 6
)
// +kubebuilder:rbac:groups=app.k8s.airparking.cn,resources=springbootapps,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=app.k8s.airparking.cn,resources=springbootapps/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=services/status,verbs=get
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers/status,verbs=get
// Reconcile drives the cluster state for a SpringBootApp toward its spec by
// creating or updating the owned Deployment, Service and HPA.
//
// A NotFound error on the initial Get means the resource was deleted; owned
// objects are garbage-collected via owner references, so we return without
// requeueing. Any other error is returned so the request is requeued.
func (r *SpringBootAppReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	ctx := context.Background()
	// Previously this logger was created and immediately discarded while a
	// global logger was used instead; keep it and use it for this request.
	logger := r.Log.WithValues("springbootapp", req.NamespacedName)

	springBootApp := &appv1.SpringBootApp{}
	if err := r.Get(ctx, req.NamespacedName, springBootApp); err != nil {
		if errors.IsNotFound(err) {
			// Object deleted after the reconcile request was queued; this is
			// expected, not an error. Owned objects are garbage collected.
			// Return and don't requeue.
			logger.Info("SpringBootApp not found, ignoring")
			return ctrl.Result{}, nil
		}
		// Failed to fetch the SpringBootApp: requeue the request.
		return ctrl.Result{}, err
	}

	// Labels shared by every owned object and used as selectors.
	labels := map[string]string{
		"app":        springBootApp.Name,
		"version":    springBootApp.Spec.Version,
		"deployment": fmt.Sprintf("%s-deployment", springBootApp.Name),
	}

	if err := r.createOrUpdateDeployment(ctx, springBootApp, labels); err != nil {
		return ctrl.Result{}, err
	}
	if err := r.createOrUpdaterService(ctx, springBootApp, labels); err != nil {
		return ctrl.Result{}, err
	}
	if err := r.createOrUpdateHPA(ctx, springBootApp, labels); err != nil {
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}
// SetupWithManager registers the reconciler with the manager. Watching the
// owned Deployment, Service and HPA types (in addition to SpringBootApp)
// ensures that drift in a child object triggers a reconcile of its owner;
// previously only the custom resource itself was watched.
func (r *SpringBootAppReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&appv1.SpringBootApp{}).
		Owns(&appsv1.Deployment{}).
		Owns(&corev1.Service{}).
		Owns(&autoscalingv1.HorizontalPodAutoscaler{}).
		Complete(r)
}
// createOrUpdateDeployment ensures the Deployment ("<name>-<version>") for
// the SpringBootApp exists and matches the desired state from its spec.
func (r *SpringBootAppReconciler) createOrUpdateDeployment(
	ctx context.Context,
	springBootApp *appv1.SpringBootApp,
	labels map[string]string,
) error {
	// Define the desired Deployment object.
	deployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-%s", springBootApp.Name, springBootApp.Spec.Version),
			Namespace: springBootApp.Namespace,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			Replicas: springBootApp.Spec.MinReplicas,
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						springBootContainer(springBootApp),
					},
				},
			},
		},
	}
	// Only declare the config volume when a ConfigMap is actually set:
	// springBootContainer adds the matching VolumeMount under the same
	// condition, and a ConfigMap volume with an empty name is invalid.
	// (Previously the volume was added unconditionally.)
	if len(springBootApp.Spec.ConfigMap) > 0 {
		deployment.Spec.Template.Spec.Volumes = []corev1.Volume{
			{
				Name: DefaultConfigVolumeName,
				VolumeSource: corev1.VolumeSource{
					ConfigMap: &corev1.ConfigMapVolumeSource{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: springBootApp.Spec.ConfigMap,
						},
					},
				},
			},
		}
	}
	if secrets := imagePullSecrets(springBootApp.Spec.ImagePullSecrets); secrets != nil {
		deployment.Spec.Template.Spec.ImagePullSecrets = secrets
	}
	if affinity := podAffinity(springBootApp.Spec.PodAffinity, springBootApp.Spec.PodAntiAffinity); affinity != nil {
		deployment.Spec.Template.Spec.Affinity = affinity
	}
	// Make the SpringBootApp the controller-owner so the Deployment is
	// garbage-collected with it.
	if err := controllerutil.SetControllerReference(springBootApp, deployment, r.Scheme); err != nil {
		return err
	}
	// Check whether the Deployment already exists.
	foundedDep := &appsv1.Deployment{}
	if err := r.Get(
		ctx,
		types.NamespacedName{Name: deployment.Name, Namespace: deployment.Namespace},
		foundedDep,
	); err != nil {
		if errors.IsNotFound(err) {
			log.Info("Creating Deployment", "namespace", deployment.Namespace, "name", deployment.Name)
			err = r.Create(ctx, deployment)
		}
		return err
	}
	// Update the existing object when the desired Spec differs. Compare the
	// Specs only: the fetched object carries server-populated metadata and
	// status, so the previous whole-object DeepEqual was always false and
	// forced an Update on every reconcile.
	if !reflect.DeepEqual(deployment.Spec, foundedDep.Spec) {
		foundedDep.Spec = deployment.Spec
		log.Info("Updating deployment", "namespace", foundedDep.Namespace, "name", foundedDep.Name)
		if err := r.Update(ctx, foundedDep); err != nil {
			return err
		}
	}
	return nil
}
// createOrUpdaterService ensures the Service exposing the app exists and
// its ports match the spec. (The "Updater" in the name is a historical
// typo; the name is kept so existing callers are untouched.)
func (r *SpringBootAppReconciler) createOrUpdaterService(
	ctx context.Context,
	springBootApp *appv1.SpringBootApp,
	labels map[string]string,
) error {
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      springBootApp.Name,
			Namespace: springBootApp.Namespace,
			Labels:    labels,
		},
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{
				"app": springBootApp.Name,
			},
		},
	}
	if len(springBootApp.Spec.Ports) > 0 {
		ports := make([]corev1.ServicePort, 0, len(springBootApp.Spec.Ports))
		for i, port := range springBootApp.Spec.Ports {
			// ServicePort names must be unique within a Service; naming
			// every port "http" (as before) produced an invalid object for
			// multi-port apps. Keep the historical "http" name for the
			// common single-port case.
			name := "http"
			if len(springBootApp.Spec.Ports) > 1 {
				name = fmt.Sprintf("http-%d", i)
			}
			ports = append(ports, corev1.ServicePort{
				Name:       name,
				Port:       port.ContainerPort,
				TargetPort: intstr.IntOrString{IntVal: port.ContainerPort},
			})
		}
		svc.Spec.Ports = ports
	} else {
		// Default to the conventional Spring Boot HTTP port.
		svc.Spec.Ports = []corev1.ServicePort{
			{
				Name:       "http",
				Port:       8080,
				TargetPort: intstr.IntOrString{IntVal: 8080},
			},
		}
	}
	if err := controllerutil.SetControllerReference(springBootApp, svc, r.Scheme); err != nil {
		return err
	}
	foundedSvc := &corev1.Service{}
	if err := r.Get(
		ctx,
		types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace},
		foundedSvc,
	); err != nil {
		if errors.IsNotFound(err) {
			log.Info("Creating Service", "namespace", svc.Namespace, "name", svc.Name)
			err = r.Create(ctx, svc)
		}
		return err
	}
	// Only the ports are reconciled in place; replacing the whole Spec
	// would clobber server-assigned fields such as ClusterIP.
	if !reflect.DeepEqual(foundedSvc.Spec.Ports, svc.Spec.Ports) {
		foundedSvc.Spec.Ports = svc.Spec.Ports
		log.Info("Updating Service", "namespace", foundedSvc.Namespace, "name", foundedSvc.Name)
		if err := r.Update(ctx, foundedSvc); err != nil {
			return err
		}
	}
	return nil
}
// createOrUpdateHPA ensures the HorizontalPodAutoscaler for the app exists
// and reflects the replica bounds and CPU target from the spec.
func (r *SpringBootAppReconciler) createOrUpdateHPA(
	ctx context.Context,
	springBootApp *appv1.SpringBootApp,
	labels map[string]string,
) error {
	hpa := &autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      springBootApp.Name,
			Labels:    labels,
			Namespace: springBootApp.Namespace,
		},
		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
				Kind: "Deployment",
				// Must match the Deployment name built in
				// createOrUpdateDeployment ("<name>-<version>"); targeting
				// plain springBootApp.Name (as before) pointed the HPA at a
				// Deployment that does not exist.
				Name:       fmt.Sprintf("%s-%s", springBootApp.Name, springBootApp.Spec.Version),
				APIVersion: "apps/v1",
			},
		},
	}
	if springBootApp.Spec.MinReplicas != nil {
		hpa.Spec.MinReplicas = springBootApp.Spec.MinReplicas
	} else {
		minReplicas := DefaultMinReplicas
		hpa.Spec.MinReplicas = &minReplicas
	}
	if springBootApp.Spec.MaxReplicas != nil {
		hpa.Spec.MaxReplicas = *springBootApp.Spec.MaxReplicas
	} else {
		hpa.Spec.MaxReplicas = DefaultMaxReplicas
	}
	if springBootApp.Spec.TargetCPUUtilizationPercentage != nil {
		hpa.Spec.TargetCPUUtilizationPercentage = springBootApp.Spec.TargetCPUUtilizationPercentage
	}
	// SetControllerReference sets owner as a Controller OwnerReference on controlled.
	if err := controllerutil.SetControllerReference(springBootApp, hpa, r.Scheme); err != nil {
		return err
	}
	foundedHPA := &autoscalingv1.HorizontalPodAutoscaler{}
	if err := r.Get(
		ctx,
		types.NamespacedName{Name: hpa.Name, Namespace: hpa.Namespace},
		foundedHPA,
	); err != nil {
		if errors.IsNotFound(err) {
			log.Info("Creating HorizontalPodAutoscaler", "namespace", hpa.Namespace, "name", hpa.Name)
			err = r.Create(ctx, hpa)
		}
		return err
	}
	// Compare the *int32 fields by value: comparing the pointers themselves
	// (as the previous code did) is always unequal and forced an Update on
	// every reconcile.
	if !int32PtrEqual(foundedHPA.Spec.MinReplicas, hpa.Spec.MinReplicas) ||
		foundedHPA.Spec.MaxReplicas != hpa.Spec.MaxReplicas ||
		!int32PtrEqual(foundedHPA.Spec.TargetCPUUtilizationPercentage, hpa.Spec.TargetCPUUtilizationPercentage) {
		foundedHPA.Spec.MinReplicas = hpa.Spec.MinReplicas
		foundedHPA.Spec.MaxReplicas = hpa.Spec.MaxReplicas
		foundedHPA.Spec.TargetCPUUtilizationPercentage = hpa.Spec.TargetCPUUtilizationPercentage
		log.Info("Updating HorizontalPodAutoscaler", "namespace", hpa.Namespace, "name", hpa.Name)
		if err := r.Update(ctx, foundedHPA); err != nil {
			return err
		}
	}
	return nil
}

// int32PtrEqual reports whether two *int32 values are equal by value,
// treating two nil pointers as equal.
func int32PtrEqual(a, b *int32) bool {
	if a == nil || b == nil {
		return a == b
	}
	return *a == *b
}
func springBootContainer(springBootApp *appv1.SpringBootApp) corev1.Container |
// Probe tuning defaults used by buildProbe below.
const (
	// Consecutive failures before the container is considered unhealthy.
	DefaultFailureThreshold = 5
	// Consecutive successes required after a failure to be healthy again.
	DefaultSuccessThreshold = 1
	// Per-request timeout for a single probe HTTP call.
	DefaultTimeoutSeconds = 5
)
// buildProbe builds an HTTP GET probe on port 8080 for the given path.
// NOTE(review): the parameter named "timeout" is assigned to PeriodSeconds
// (the interval between probes), while the actual TimeoutSeconds always
// comes from DefaultTimeoutSeconds — the name is misleading; confirm intent.
func buildProbe(path string, delay, timeout int32) *corev1.Probe {
	return &corev1.Probe{
		Handler: corev1.Handler{
			HTTPGet: &corev1.HTTPGetAction{
				Path:   path,
				Port:   intstr.IntOrString{IntVal: 8080},
				Scheme: corev1.URISchemeHTTP,
			},
		},
		FailureThreshold:    DefaultFailureThreshold,
		InitialDelaySeconds: delay,
		SuccessThreshold:    DefaultSuccessThreshold,
		PeriodSeconds:       timeout,
		TimeoutSeconds:      DefaultTimeoutSeconds,
	}
}
// podAffinity merges optional pod affinity / anti-affinity rules into a
// single *corev1.Affinity, returning nil when neither is provided so the
// caller can leave the pod spec untouched.
func podAffinity(podAffinity *corev1.PodAffinity, podAntiAffinity *corev1.PodAntiAffinity) *corev1.Affinity {
	if podAffinity == nil && podAntiAffinity == nil {
		return nil
	}
	affinity := &corev1.Affinity{}
	if podAffinity != nil {
		affinity.PodAffinity = podAffinity
	}
	if podAntiAffinity != nil {
		affinity.PodAntiAffinity = podAntiAffinity
	}
	return affinity
}
// imagePullSecrets wraps a single secret name into the slice form expected
// by PodSpec.ImagePullSecrets; an empty name yields nil (no secrets).
func imagePullSecrets(imagePullSecrets string) []corev1.LocalObjectReference {
	if len(imagePullSecrets) == 0 {
		return nil
	}
	return []corev1.LocalObjectReference{
		{Name: imagePullSecrets},
	}
}
| {
container := corev1.Container{
Name: springBootApp.Name,
Image: fmt.Sprintf("%s:%s", springBootApp.Spec.ImageRepo, springBootApp.Spec.AppImage),
Ports: springBootApp.Spec.Ports,
Env: springBootApp.Spec.Env,
}
if len(springBootApp.Spec.LivenessProbePath) > 0 {
container.LivenessProbe = buildProbe(springBootApp.Spec.LivenessProbePath, 300, 30)
}
if len(springBootApp.Spec.ReadinessProbePath) > 0 {
container.ReadinessProbe = buildProbe(springBootApp.Spec.ReadinessProbePath, 30, 5)
}
if len(springBootApp.Spec.ConfigMap) > 0 {
container.VolumeMounts = []corev1.VolumeMount{
{
Name: DefaultConfigVolumeName,
ReadOnly: true,
MountPath: "/usr/local/app/config/application.yml",
SubPath: "application.yml",
},
}
}
if springBootApp.Spec.Resources != nil {
container.Resources = *springBootApp.Spec.Resources
} else {
container.Resources = corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceCPU: *resource.NewQuantity(1000, resource.DecimalSI),
corev1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
},
Requests: corev1.ResourceList{
corev1.ResourceCPU: *resource.NewQuantity(100, resource.DecimalSI),
corev1.ResourceMemory: *resource.NewQuantity(512, resource.BinarySI),
},
}
}
if springBootApp.Spec.ImagePullPolicy != nil {
container.ImagePullPolicy = *springBootApp.Spec.ImagePullPolicy
} else {
container.ImagePullPolicy = corev1.PullIfNotPresent
}
return container
} | identifier_body |
springbootapp_controller.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/prometheus/common/log"
autoscalingv1 "k8s.io/api/autoscaling/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"reflect"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
appsv1 "k8s.io/api/apps/v1"
appv1 "github.com/cy18cn/spring-boot-operator/api/v1"
)
// SpringBootAppReconciler reconciles a SpringBootApp object
type SpringBootAppReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
}
const (
DefaultConfigVolumeName = "app-config"
DefaultMinReplicas int32 = 2
DefaultMaxReplicas int32 = 6
)
// +kubebuilder:rbac:groups=app.k8s.airparking.cn,resources=springbootapps,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=app.k8s.airparking.cn,resources=springbootapps/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=services/status,verbs=get
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers/status,verbs=get
func (r *SpringBootAppReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
_ = r.Log.WithValues("springbootapp", req.NamespacedName)
springBootApp := &appv1.SpringBootApp{}
if err := r.Get(ctx, req.NamespacedName, springBootApp); err != nil {
if errors.IsNotFound(err) {
log.Error(err, "unable to found SpringBootAPP")
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return ctrl.Result{}, nil
}
// Error request get springBootApp, requeue the request
return ctrl.Result{}, err
}
labels := map[string]string{
"app": springBootApp.Name,
"version": springBootApp.Spec.Version,
"deployment": fmt.Sprintf("%s-deployment", springBootApp.Name),
}
err := r.createOrUpdateDeployment(ctx, springBootApp, labels)
if err != nil {
return ctrl.Result{}, err
}
err = r.createOrUpdaterService(ctx, springBootApp, labels)
if err != nil {
return ctrl.Result{}, err
}
err = r.createOrUpdateHPA(ctx, springBootApp, labels)
if err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *SpringBootAppReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&appv1.SpringBootApp{}).
Complete(r)
}
func (r *SpringBootAppReconciler) createOrUpdateDeployment(
ctx context.Context,
springBootApp *appv1.SpringBootApp,
labels map[string]string,
) error {
// Define the desired Deployment object
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", springBootApp.Name, springBootApp.Spec.Version),
Namespace: springBootApp.Namespace,
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Replicas: springBootApp.Spec.MinReplicas,
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
springBootContainer(springBootApp),
},
Volumes: []corev1.Volume{
{
Name: DefaultConfigVolumeName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: springBootApp.Spec.ConfigMap,
},
},
},
},
},
},
},
},
}
imagePullSecrets := imagePullSecrets(springBootApp.Spec.ImagePullSecrets)
if imagePullSecrets != nil {
deployment.Spec.Template.Spec.ImagePullSecrets = imagePullSecrets
}
affinity := podAffinity(springBootApp.Spec.PodAffinity, springBootApp.Spec.PodAntiAffinity)
if affinity != nil {
deployment.Spec.Template.Spec.Affinity = affinity
}
if err := controllerutil.SetControllerReference(springBootApp, deployment, r.Scheme); err != nil {
return err
}
// check if deployment is existed
foundedDep := &appsv1.Deployment{}
if err := r.Get(
ctx,
types.NamespacedName{Name: deployment.Name, Namespace: deployment.Namespace},
foundedDep,
); err != nil {
if errors.IsNotFound(err) {
log.Info("Creating Deployment", "namespace", deployment.Namespace, "name", deployment.Name)
err = r.Create(ctx, deployment)
}
return err
}
// update the found object and write result back if it is changed
if !reflect.DeepEqual(deployment, foundedDep) {
foundedDep.Spec = deployment.Spec
log.Info("Updating deployment", "namespace", foundedDep.Namespace, "name", foundedDep.Name)
err := r.Update(ctx, foundedDep)
if err != nil {
return err
}
}
return nil
}
func (r *SpringBootAppReconciler) createOrUpdaterService(
ctx context.Context,
springBootApp *appv1.SpringBootApp,
labels map[string]string,
) error {
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: springBootApp.Name,
Namespace: springBootApp.Namespace,
Labels: labels,
},
Spec: corev1.ServiceSpec{
Selector: map[string]string{
"app": springBootApp.Name,
},
},
}
if springBootApp.Spec.Ports != nil {
var ports []corev1.ServicePort
for _, port := range springBootApp.Spec.Ports {
ports = append(ports, corev1.ServicePort{
Name: "http",
Port: port.ContainerPort,
TargetPort: intstr.IntOrString{IntVal: port.ContainerPort},
})
}
svc.Spec.Ports = ports
} else {
svc.Spec.Ports = []corev1.ServicePort{
{
Name: "http",
Port: 8080,
TargetPort: intstr.IntOrString{IntVal: 8080},
},
}
}
if err := controllerutil.SetControllerReference(springBootApp, svc, r.Scheme); err != nil {
return err
}
foundedSvc := &corev1.Service{}
if err := r.Get(
ctx,
types.NamespacedName{Name: springBootApp.Name, Namespace: springBootApp.Namespace},
foundedSvc,
); err != nil {
if errors.IsNotFound(err) {
log.Info("Creating Service", "namespace", springBootApp.Namespace, "name", springBootApp.Name)
err = r.Create(ctx, svc)
}
return err
}
if !reflect.DeepEqual(foundedSvc.Spec.Ports, svc.Spec.Ports) {
foundedSvc.Spec.Ports = svc.Spec.Ports
log.Info("Updating Service", "namespace", foundedSvc.Namespace, "name", foundedSvc.Name)
err := r.Update(ctx, foundedSvc)
if err != nil {
return err
}
}
return nil
}
func (r *SpringBootAppReconciler) createOrUpdateHPA(
ctx context.Context,
springBootApp *appv1.SpringBootApp,
labels map[string]string,
) error {
// hpa
hpa := &autoscalingv1.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: springBootApp.Name,
Labels: labels,
Namespace: springBootApp.Namespace,
},
Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
Kind: "Deployment",
Name: springBootApp.Name,
APIVersion: "apps/v1",
},
},
}
if springBootApp.Spec.MinReplicas != nil {
hpa.Spec.MinReplicas = springBootApp.Spec.MinReplicas
} else {
minReplicas := DefaultMinReplicas
hpa.Spec.MinReplicas = &minReplicas
}
if springBootApp.Spec.MaxReplicas != nil {
hpa.Spec.MaxReplicas = *springBootApp.Spec.MaxReplicas
} else {
hpa.Spec.MaxReplicas = DefaultMaxReplicas
}
if springBootApp.Spec.TargetCPUUtilizationPercentage != nil {
hpa.Spec.TargetCPUUtilizationPercentage = springBootApp.Spec.TargetCPUUtilizationPercentage
}
// SetControllerReference sets owner as a Controller OwnerReference on controlled.
if err := controllerutil.SetControllerReference(springBootApp, hpa, r.Scheme); err != nil {
return err
}
foundedHPA := &autoscalingv1.HorizontalPodAutoscaler{}
if err := r.Get(
ctx,
types.NamespacedName{Name: hpa.Name, Namespace: hpa.Namespace},
foundedHPA,
); err != nil {
if errors.IsNotFound(err) {
log.Info("Creating HorizontalPodAutoscaler", "namespace", hpa.Namespace, "name", hpa.Name)
err = r.Create(ctx, hpa)
}
return err
}
if foundedHPA.Spec.MinReplicas != hpa.Spec.MinReplicas ||
foundedHPA.Spec.MaxReplicas != hpa.Spec.MaxReplicas ||
foundedHPA.Spec.TargetCPUUtilizationPercentage != hpa.Spec.TargetCPUUtilizationPercentage {
foundedHPA.Spec.MinReplicas = hpa.Spec.MinReplicas
foundedHPA.Spec.MaxReplicas = hpa.Spec.MaxReplicas
foundedHPA.Spec.TargetCPUUtilizationPercentage = hpa.Spec.TargetCPUUtilizationPercentage
log.Info("Updating Service", "namespace", hpa.Namespace, "name", hpa.Name)
err := r.Update(ctx, foundedHPA)
if err != nil {
return err
}
}
return nil
}
// springBootContainer builds the application container for the pod template
// from the SpringBootApp spec: image, ports, env, optional probes, optional
// ConfigMap mount, resource requirements and pull policy.
func springBootContainer(springBootApp *appv1.SpringBootApp) corev1.Container {
	container := corev1.Container{
		Name:  springBootApp.Name,
		Image: fmt.Sprintf("%s:%s", springBootApp.Spec.ImageRepo, springBootApp.Spec.AppImage),
		Ports: springBootApp.Spec.Ports,
		Env:   springBootApp.Spec.Env,
	}
	// Probes are optional; one is attached only when its path is set.
	if len(springBootApp.Spec.LivenessProbePath) > 0 {
		container.LivenessProbe = buildProbe(springBootApp.Spec.LivenessProbePath, 300, 30)
	}
	if len(springBootApp.Spec.ReadinessProbePath) > 0 {
		container.ReadinessProbe = buildProbe(springBootApp.Spec.ReadinessProbePath, 30, 5)
	}
	// Mount the ConfigMap as application.yml when one is configured; the
	// matching volume is declared on the pod spec.
	if len(springBootApp.Spec.ConfigMap) > 0 {
		container.VolumeMounts = []corev1.VolumeMount{
			{
				Name:      DefaultConfigVolumeName,
				ReadOnly:  true,
				MountPath: "/usr/local/app/config/application.yml",
				SubPath:   "application.yml",
			},
		}
	}
	if springBootApp.Spec.Resources != nil {
		container.Resources = *springBootApp.Spec.Resources
	} else {
		// Sensible defaults for a JVM workload. The previous defaults used
		// raw resource.NewQuantity values, which yielded a CPU limit of
		// 1000 *cores* and a memory limit of 1024 *bytes* — the latter
		// would OOM-kill any JVM immediately.
		container.Resources = corev1.ResourceRequirements{
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("1024Mi"),
			},
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("100m"),
				corev1.ResourceMemory: resource.MustParse("512Mi"),
			},
		}
	}
	if springBootApp.Spec.ImagePullPolicy != nil {
		container.ImagePullPolicy = *springBootApp.Spec.ImagePullPolicy
	} else {
		container.ImagePullPolicy = corev1.PullIfNotPresent
	}
	return container
}
const (
DefaultFailureThreshold = 5
DefaultSuccessThreshold = 1
DefaultTimeoutSeconds = 5
)
func buildProbe(path string, delay, timeout int32) *corev1.Probe {
return &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: path,
Port: intstr.IntOrString{IntVal: 8080},
Scheme: corev1.URISchemeHTTP,
},
},
FailureThreshold: DefaultFailureThreshold,
InitialDelaySeconds: delay,
SuccessThreshold: DefaultSuccessThreshold,
PeriodSeconds: timeout,
TimeoutSeconds: DefaultTimeoutSeconds,
}
}
func podAffinity(podAffinity *corev1.PodAffinity, podAntiAffinity *corev1.PodAntiAffinity) *corev1.Affinity {
if podAffinity == nil && podAntiAffinity == nil {
return nil
}
affinity := &corev1.Affinity{}
if podAffinity != nil {
affinity.PodAffinity = podAffinity
}
if podAntiAffinity != nil |
return affinity
}
func imagePullSecrets(imagePullSecrets string) []corev1.LocalObjectReference {
if len(imagePullSecrets) == 0 {
return nil
}
return []corev1.LocalObjectReference{
{Name: imagePullSecrets},
}
}
| {
affinity.PodAntiAffinity = podAntiAffinity
} | conditional_block |
springbootapp_controller.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/prometheus/common/log"
autoscalingv1 "k8s.io/api/autoscaling/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"reflect"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
appsv1 "k8s.io/api/apps/v1"
appv1 "github.com/cy18cn/spring-boot-operator/api/v1"
)
// SpringBootAppReconciler reconciles a SpringBootApp object
type SpringBootAppReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
}
const (
DefaultConfigVolumeName = "app-config"
DefaultMinReplicas int32 = 2
DefaultMaxReplicas int32 = 6
)
// +kubebuilder:rbac:groups=app.k8s.airparking.cn,resources=springbootapps,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=app.k8s.airparking.cn,resources=springbootapps/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=services/status,verbs=get
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers/status,verbs=get
func (r *SpringBootAppReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
_ = r.Log.WithValues("springbootapp", req.NamespacedName)
springBootApp := &appv1.SpringBootApp{}
if err := r.Get(ctx, req.NamespacedName, springBootApp); err != nil {
if errors.IsNotFound(err) {
log.Error(err, "unable to found SpringBootAPP")
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return ctrl.Result{}, nil
}
// Error request get springBootApp, requeue the request
return ctrl.Result{}, err
}
labels := map[string]string{
"app": springBootApp.Name,
"version": springBootApp.Spec.Version,
"deployment": fmt.Sprintf("%s-deployment", springBootApp.Name),
}
err := r.createOrUpdateDeployment(ctx, springBootApp, labels)
if err != nil {
return ctrl.Result{}, err
}
err = r.createOrUpdaterService(ctx, springBootApp, labels)
if err != nil {
return ctrl.Result{}, err
}
err = r.createOrUpdateHPA(ctx, springBootApp, labels)
if err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *SpringBootAppReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&appv1.SpringBootApp{}).
Complete(r)
}
func (r *SpringBootAppReconciler) createOrUpdateDeployment( | labels map[string]string,
) error {
// Define the desired Deployment object
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", springBootApp.Name, springBootApp.Spec.Version),
Namespace: springBootApp.Namespace,
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Replicas: springBootApp.Spec.MinReplicas,
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
springBootContainer(springBootApp),
},
Volumes: []corev1.Volume{
{
Name: DefaultConfigVolumeName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: springBootApp.Spec.ConfigMap,
},
},
},
},
},
},
},
},
}
imagePullSecrets := imagePullSecrets(springBootApp.Spec.ImagePullSecrets)
if imagePullSecrets != nil {
deployment.Spec.Template.Spec.ImagePullSecrets = imagePullSecrets
}
affinity := podAffinity(springBootApp.Spec.PodAffinity, springBootApp.Spec.PodAntiAffinity)
if affinity != nil {
deployment.Spec.Template.Spec.Affinity = affinity
}
if err := controllerutil.SetControllerReference(springBootApp, deployment, r.Scheme); err != nil {
return err
}
// check if deployment is existed
foundedDep := &appsv1.Deployment{}
if err := r.Get(
ctx,
types.NamespacedName{Name: deployment.Name, Namespace: deployment.Namespace},
foundedDep,
); err != nil {
if errors.IsNotFound(err) {
log.Info("Creating Deployment", "namespace", deployment.Namespace, "name", deployment.Name)
err = r.Create(ctx, deployment)
}
return err
}
// update the found object and write result back if it is changed
if !reflect.DeepEqual(deployment, foundedDep) {
foundedDep.Spec = deployment.Spec
log.Info("Updating deployment", "namespace", foundedDep.Namespace, "name", foundedDep.Name)
err := r.Update(ctx, foundedDep)
if err != nil {
return err
}
}
return nil
}
func (r *SpringBootAppReconciler) createOrUpdaterService(
ctx context.Context,
springBootApp *appv1.SpringBootApp,
labels map[string]string,
) error {
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: springBootApp.Name,
Namespace: springBootApp.Namespace,
Labels: labels,
},
Spec: corev1.ServiceSpec{
Selector: map[string]string{
"app": springBootApp.Name,
},
},
}
if springBootApp.Spec.Ports != nil {
var ports []corev1.ServicePort
for _, port := range springBootApp.Spec.Ports {
ports = append(ports, corev1.ServicePort{
Name: "http",
Port: port.ContainerPort,
TargetPort: intstr.IntOrString{IntVal: port.ContainerPort},
})
}
svc.Spec.Ports = ports
} else {
svc.Spec.Ports = []corev1.ServicePort{
{
Name: "http",
Port: 8080,
TargetPort: intstr.IntOrString{IntVal: 8080},
},
}
}
if err := controllerutil.SetControllerReference(springBootApp, svc, r.Scheme); err != nil {
return err
}
foundedSvc := &corev1.Service{}
if err := r.Get(
ctx,
types.NamespacedName{Name: springBootApp.Name, Namespace: springBootApp.Namespace},
foundedSvc,
); err != nil {
if errors.IsNotFound(err) {
log.Info("Creating Service", "namespace", springBootApp.Namespace, "name", springBootApp.Name)
err = r.Create(ctx, svc)
}
return err
}
if !reflect.DeepEqual(foundedSvc.Spec.Ports, svc.Spec.Ports) {
foundedSvc.Spec.Ports = svc.Spec.Ports
log.Info("Updating Service", "namespace", foundedSvc.Namespace, "name", foundedSvc.Name)
err := r.Update(ctx, foundedSvc)
if err != nil {
return err
}
}
return nil
}
func (r *SpringBootAppReconciler) createOrUpdateHPA(
ctx context.Context,
springBootApp *appv1.SpringBootApp,
labels map[string]string,
) error {
// hpa
hpa := &autoscalingv1.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: springBootApp.Name,
Labels: labels,
Namespace: springBootApp.Namespace,
},
Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
Kind: "Deployment",
Name: springBootApp.Name,
APIVersion: "apps/v1",
},
},
}
if springBootApp.Spec.MinReplicas != nil {
hpa.Spec.MinReplicas = springBootApp.Spec.MinReplicas
} else {
minReplicas := DefaultMinReplicas
hpa.Spec.MinReplicas = &minReplicas
}
if springBootApp.Spec.MaxReplicas != nil {
hpa.Spec.MaxReplicas = *springBootApp.Spec.MaxReplicas
} else {
hpa.Spec.MaxReplicas = DefaultMaxReplicas
}
if springBootApp.Spec.TargetCPUUtilizationPercentage != nil {
hpa.Spec.TargetCPUUtilizationPercentage = springBootApp.Spec.TargetCPUUtilizationPercentage
}
// SetControllerReference sets owner as a Controller OwnerReference on controlled.
if err := controllerutil.SetControllerReference(springBootApp, hpa, r.Scheme); err != nil {
return err
}
foundedHPA := &autoscalingv1.HorizontalPodAutoscaler{}
if err := r.Get(
ctx,
types.NamespacedName{Name: hpa.Name, Namespace: hpa.Namespace},
foundedHPA,
); err != nil {
if errors.IsNotFound(err) {
log.Info("Creating HorizontalPodAutoscaler", "namespace", hpa.Namespace, "name", hpa.Name)
err = r.Create(ctx, hpa)
}
return err
}
if foundedHPA.Spec.MinReplicas != hpa.Spec.MinReplicas ||
foundedHPA.Spec.MaxReplicas != hpa.Spec.MaxReplicas ||
foundedHPA.Spec.TargetCPUUtilizationPercentage != hpa.Spec.TargetCPUUtilizationPercentage {
foundedHPA.Spec.MinReplicas = hpa.Spec.MinReplicas
foundedHPA.Spec.MaxReplicas = hpa.Spec.MaxReplicas
foundedHPA.Spec.TargetCPUUtilizationPercentage = hpa.Spec.TargetCPUUtilizationPercentage
log.Info("Updating Service", "namespace", hpa.Namespace, "name", hpa.Name)
err := r.Update(ctx, foundedHPA)
if err != nil {
return err
}
}
return nil
}
// springBootContainer builds the application container for a SpringBootApp:
// image, ports, env, optional liveness/readiness probes, an optional
// application.yml mount from a ConfigMap, and resource requirements with
// sane defaults when the spec provides none.
func springBootContainer(springBootApp *appv1.SpringBootApp) corev1.Container {
	container := corev1.Container{
		Name:  springBootApp.Name,
		Image: fmt.Sprintf("%s:%s", springBootApp.Spec.ImageRepo, springBootApp.Spec.AppImage),
		Ports: springBootApp.Spec.Ports,
		Env:   springBootApp.Spec.Env,
	}
	// Probes are only attached when a path is configured. Liveness gets a
	// long initial delay (300s) for slow-starting Spring Boot apps.
	if len(springBootApp.Spec.LivenessProbePath) > 0 {
		container.LivenessProbe = buildProbe(springBootApp.Spec.LivenessProbePath, 300, 30)
	}
	if len(springBootApp.Spec.ReadinessProbePath) > 0 {
		container.ReadinessProbe = buildProbe(springBootApp.Spec.ReadinessProbePath, 30, 5)
	}
	// Mount the ConfigMap's application.yml read-only into the Spring Boot
	// config location; the matching volume is defined in the pod spec.
	if len(springBootApp.Spec.ConfigMap) > 0 {
		container.VolumeMounts = []corev1.VolumeMount{
			{
				Name:      DefaultConfigVolumeName,
				ReadOnly:  true,
				MountPath: "/usr/local/app/config/application.yml",
				SubPath:   "application.yml",
			},
		}
	}
	if springBootApp.Spec.Resources != nil {
		container.Resources = *springBootApp.Spec.Resources
	} else {
		// Default resources. The previous code used resource.NewQuantity with
		// raw integers, which meant 1000 CPU *cores* and 1024 *bytes* of
		// memory (instant OOM-kill); the intended units are millicores/MiB.
		container.Resources = corev1.ResourceRequirements{
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1000m"),
				corev1.ResourceMemory: resource.MustParse("1024Mi"),
			},
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("100m"),
				corev1.ResourceMemory: resource.MustParse("512Mi"),
			},
		}
	}
	if springBootApp.Spec.ImagePullPolicy != nil {
		container.ImagePullPolicy = *springBootApp.Spec.ImagePullPolicy
	} else {
		container.ImagePullPolicy = corev1.PullIfNotPresent
	}
	return container
}
// Default probe tuning shared by liveness and readiness probes built via
// buildProbe; only the initial delay and period vary per call site.
const (
	DefaultFailureThreshold = 5 // consecutive failures before the probe is considered failed
	DefaultSuccessThreshold = 1 // consecutive successes before the probe is considered passing
	DefaultTimeoutSeconds   = 5 // per-request timeout for each probe attempt
)
// buildProbe returns an HTTP GET probe against the given path on port 8080
// (the conventional Spring Boot port — hardcoded here).
//
// delay is InitialDelaySeconds; period is PeriodSeconds (time between
// attempts). The previous signature named the second value "timeout", which
// was misleading: the per-attempt timeout is the constant
// DefaultTimeoutSeconds, not this parameter.
func buildProbe(path string, delay, period int32) *corev1.Probe {
	return &corev1.Probe{
		Handler: corev1.Handler{
			HTTPGet: &corev1.HTTPGetAction{
				Path:   path,
				Port:   intstr.IntOrString{IntVal: 8080},
				Scheme: corev1.URISchemeHTTP,
			},
		},
		FailureThreshold:    DefaultFailureThreshold,
		InitialDelaySeconds: delay,
		SuccessThreshold:    DefaultSuccessThreshold,
		PeriodSeconds:       period,
		TimeoutSeconds:      DefaultTimeoutSeconds,
	}
}
// podAffinity combines optional affinity and anti-affinity rules into a
// single *corev1.Affinity. It returns nil when neither rule is set so the
// pod spec omits the affinity stanza entirely.
func podAffinity(podAffinity *corev1.PodAffinity, podAntiAffinity *corev1.PodAntiAffinity) *corev1.Affinity {
	if podAffinity == nil && podAntiAffinity == nil {
		return nil
	}
	// Assigning nil fields directly is equivalent to leaving them unset.
	return &corev1.Affinity{
		PodAffinity:     podAffinity,
		PodAntiAffinity: podAntiAffinity,
	}
}
func imagePullSecrets(imagePullSecrets string) []corev1.LocalObjectReference {
if len(imagePullSecrets) == 0 {
return nil
}
return []corev1.LocalObjectReference{
{Name: imagePullSecrets},
}
} | ctx context.Context,
springBootApp *appv1.SpringBootApp, | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.