file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
bibtexParser.ts | export class RootNode {
type = 'root' as const;
constructor(public children: (TextNode | BlockNode)[] = []) {}
}
export class TextNode {
type = 'text' as const;
constructor(public parent: RootNode, public text: string) {
parent.children.push(this);
}
}
export class BlockNode {
type = 'block' as const;
public command = '';
public block?: CommentNode | PreambleNode | StringNode | EntryNode;
constructor(public parent: RootNode) {
parent.children.push(this);
}
}
export class CommentNode {
type = 'comment' as const;
constructor(
public parent: BlockNode,
public raw: string,
public braces: number,
public parens: number
) {
parent.block = this;
}
}
class PreambleNode {
type = 'preamble' as const;
constructor(
public parent: BlockNode,
public raw: string,
public braces: number,
public parens: number
) {
parent.block = this;
}
}
class StringNode {
type = 'string' as const;
constructor(
public parent: BlockNode,
public raw: string,
public braces: number,
public parens: number
) {
parent.block = this;
}
}
export class EntryNode {
type = 'entry' as const;
key?: string;
keyEnded?: boolean;
fields: FieldNode[];
constructor(public parent: BlockNode, public wrapType: '{' | '(') {
parent.block = this;
this.fields = [];
}
}
export class FieldNode {
type = 'field' as const;
/** Each value is concatenated */
value: ConcatNode;
constructor(public parent: EntryNode, public name: string = '') {
this.value = new ConcatNode(this);
}
}
class ConcatNode {
type = 'concat' as const;
concat: (LiteralNode | BracedNode | QuotedNode)[];
canConsumeValue = true;
constructor(public parent: FieldNode) {
this.concat = [];
}
}
class LiteralNode {
type = 'literal' as const;
constructor(public parent: ConcatNode, public value: string) {
parent.concat.push(this);
}
}
class BracedNode {
type = 'braced' as const;
value = '';
/** Used to count opening and closing braces */
depth = 0;
constructor(public parent: ConcatNode) {
parent.concat.push(this);
}
}
class QuotedNode {
type = 'quoted' as const;
value = '';
/** Used to count opening and closing braces */
depth = 0;
constructor(public parent: ConcatNode) {
parent.concat.push(this);
}
}
type Node =
| RootNode
| TextNode
| BlockNode
| EntryNode
| CommentNode
| PreambleNode
| StringNode
| FieldNode
| ConcatNode
| LiteralNode
| BracedNode
| QuotedNode;
export function generateAST(input: string): RootNode {
const rootNode = new RootNode();
let node: Node = rootNode;
let line = 1;
let column = 0;
for (let i = 0; i < input.length; i++) {
const char = input[i]!;
const prev = input[i - 1]!;
if (char === '\n') {
line++;
column = 0;
}
column++;
switch (node.type) {
case 'root': {
node = char === '@' ? new BlockNode(node) : new TextNode(node, char);
break;
}
case 'text': {
// Whitespace or closing curly brace should precede an entry. This might
// not be correct but allows parsing of "valid" bibtex files in the
// wild.
if (char === '@' && /[\s\r\n}]/.test(prev)) {
node = new BlockNode(node.parent);
} else {
node.text += char;
}
break;
}
case 'block': {
if (char === '@') {
// everything prior to this was a comment
const prevNode =
node.parent.children[node.parent.children.length - 2];
if (prevNode?.type === 'text') {
prevNode.text += '@' + node.command;
} else {
// insert text node 1 from the end
node.parent.children.pop();
new TextNode(node.parent, '@' + node.command);
node.parent.children.push(node);
}
node.command = '';
} else if (char === '{' || char === '(') {
const commandTrimmed = node.command.trim();
if (commandTrimmed === '' || /\s/.test(commandTrimmed)) {
// A block without a command is invalid. It's sometimes used in comments though, e.g. @(#)
// replace the block node
node.parent.children.pop();
node = new TextNode(node.parent, '@' + node.command + char);
} else {
node.command = commandTrimmed;
const command: string = node.command.toLowerCase();
const [braces, parens] = char === '{' ? [1, 0] : [0, 1];
const raw = '@' + command + char;
switch (command) {
case 'string':
node = new StringNode(node, raw, braces, parens);
break;
case 'preamble':
node = new PreambleNode(node, raw, braces, parens);
break;
case 'comment':
node = new CommentNode(node, raw, braces, parens);
break;
default:
node = new EntryNode(node, char);
break;
}
}
} else if (char.match(/[=#,})[\]]/)) {
// replace the block node
node.parent.children.pop();
node = new TextNode(node.parent, '@' + node.command + char);
} else {
node.command += char;
}
break;
}
case 'comment':
case 'string':
case 'preamble':
if (char === '{') {
node.braces++;
} else if (char === '}') {
node.braces--;
} else if (char === '(') {
node.parens++;
} else if (char === ')') {
node.parens--;
}
node.raw += char;
if (node.braces === 0 && node.parens === 0) {
node = node.parent.parent; // root
}
break;
case 'entry': {
if (isWhitespace(char)) {
if (!node.key) {
// Before key, ignore
} else {
// Ensure subsequent characters are not appended to the key
node.keyEnded = true;
}
} else if (char === ',') {
node = new FieldNode(node);
} else if (
(node.wrapType === '{' && char === '}') ||
(node.wrapType === '(' && char === ')')
) {
node = node.parent.parent; // root
} else if (char === '=' && node.key && isValidFieldName(node.key)) {
// Entry has no key, this is a field name
const field: FieldNode = new FieldNode(node, node.key);
node.fields.push(field);
node.key = undefined;
node = field.value;
} else if (node.keyEnded) {
throw new BibTeXSyntaxError(
input,
node,
i,
line,
column,
`The entry key cannot contain whitespace`
);
} else if (!isValidKeyCharacter(char)) {
throw new BibTeXSyntaxError(
input,
node,
i,
line,
column,
`The entry key cannot contain the character (${char})`
);
} else {
node.key = (node.key ?? '') + char;
}
break;
}
case 'field': {
if (char === '}' || char === ')') {
node.name = node.name.trim();
node = node.parent.parent.parent; // root
} else if (char === '=') {
node.name = node.name.trim();
node = node.value;
} else if (char === ',') {
node.name = node.name.trim();
node = new FieldNode(node.parent);
} else if (!isValidFieldName(char)) {
throw new BibTeXSyntaxError(input, node, i, line, column);
} else if (!node.name) {
if (!isWhitespace(char)) {
node.parent.fields.push(node);
node.name = char;
} else {
// noop
}
} else {
node.name += char;
}
break;
}
case 'concat': {
if (isWhitespace(char)) {
break; // noop
} else if (node.canConsumeValue) {
if (/[#=,}()[\]]/.test(char)) {
throw new BibTeXSyntaxError(input, node, i, line, column);
} else {
node.canConsumeValue = false;
if (char === '{') {
node = new BracedNode(node);
} else if (char === '"') {
node = new QuotedNode(node);
} else {
node = new LiteralNode(node, char);
}
}
} else {
if (char === ',') {
node = new FieldNode(node.parent.parent);
} else if (char === '}' || char === ')') {
node = node.parent.parent.parent.parent; // root
} else if (char === '#') {
node.canConsumeValue = true;
} else {
throw new BibTeXSyntaxError(input, node, i, line, column);
}
}
break;
}
case 'literal':
if (isWhitespace(char)) {
// end of literal
node = node.parent;
} else if (char === ',') {
node = new FieldNode(node.parent.parent.parent);
} else if (char === '}') {
node = node.parent.parent.parent.parent.parent; // root
} else if (char === '#') {
node = node.parent;
node.canConsumeValue = true;
} else {
node.value += char;
}
break;
// Values may be enclosed in curly braces. Curly braces may be used within
// the value but they must be balanced.
case 'braced':
if (char === '}' && node.depth === 0) {
node = node.parent; // values
break;
} else if (char === '{') {
node.depth++;
} else if (char === '}') {
node.depth--;
}
node.value += char;
break;
// Values may be enclosed in double quotes. Curly braces may be used
// within quoted values but they must be balanced.
//
// To escape a double quote, surround it with braces `{"}`.
// https://web.archive.org/web/20210422110817/https://maverick.inria.fr/~Xavier.Decoret/resources/xdkbibtex/bibtex_summary.html
case 'quoted':
if (char === '"' && node.depth === 0) {
node = node.parent; // values
break;
} else if (char === '{') {
node.depth++;
} else if (char === '}') {
node.depth--;
if (node.depth < 0) {
throw new BibTeXSyntaxError(input, node, i, line, column);
}
}
node.value += char;
break;
}
}
return rootNode;
}
function isWhitespace(string: string): boolean {
return /^[ \t\n\r]*$/.test(string);
}
/**
* Certain characters are special in latex: {}%#$~. These cannot be used in
* \cite without error. See https://tex.stackexchange.com/a/408548
*/
function isValidKeyCharacter(char: string): boolean |
function isValidFieldName(char: string): boolean {
return !/[=,{}()[\]]/.test(char);
}
export class BibTeXSyntaxError extends Error {
public char: string;
constructor(
input: string,
public node: Node,
pos: number,
public line: number,
public column: number,
public hint?: string
) {
super(
`Line ${line}:${column}: Syntax Error in ${node.type} (${hint})\n` +
input.slice(Math.max(0, pos - 20), pos) +
'>>' +
input[pos] +
'<<' +
input.slice(pos + 1, pos + 20)
);
this.name = 'Syntax Error';
this.char = input[pos]!;
}
}
| {
return !/[#%{}~$,]/.test(char);
} | identifier_body |
document.py | import cgi
import logging
from normality import slugify
from followthemoney import model
from followthemoney.types import registry
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.attributes import flag_modified
from aleph.core import db, cache
from aleph.model.metadata import Metadata
from aleph.model.collection import Collection
from aleph.model.common import DatedModel
from aleph.model.document_record import DocumentRecord
from aleph.model.document_tag import DocumentTag
from aleph.util import filter_texts
log = logging.getLogger(__name__)
class Document(db.Model, DatedModel, Metadata):
MAX_TAGS = 10000
SCHEMA = 'Document'
SCHEMA_FOLDER = 'Folder'
SCHEMA_PACKAGE = 'Package'
SCHEMA_WORKBOOK = 'Workbook'
SCHEMA_TEXT = 'PlainText'
SCHEMA_HTML = 'HyperText'
SCHEMA_PDF = 'Pages'
SCHEMA_IMAGE = 'Image'
SCHEMA_AUDIO = 'Audio'
SCHEMA_VIDEO = 'Video'
SCHEMA_TABLE = 'Table'
SCHEMA_EMAIL = 'Email'
STATUS_PENDING = 'pending'
STATUS_SUCCESS = 'success'
STATUS_FAIL = 'fail'
id = db.Column(db.BigInteger, primary_key=True)
content_hash = db.Column(db.Unicode(65), nullable=True, index=True)
foreign_id = db.Column(db.Unicode, unique=False, nullable=True, index=True)
schema = db.Column(db.String(255), nullable=False)
status = db.Column(db.Unicode(10), nullable=True)
meta = db.Column(JSONB, default={})
error_message = db.Column(db.Unicode(), nullable=True)
body_text = db.Column(db.Unicode(), nullable=True)
body_raw = db.Column(db.Unicode(), nullable=True)
uploader_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=True) # noqa
parent_id = db.Column(db.BigInteger, db.ForeignKey('document.id'), nullable=True, index=True) # noqa
children = db.relationship('Document', lazy='dynamic', backref=db.backref('parent', uselist=False, remote_side=[id])) # noqa
collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), nullable=False, index=True) # noqa
collection = db.relationship(Collection, backref=db.backref('documents', lazy='dynamic')) # noqa
def __init__(self, **kw):
self.meta = {}
super(Document, self).__init__(**kw)
@property
def model(self):
return model.get(self.schema)
@property
def name(self):
if self.title is not None:
return self.title
if self.file_name is not None:
return self.file_name
if self.source_url is not None:
return self.source_url
@property
def | (self):
# Slightly unintuitive naming: this just checks the document type,
# not if there actually are any records.
return self.schema in [self.SCHEMA_PDF, self.SCHEMA_TABLE]
@property
def supports_pages(self):
return self.schema == self.SCHEMA_PDF
@property
def supports_nlp(self):
structural = [
Document.SCHEMA,
Document.SCHEMA_PACKAGE,
Document.SCHEMA_FOLDER,
Document.SCHEMA_WORKBOOK,
Document.SCHEMA_VIDEO,
Document.SCHEMA_AUDIO,
]
return self.schema not in structural
@property
def ancestors(self):
if self.parent_id is None:
return []
key = cache.key('ancestors', self.id)
ancestors = cache.get_list(key)
if len(ancestors):
return ancestors
parent_key = cache.key('ancestors', self.parent_id)
ancestors = cache.get_list(parent_key)
if not len(ancestors):
ancestors = []
parent = Document.by_id(self.parent_id)
if parent is not None:
ancestors = parent.ancestors
ancestors.append(self.parent_id)
if self.model.is_a(model.get(self.SCHEMA_FOLDER)):
cache.set_list(key, ancestors, expire=cache.EXPIRE)
return ancestors
def update(self, data):
props = ('title', 'summary', 'author', 'crawler', 'source_url',
'file_name', 'mime_type', 'headers', 'date', 'authored_at',
'modified_at', 'published_at', 'retrieved_at', 'languages',
'countries', 'keywords')
for prop in props:
value = data.get(prop, self.meta.get(prop))
setattr(self, prop, value)
db.session.add(self)
def update_meta(self):
flag_modified(self, 'meta')
def delete_records(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq.delete()
db.session.flush()
def delete_tags(self):
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id == self.id)
pq.delete()
db.session.flush()
def delete(self, deleted_at=None):
self.delete_records()
self.delete_tags()
db.session.delete(self)
@classmethod
def delete_by_collection(cls, collection_id, deleted_at=None):
documents = db.session.query(cls.id)
documents = documents.filter(cls.collection_id == collection_id)
documents = documents.subquery()
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(cls)
pq = pq.filter(cls.collection_id == collection_id)
pq.delete(synchronize_session=False)
def raw_texts(self):
yield self.title
yield self.file_name
yield self.source_url
yield self.summary
yield self.author
if self.status != self.STATUS_SUCCESS:
return
yield self.body_text
if self.supports_records:
# iterate over all the associated records.
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq = pq.order_by(DocumentRecord.index.asc())
for record in pq.yield_per(10000):
yield from record.raw_texts()
@property
def texts(self):
yield from filter_texts(self.raw_texts())
@classmethod
def by_keys(cls, parent_id=None, collection_id=None, foreign_id=None,
content_hash=None):
"""Try and find a document by various criteria."""
q = cls.all()
q = q.filter(Document.collection_id == collection_id)
if parent_id is not None:
q = q.filter(Document.parent_id == parent_id)
if foreign_id is not None:
q = q.filter(Document.foreign_id == foreign_id)
elif content_hash is not None:
q = q.filter(Document.content_hash == content_hash)
else:
raise ValueError("No unique criterion for document.")
document = q.first()
if document is None:
document = cls()
document.schema = cls.SCHEMA
document.collection_id = collection_id
if parent_id is not None:
document.parent_id = parent_id
if foreign_id is not None:
document.foreign_id = foreign_id
if content_hash is not None:
document.content_hash = content_hash
db.session.add(document)
return document
@classmethod
def by_id(cls, id, collection_id=None):
if id is None:
return
q = cls.all()
q = q.filter(cls.id == id)
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
return q.first()
@classmethod
def by_collection(cls, collection_id=None):
q = cls.all()
q = q.filter(cls.collection_id == collection_id)
return q
@classmethod
def find_ids(cls, collection_id=None, failed_only=False):
q = cls.all_ids()
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
if failed_only:
q = q.filter(cls.status != cls.STATUS_SUCCESS)
q = q.order_by(cls.id.asc())
return q
def to_proxy(self):
meta = dict(self.meta)
headers = meta.pop('headers', {})
headers = {slugify(k, sep='_'): v for k, v in headers.items()}
proxy = model.get_proxy({
'id': str(self.id),
'schema': self.model,
'properties': meta
})
proxy.set('contentHash', self.content_hash)
proxy.set('parent', self.parent_id)
proxy.set('ancestors', self.ancestors)
proxy.set('processingStatus', self.status)
proxy.set('processingError', self.error_message)
proxy.set('fileSize', meta.get('file_size'))
proxy.set('fileName', meta.get('file_name'))
if not proxy.has('fileName'):
disposition = headers.get('content_disposition')
if disposition is not None:
_, attrs = cgi.parse_header(disposition)
proxy.set('fileName', attrs.get('filename'))
proxy.set('mimeType', meta.get('mime_type'))
if not proxy.has('mimeType'):
proxy.set('mimeType', headers.get('content_type'))
proxy.set('language', meta.get('languages'))
proxy.set('country', meta.get('countries'))
proxy.set('authoredAt', meta.get('authored_at'))
proxy.set('modifiedAt', meta.get('modified_at'))
proxy.set('publishedAt', meta.get('published_at'))
proxy.set('retrievedAt', meta.get('retrieved_at'))
proxy.set('sourceUrl', meta.get('source_url'))
proxy.set('messageId', meta.get('message_id'), quiet=True)
proxy.set('inReplyTo', meta.get('in_reply_to'), quiet=True)
proxy.set('bodyText', self.body_text, quiet=True)
proxy.set('bodyHtml', self.body_raw, quiet=True)
columns = meta.get('columns')
proxy.set('columns', registry.json.pack(columns), quiet=True)
proxy.set('headers', registry.json.pack(headers), quiet=True)
pdf = 'application/pdf'
if meta.get('extension') == 'pdf' or proxy.first('mimeType') == pdf:
proxy.set('pdfHash', self.content_hash, quiet=True)
proxy.add('pdfHash', meta.get('pdf_version'), quiet=True)
q = db.session.query(DocumentTag)
q = q.filter(DocumentTag.document_id == self.id)
q = q.filter(DocumentTag.type.in_(DocumentTag.MAPPING.keys()))
q = q.order_by(DocumentTag.weight.desc())
q = q.limit(Document.MAX_TAGS)
for tag in q.all():
prop = DocumentTag.MAPPING.get(tag.type)
if prop is not None:
proxy.add(prop, tag.text)
return proxy
def to_dict(self):
proxy = self.to_proxy()
data = proxy.to_full_dict()
data.update(self.to_dict_dates())
data.update({
'name': self.name,
'status': self.status,
'foreign_id': self.foreign_id,
'document_id': self.id,
'collection_id': self.collection_id,
'error_message': self.error_message,
'uploader_id': self.uploader_id,
'bulk': False,
})
return data
def __repr__(self):
return '<Document(%r,%r,%r)>' % (self.id, self.schema, self.title)
| supports_records | identifier_name |
document.py | import cgi
import logging
from normality import slugify
from followthemoney import model
from followthemoney.types import registry
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.attributes import flag_modified
from aleph.core import db, cache
from aleph.model.metadata import Metadata
from aleph.model.collection import Collection
from aleph.model.common import DatedModel
from aleph.model.document_record import DocumentRecord
from aleph.model.document_tag import DocumentTag
from aleph.util import filter_texts
log = logging.getLogger(__name__)
class Document(db.Model, DatedModel, Metadata):
MAX_TAGS = 10000
SCHEMA = 'Document'
SCHEMA_FOLDER = 'Folder'
SCHEMA_PACKAGE = 'Package'
SCHEMA_WORKBOOK = 'Workbook'
SCHEMA_TEXT = 'PlainText'
SCHEMA_HTML = 'HyperText'
SCHEMA_PDF = 'Pages'
SCHEMA_IMAGE = 'Image'
SCHEMA_AUDIO = 'Audio'
SCHEMA_VIDEO = 'Video'
SCHEMA_TABLE = 'Table'
SCHEMA_EMAIL = 'Email'
STATUS_PENDING = 'pending'
STATUS_SUCCESS = 'success'
STATUS_FAIL = 'fail'
id = db.Column(db.BigInteger, primary_key=True)
content_hash = db.Column(db.Unicode(65), nullable=True, index=True)
foreign_id = db.Column(db.Unicode, unique=False, nullable=True, index=True)
schema = db.Column(db.String(255), nullable=False)
status = db.Column(db.Unicode(10), nullable=True)
meta = db.Column(JSONB, default={})
error_message = db.Column(db.Unicode(), nullable=True)
body_text = db.Column(db.Unicode(), nullable=True)
body_raw = db.Column(db.Unicode(), nullable=True)
uploader_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=True) # noqa
parent_id = db.Column(db.BigInteger, db.ForeignKey('document.id'), nullable=True, index=True) # noqa
children = db.relationship('Document', lazy='dynamic', backref=db.backref('parent', uselist=False, remote_side=[id])) # noqa
collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), nullable=False, index=True) # noqa
collection = db.relationship(Collection, backref=db.backref('documents', lazy='dynamic')) # noqa
def __init__(self, **kw):
self.meta = {}
super(Document, self).__init__(**kw)
@property
def model(self):
return model.get(self.schema)
@property
def name(self):
if self.title is not None:
return self.title
if self.file_name is not None:
return self.file_name
if self.source_url is not None:
return self.source_url
@property
def supports_records(self):
# Slightly unintuitive naming: this just checks the document type,
# not if there actually are any records.
return self.schema in [self.SCHEMA_PDF, self.SCHEMA_TABLE]
@property
def supports_pages(self):
return self.schema == self.SCHEMA_PDF
@property
def supports_nlp(self):
structural = [
Document.SCHEMA,
Document.SCHEMA_PACKAGE,
Document.SCHEMA_FOLDER,
Document.SCHEMA_WORKBOOK,
Document.SCHEMA_VIDEO,
Document.SCHEMA_AUDIO,
]
return self.schema not in structural
@property
def ancestors(self):
if self.parent_id is None:
return []
key = cache.key('ancestors', self.id)
ancestors = cache.get_list(key)
if len(ancestors):
return ancestors
parent_key = cache.key('ancestors', self.parent_id)
ancestors = cache.get_list(parent_key)
if not len(ancestors):
ancestors = []
parent = Document.by_id(self.parent_id)
if parent is not None:
ancestors = parent.ancestors
ancestors.append(self.parent_id)
if self.model.is_a(model.get(self.SCHEMA_FOLDER)):
cache.set_list(key, ancestors, expire=cache.EXPIRE)
return ancestors
def update(self, data):
props = ('title', 'summary', 'author', 'crawler', 'source_url',
'file_name', 'mime_type', 'headers', 'date', 'authored_at',
'modified_at', 'published_at', 'retrieved_at', 'languages',
'countries', 'keywords')
for prop in props:
|
db.session.add(self)
def update_meta(self):
flag_modified(self, 'meta')
def delete_records(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq.delete()
db.session.flush()
def delete_tags(self):
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id == self.id)
pq.delete()
db.session.flush()
def delete(self, deleted_at=None):
self.delete_records()
self.delete_tags()
db.session.delete(self)
@classmethod
def delete_by_collection(cls, collection_id, deleted_at=None):
documents = db.session.query(cls.id)
documents = documents.filter(cls.collection_id == collection_id)
documents = documents.subquery()
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(cls)
pq = pq.filter(cls.collection_id == collection_id)
pq.delete(synchronize_session=False)
def raw_texts(self):
yield self.title
yield self.file_name
yield self.source_url
yield self.summary
yield self.author
if self.status != self.STATUS_SUCCESS:
return
yield self.body_text
if self.supports_records:
# iterate over all the associated records.
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq = pq.order_by(DocumentRecord.index.asc())
for record in pq.yield_per(10000):
yield from record.raw_texts()
@property
def texts(self):
yield from filter_texts(self.raw_texts())
@classmethod
def by_keys(cls, parent_id=None, collection_id=None, foreign_id=None,
content_hash=None):
"""Try and find a document by various criteria."""
q = cls.all()
q = q.filter(Document.collection_id == collection_id)
if parent_id is not None:
q = q.filter(Document.parent_id == parent_id)
if foreign_id is not None:
q = q.filter(Document.foreign_id == foreign_id)
elif content_hash is not None:
q = q.filter(Document.content_hash == content_hash)
else:
raise ValueError("No unique criterion for document.")
document = q.first()
if document is None:
document = cls()
document.schema = cls.SCHEMA
document.collection_id = collection_id
if parent_id is not None:
document.parent_id = parent_id
if foreign_id is not None:
document.foreign_id = foreign_id
if content_hash is not None:
document.content_hash = content_hash
db.session.add(document)
return document
@classmethod
def by_id(cls, id, collection_id=None):
if id is None:
return
q = cls.all()
q = q.filter(cls.id == id)
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
return q.first()
@classmethod
def by_collection(cls, collection_id=None):
q = cls.all()
q = q.filter(cls.collection_id == collection_id)
return q
@classmethod
def find_ids(cls, collection_id=None, failed_only=False):
q = cls.all_ids()
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
if failed_only:
q = q.filter(cls.status != cls.STATUS_SUCCESS)
q = q.order_by(cls.id.asc())
return q
def to_proxy(self):
meta = dict(self.meta)
headers = meta.pop('headers', {})
headers = {slugify(k, sep='_'): v for k, v in headers.items()}
proxy = model.get_proxy({
'id': str(self.id),
'schema': self.model,
'properties': meta
})
proxy.set('contentHash', self.content_hash)
proxy.set('parent', self.parent_id)
proxy.set('ancestors', self.ancestors)
proxy.set('processingStatus', self.status)
proxy.set('processingError', self.error_message)
proxy.set('fileSize', meta.get('file_size'))
proxy.set('fileName', meta.get('file_name'))
if not proxy.has('fileName'):
disposition = headers.get('content_disposition')
if disposition is not None:
_, attrs = cgi.parse_header(disposition)
proxy.set('fileName', attrs.get('filename'))
proxy.set('mimeType', meta.get('mime_type'))
if not proxy.has('mimeType'):
proxy.set('mimeType', headers.get('content_type'))
proxy.set('language', meta.get('languages'))
proxy.set('country', meta.get('countries'))
proxy.set('authoredAt', meta.get('authored_at'))
proxy.set('modifiedAt', meta.get('modified_at'))
proxy.set('publishedAt', meta.get('published_at'))
proxy.set('retrievedAt', meta.get('retrieved_at'))
proxy.set('sourceUrl', meta.get('source_url'))
proxy.set('messageId', meta.get('message_id'), quiet=True)
proxy.set('inReplyTo', meta.get('in_reply_to'), quiet=True)
proxy.set('bodyText', self.body_text, quiet=True)
proxy.set('bodyHtml', self.body_raw, quiet=True)
columns = meta.get('columns')
proxy.set('columns', registry.json.pack(columns), quiet=True)
proxy.set('headers', registry.json.pack(headers), quiet=True)
pdf = 'application/pdf'
if meta.get('extension') == 'pdf' or proxy.first('mimeType') == pdf:
proxy.set('pdfHash', self.content_hash, quiet=True)
proxy.add('pdfHash', meta.get('pdf_version'), quiet=True)
q = db.session.query(DocumentTag)
q = q.filter(DocumentTag.document_id == self.id)
q = q.filter(DocumentTag.type.in_(DocumentTag.MAPPING.keys()))
q = q.order_by(DocumentTag.weight.desc())
q = q.limit(Document.MAX_TAGS)
for tag in q.all():
prop = DocumentTag.MAPPING.get(tag.type)
if prop is not None:
proxy.add(prop, tag.text)
return proxy
def to_dict(self):
proxy = self.to_proxy()
data = proxy.to_full_dict()
data.update(self.to_dict_dates())
data.update({
'name': self.name,
'status': self.status,
'foreign_id': self.foreign_id,
'document_id': self.id,
'collection_id': self.collection_id,
'error_message': self.error_message,
'uploader_id': self.uploader_id,
'bulk': False,
})
return data
def __repr__(self):
return '<Document(%r,%r,%r)>' % (self.id, self.schema, self.title)
| value = data.get(prop, self.meta.get(prop))
setattr(self, prop, value) | conditional_block |
document.py | import cgi
import logging
from normality import slugify
from followthemoney import model
from followthemoney.types import registry
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.attributes import flag_modified
from aleph.core import db, cache
from aleph.model.metadata import Metadata
from aleph.model.collection import Collection
from aleph.model.common import DatedModel
from aleph.model.document_record import DocumentRecord
from aleph.model.document_tag import DocumentTag
from aleph.util import filter_texts
log = logging.getLogger(__name__)
class Document(db.Model, DatedModel, Metadata):
MAX_TAGS = 10000
SCHEMA = 'Document'
SCHEMA_FOLDER = 'Folder'
SCHEMA_PACKAGE = 'Package'
SCHEMA_WORKBOOK = 'Workbook'
SCHEMA_TEXT = 'PlainText'
SCHEMA_HTML = 'HyperText'
SCHEMA_PDF = 'Pages'
SCHEMA_IMAGE = 'Image'
SCHEMA_AUDIO = 'Audio'
SCHEMA_VIDEO = 'Video'
SCHEMA_TABLE = 'Table'
SCHEMA_EMAIL = 'Email'
STATUS_PENDING = 'pending'
STATUS_SUCCESS = 'success'
STATUS_FAIL = 'fail'
id = db.Column(db.BigInteger, primary_key=True)
content_hash = db.Column(db.Unicode(65), nullable=True, index=True)
foreign_id = db.Column(db.Unicode, unique=False, nullable=True, index=True)
schema = db.Column(db.String(255), nullable=False)
status = db.Column(db.Unicode(10), nullable=True)
meta = db.Column(JSONB, default={})
error_message = db.Column(db.Unicode(), nullable=True)
body_text = db.Column(db.Unicode(), nullable=True)
body_raw = db.Column(db.Unicode(), nullable=True)
uploader_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=True) # noqa
parent_id = db.Column(db.BigInteger, db.ForeignKey('document.id'), nullable=True, index=True) # noqa
children = db.relationship('Document', lazy='dynamic', backref=db.backref('parent', uselist=False, remote_side=[id])) # noqa
collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), nullable=False, index=True) # noqa
collection = db.relationship(Collection, backref=db.backref('documents', lazy='dynamic')) # noqa
def __init__(self, **kw):
self.meta = {}
super(Document, self).__init__(**kw)
@property
def model(self):
return model.get(self.schema)
@property
def name(self):
if self.title is not None:
return self.title
if self.file_name is not None:
return self.file_name
if self.source_url is not None:
return self.source_url
@property
def supports_records(self):
# Slightly unintuitive naming: this just checks the document type,
# not if there actually are any records.
return self.schema in [self.SCHEMA_PDF, self.SCHEMA_TABLE]
@property
def supports_pages(self):
return self.schema == self.SCHEMA_PDF
@property
def supports_nlp(self):
structural = [
Document.SCHEMA,
Document.SCHEMA_PACKAGE,
Document.SCHEMA_FOLDER,
Document.SCHEMA_WORKBOOK,
Document.SCHEMA_VIDEO,
Document.SCHEMA_AUDIO,
]
return self.schema not in structural
@property
def ancestors(self):
if self.parent_id is None:
return []
key = cache.key('ancestors', self.id)
ancestors = cache.get_list(key)
if len(ancestors):
return ancestors
parent_key = cache.key('ancestors', self.parent_id)
ancestors = cache.get_list(parent_key)
if not len(ancestors):
ancestors = []
parent = Document.by_id(self.parent_id)
if parent is not None:
ancestors = parent.ancestors
ancestors.append(self.parent_id)
if self.model.is_a(model.get(self.SCHEMA_FOLDER)):
cache.set_list(key, ancestors, expire=cache.EXPIRE)
return ancestors
def update(self, data):
props = ('title', 'summary', 'author', 'crawler', 'source_url',
'file_name', 'mime_type', 'headers', 'date', 'authored_at',
'modified_at', 'published_at', 'retrieved_at', 'languages',
'countries', 'keywords')
for prop in props:
value = data.get(prop, self.meta.get(prop))
setattr(self, prop, value)
db.session.add(self)
def update_meta(self):
|
def delete_records(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq.delete()
db.session.flush()
def delete_tags(self):
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id == self.id)
pq.delete()
db.session.flush()
def delete(self, deleted_at=None):
self.delete_records()
self.delete_tags()
db.session.delete(self)
@classmethod
def delete_by_collection(cls, collection_id, deleted_at=None):
documents = db.session.query(cls.id)
documents = documents.filter(cls.collection_id == collection_id)
documents = documents.subquery()
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(cls)
pq = pq.filter(cls.collection_id == collection_id)
pq.delete(synchronize_session=False)
def raw_texts(self):
yield self.title
yield self.file_name
yield self.source_url
yield self.summary
yield self.author
if self.status != self.STATUS_SUCCESS:
return
yield self.body_text
if self.supports_records:
# iterate over all the associated records.
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq = pq.order_by(DocumentRecord.index.asc())
for record in pq.yield_per(10000):
yield from record.raw_texts()
@property
def texts(self):
yield from filter_texts(self.raw_texts())
@classmethod
def by_keys(cls, parent_id=None, collection_id=None, foreign_id=None,
content_hash=None):
"""Try and find a document by various criteria."""
q = cls.all()
q = q.filter(Document.collection_id == collection_id)
if parent_id is not None:
q = q.filter(Document.parent_id == parent_id)
if foreign_id is not None:
q = q.filter(Document.foreign_id == foreign_id)
elif content_hash is not None:
q = q.filter(Document.content_hash == content_hash)
else:
raise ValueError("No unique criterion for document.")
document = q.first()
if document is None:
document = cls()
document.schema = cls.SCHEMA
document.collection_id = collection_id
if parent_id is not None:
document.parent_id = parent_id
if foreign_id is not None:
document.foreign_id = foreign_id
if content_hash is not None:
document.content_hash = content_hash
db.session.add(document)
return document
@classmethod
def by_id(cls, id, collection_id=None):
if id is None:
return
q = cls.all()
q = q.filter(cls.id == id)
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
return q.first()
@classmethod
def by_collection(cls, collection_id=None):
q = cls.all()
q = q.filter(cls.collection_id == collection_id)
return q
@classmethod
def find_ids(cls, collection_id=None, failed_only=False):
q = cls.all_ids()
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
if failed_only:
q = q.filter(cls.status != cls.STATUS_SUCCESS)
q = q.order_by(cls.id.asc())
return q
def to_proxy(self):
meta = dict(self.meta)
headers = meta.pop('headers', {})
headers = {slugify(k, sep='_'): v for k, v in headers.items()}
proxy = model.get_proxy({
'id': str(self.id),
'schema': self.model,
'properties': meta
})
proxy.set('contentHash', self.content_hash)
proxy.set('parent', self.parent_id)
proxy.set('ancestors', self.ancestors)
proxy.set('processingStatus', self.status)
proxy.set('processingError', self.error_message)
proxy.set('fileSize', meta.get('file_size'))
proxy.set('fileName', meta.get('file_name'))
if not proxy.has('fileName'):
disposition = headers.get('content_disposition')
if disposition is not None:
_, attrs = cgi.parse_header(disposition)
proxy.set('fileName', attrs.get('filename'))
proxy.set('mimeType', meta.get('mime_type'))
if not proxy.has('mimeType'):
proxy.set('mimeType', headers.get('content_type'))
proxy.set('language', meta.get('languages'))
proxy.set('country', meta.get('countries'))
proxy.set('authoredAt', meta.get('authored_at'))
proxy.set('modifiedAt', meta.get('modified_at'))
proxy.set('publishedAt', meta.get('published_at'))
proxy.set('retrievedAt', meta.get('retrieved_at'))
proxy.set('sourceUrl', meta.get('source_url'))
proxy.set('messageId', meta.get('message_id'), quiet=True)
proxy.set('inReplyTo', meta.get('in_reply_to'), quiet=True)
proxy.set('bodyText', self.body_text, quiet=True)
proxy.set('bodyHtml', self.body_raw, quiet=True)
columns = meta.get('columns')
proxy.set('columns', registry.json.pack(columns), quiet=True)
proxy.set('headers', registry.json.pack(headers), quiet=True)
pdf = 'application/pdf'
if meta.get('extension') == 'pdf' or proxy.first('mimeType') == pdf:
proxy.set('pdfHash', self.content_hash, quiet=True)
proxy.add('pdfHash', meta.get('pdf_version'), quiet=True)
q = db.session.query(DocumentTag)
q = q.filter(DocumentTag.document_id == self.id)
q = q.filter(DocumentTag.type.in_(DocumentTag.MAPPING.keys()))
q = q.order_by(DocumentTag.weight.desc())
q = q.limit(Document.MAX_TAGS)
for tag in q.all():
prop = DocumentTag.MAPPING.get(tag.type)
if prop is not None:
proxy.add(prop, tag.text)
return proxy
def to_dict(self):
proxy = self.to_proxy()
data = proxy.to_full_dict()
data.update(self.to_dict_dates())
data.update({
'name': self.name,
'status': self.status,
'foreign_id': self.foreign_id,
'document_id': self.id,
'collection_id': self.collection_id,
'error_message': self.error_message,
'uploader_id': self.uploader_id,
'bulk': False,
})
return data
def __repr__(self):
return '<Document(%r,%r,%r)>' % (self.id, self.schema, self.title)
| flag_modified(self, 'meta') | identifier_body |
document.py | import cgi
import logging
from normality import slugify
from followthemoney import model
from followthemoney.types import registry
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.attributes import flag_modified
from aleph.core import db, cache
from aleph.model.metadata import Metadata
from aleph.model.collection import Collection
from aleph.model.common import DatedModel
from aleph.model.document_record import DocumentRecord
from aleph.model.document_tag import DocumentTag
from aleph.util import filter_texts
log = logging.getLogger(__name__)
class Document(db.Model, DatedModel, Metadata):
MAX_TAGS = 10000
SCHEMA = 'Document'
SCHEMA_FOLDER = 'Folder'
SCHEMA_PACKAGE = 'Package'
SCHEMA_WORKBOOK = 'Workbook'
SCHEMA_TEXT = 'PlainText'
SCHEMA_HTML = 'HyperText'
SCHEMA_PDF = 'Pages'
SCHEMA_IMAGE = 'Image'
SCHEMA_AUDIO = 'Audio'
SCHEMA_VIDEO = 'Video'
SCHEMA_TABLE = 'Table'
SCHEMA_EMAIL = 'Email'
STATUS_PENDING = 'pending'
STATUS_SUCCESS = 'success'
STATUS_FAIL = 'fail'
id = db.Column(db.BigInteger, primary_key=True)
content_hash = db.Column(db.Unicode(65), nullable=True, index=True)
foreign_id = db.Column(db.Unicode, unique=False, nullable=True, index=True)
schema = db.Column(db.String(255), nullable=False)
status = db.Column(db.Unicode(10), nullable=True)
meta = db.Column(JSONB, default={})
error_message = db.Column(db.Unicode(), nullable=True)
body_text = db.Column(db.Unicode(), nullable=True)
body_raw = db.Column(db.Unicode(), nullable=True)
uploader_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=True) # noqa
parent_id = db.Column(db.BigInteger, db.ForeignKey('document.id'), nullable=True, index=True) # noqa
children = db.relationship('Document', lazy='dynamic', backref=db.backref('parent', uselist=False, remote_side=[id])) # noqa
collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), nullable=False, index=True) # noqa
collection = db.relationship(Collection, backref=db.backref('documents', lazy='dynamic')) # noqa
def __init__(self, **kw):
self.meta = {}
super(Document, self).__init__(**kw)
@property
def model(self):
return model.get(self.schema)
@property
def name(self):
if self.title is not None:
return self.title
if self.file_name is not None:
return self.file_name |
@property
def supports_records(self):
# Slightly unintuitive naming: this just checks the document type,
# not if there actually are any records.
return self.schema in [self.SCHEMA_PDF, self.SCHEMA_TABLE]
@property
def supports_pages(self):
return self.schema == self.SCHEMA_PDF
@property
def supports_nlp(self):
structural = [
Document.SCHEMA,
Document.SCHEMA_PACKAGE,
Document.SCHEMA_FOLDER,
Document.SCHEMA_WORKBOOK,
Document.SCHEMA_VIDEO,
Document.SCHEMA_AUDIO,
]
return self.schema not in structural
@property
def ancestors(self):
if self.parent_id is None:
return []
key = cache.key('ancestors', self.id)
ancestors = cache.get_list(key)
if len(ancestors):
return ancestors
parent_key = cache.key('ancestors', self.parent_id)
ancestors = cache.get_list(parent_key)
if not len(ancestors):
ancestors = []
parent = Document.by_id(self.parent_id)
if parent is not None:
ancestors = parent.ancestors
ancestors.append(self.parent_id)
if self.model.is_a(model.get(self.SCHEMA_FOLDER)):
cache.set_list(key, ancestors, expire=cache.EXPIRE)
return ancestors
def update(self, data):
props = ('title', 'summary', 'author', 'crawler', 'source_url',
'file_name', 'mime_type', 'headers', 'date', 'authored_at',
'modified_at', 'published_at', 'retrieved_at', 'languages',
'countries', 'keywords')
for prop in props:
value = data.get(prop, self.meta.get(prop))
setattr(self, prop, value)
db.session.add(self)
def update_meta(self):
flag_modified(self, 'meta')
def delete_records(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq.delete()
db.session.flush()
def delete_tags(self):
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id == self.id)
pq.delete()
db.session.flush()
def delete(self, deleted_at=None):
self.delete_records()
self.delete_tags()
db.session.delete(self)
@classmethod
def delete_by_collection(cls, collection_id, deleted_at=None):
documents = db.session.query(cls.id)
documents = documents.filter(cls.collection_id == collection_id)
documents = documents.subquery()
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(cls)
pq = pq.filter(cls.collection_id == collection_id)
pq.delete(synchronize_session=False)
def raw_texts(self):
yield self.title
yield self.file_name
yield self.source_url
yield self.summary
yield self.author
if self.status != self.STATUS_SUCCESS:
return
yield self.body_text
if self.supports_records:
# iterate over all the associated records.
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq = pq.order_by(DocumentRecord.index.asc())
for record in pq.yield_per(10000):
yield from record.raw_texts()
@property
def texts(self):
yield from filter_texts(self.raw_texts())
@classmethod
def by_keys(cls, parent_id=None, collection_id=None, foreign_id=None,
content_hash=None):
"""Try and find a document by various criteria."""
q = cls.all()
q = q.filter(Document.collection_id == collection_id)
if parent_id is not None:
q = q.filter(Document.parent_id == parent_id)
if foreign_id is not None:
q = q.filter(Document.foreign_id == foreign_id)
elif content_hash is not None:
q = q.filter(Document.content_hash == content_hash)
else:
raise ValueError("No unique criterion for document.")
document = q.first()
if document is None:
document = cls()
document.schema = cls.SCHEMA
document.collection_id = collection_id
if parent_id is not None:
document.parent_id = parent_id
if foreign_id is not None:
document.foreign_id = foreign_id
if content_hash is not None:
document.content_hash = content_hash
db.session.add(document)
return document
@classmethod
def by_id(cls, id, collection_id=None):
if id is None:
return
q = cls.all()
q = q.filter(cls.id == id)
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
return q.first()
@classmethod
def by_collection(cls, collection_id=None):
q = cls.all()
q = q.filter(cls.collection_id == collection_id)
return q
@classmethod
def find_ids(cls, collection_id=None, failed_only=False):
q = cls.all_ids()
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
if failed_only:
q = q.filter(cls.status != cls.STATUS_SUCCESS)
q = q.order_by(cls.id.asc())
return q
def to_proxy(self):
meta = dict(self.meta)
headers = meta.pop('headers', {})
headers = {slugify(k, sep='_'): v for k, v in headers.items()}
proxy = model.get_proxy({
'id': str(self.id),
'schema': self.model,
'properties': meta
})
proxy.set('contentHash', self.content_hash)
proxy.set('parent', self.parent_id)
proxy.set('ancestors', self.ancestors)
proxy.set('processingStatus', self.status)
proxy.set('processingError', self.error_message)
proxy.set('fileSize', meta.get('file_size'))
proxy.set('fileName', meta.get('file_name'))
if not proxy.has('fileName'):
disposition = headers.get('content_disposition')
if disposition is not None:
_, attrs = cgi.parse_header(disposition)
proxy.set('fileName', attrs.get('filename'))
proxy.set('mimeType', meta.get('mime_type'))
if not proxy.has('mimeType'):
proxy.set('mimeType', headers.get('content_type'))
proxy.set('language', meta.get('languages'))
proxy.set('country', meta.get('countries'))
proxy.set('authoredAt', meta.get('authored_at'))
proxy.set('modifiedAt', meta.get('modified_at'))
proxy.set('publishedAt', meta.get('published_at'))
proxy.set('retrievedAt', meta.get('retrieved_at'))
proxy.set('sourceUrl', meta.get('source_url'))
proxy.set('messageId', meta.get('message_id'), quiet=True)
proxy.set('inReplyTo', meta.get('in_reply_to'), quiet=True)
proxy.set('bodyText', self.body_text, quiet=True)
proxy.set('bodyHtml', self.body_raw, quiet=True)
columns = meta.get('columns')
proxy.set('columns', registry.json.pack(columns), quiet=True)
proxy.set('headers', registry.json.pack(headers), quiet=True)
pdf = 'application/pdf'
if meta.get('extension') == 'pdf' or proxy.first('mimeType') == pdf:
proxy.set('pdfHash', self.content_hash, quiet=True)
proxy.add('pdfHash', meta.get('pdf_version'), quiet=True)
q = db.session.query(DocumentTag)
q = q.filter(DocumentTag.document_id == self.id)
q = q.filter(DocumentTag.type.in_(DocumentTag.MAPPING.keys()))
q = q.order_by(DocumentTag.weight.desc())
q = q.limit(Document.MAX_TAGS)
for tag in q.all():
prop = DocumentTag.MAPPING.get(tag.type)
if prop is not None:
proxy.add(prop, tag.text)
return proxy
def to_dict(self):
proxy = self.to_proxy()
data = proxy.to_full_dict()
data.update(self.to_dict_dates())
data.update({
'name': self.name,
'status': self.status,
'foreign_id': self.foreign_id,
'document_id': self.id,
'collection_id': self.collection_id,
'error_message': self.error_message,
'uploader_id': self.uploader_id,
'bulk': False,
})
return data
def __repr__(self):
return '<Document(%r,%r,%r)>' % (self.id, self.schema, self.title) | if self.source_url is not None:
return self.source_url | random_line_split |
util.go | // Package ppsutil contains utilities for various PPS-related tasks, which are
// shared by both the PPS API and the worker binary. These utilities include:
// - Getting the RC name and querying k8s reguarding pipelines
// - Reading and writing pipeline resource requests and limits
// - Reading and writing EtcdPipelineInfos and PipelineInfos[1]
//
// [1] Note that PipelineInfo in particular is complicated because it contains
// fields that are not always set or are stored in multiple places
// ('job_state', for example, is not stored in PFS along with the rest of each
// PipelineInfo, because this field is volatile and we cannot commit to PFS
// every time it changes. 'job_counts' is the same, and 'reason' is in etcd
// because it is only updated alongside 'job_state'). As of 12/7/2017, these
// are the only fields not stored in PFS.
package ppsutil
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"net/url"
"os"
"path"
"strings"
"github.com/gogo/protobuf/jsonpb"
"github.com/gogo/protobuf/proto"
"github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pps"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
col "github.com/pachyderm/pachyderm/src/server/pkg/collection"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts"
etcd "github.com/coreos/etcd/clientv3"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kube "k8s.io/client-go/kubernetes"
)
// PipelineRepo creates a pfs repo for a given pipeline.
func PipelineRepo(pipeline *ppsclient.Pipeline) *pfs.Repo {
return &pfs.Repo{Name: pipeline.Name}
}
// PipelineRcName generates the name of the k8s replication controller that
// manages a pipeline's workers
func PipelineRcName(name string, version uint64) string {
// k8s won't allow RC names that contain upper-case letters
// or underscores
// TODO: deal with name collision
name = strings.Replace(name, "_", "-", -1)
return fmt.Sprintf("pipeline-%s-v%d", strings.ToLower(name), version)
}
// GetRequestsResourceListFromPipeline returns a list of resources that the pipeline,
// minimally requires.
func GetRequestsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceRequests, pipelineInfo.CacheSize)
}
func getResourceListFromSpec(resources *pps.ResourceSpec, cacheSize string) (*v1.ResourceList, error) {
var result v1.ResourceList = make(map[v1.ResourceName]resource.Quantity)
cpuStr := fmt.Sprintf("%f", resources.Cpu)
cpuQuantity, err := resource.ParseQuantity(cpuStr)
if err != nil {
log.Warnf("error parsing cpu string: %s: %+v", cpuStr, err)
} else {
result[v1.ResourceCPU] = cpuQuantity
}
memQuantity, err := resource.ParseQuantity(resources.Memory)
if err != nil {
log.Warnf("error parsing memory string: %s: %+v", resources.Memory, err)
} else {
result[v1.ResourceMemory] = memQuantity
}
// Here we are sanity checking. A pipeline should request at least
// as much memory as it needs for caching.
cacheQuantity, err := resource.ParseQuantity(cacheSize)
if err != nil {
log.Warnf("error parsing cache string: %s: %+v", cacheSize, err)
} else if cacheQuantity.Cmp(memQuantity) > 0 {
result[v1.ResourceMemory] = cacheQuantity
}
if resources.Gpu != 0 {
gpuStr := fmt.Sprintf("%d", resources.Gpu)
gpuQuantity, err := resource.ParseQuantity(gpuStr)
if err != nil {
log.Warnf("error parsing gpu string: %s: %+v", gpuStr, err)
} else {
result[v1.ResourceNvidiaGPU] = gpuQuantity
}
}
return &result, nil
}
// GetLimitsResourceListFromPipeline returns a list of resources that the pipeline,
// maximally is limited to.
func GetLimitsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceLimits, pipelineInfo.CacheSize)
}
// getNumNodes attempts to retrieve the number of nodes in the current k8s
// cluster
func getNumNodes(kubeClient *kube.Clientset) (int, error) {
nodeList, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return 0, fmt.Errorf("unable to retrieve node list from k8s to determine parallelism: %v", err)
}
if len(nodeList.Items) == 0 {
return 0, fmt.Errorf("pachyderm.pps.jobserver: no k8s nodes found")
}
return len(nodeList.Items), nil
}
// GetExpectedNumWorkers computes the expected number of workers that
// pachyderm will start given the ParallelismSpec 'spec'.
//
// This is only exported for testing
func GetExpectedNumWorkers(kubeClient *kube.Clientset, spec *ppsclient.ParallelismSpec) (int, error) {
if spec == nil || (spec.Constant == 0 && spec.Coefficient == 0) {
return 1, nil
} else if spec.Constant > 0 && spec.Coefficient == 0 {
return int(spec.Constant), nil
} else if spec.Constant == 0 && spec.Coefficient > 0 {
// Start ('coefficient' * 'nodes') workers. Determine number of workers
numNodes, err := getNumNodes(kubeClient)
if err != nil {
return 0, err
}
result := math.Floor(spec.Coefficient * float64(numNodes))
return int(math.Max(result, 1)), nil
}
return 0, fmt.Errorf("Unable to interpret ParallelismSpec %+v", spec)
}
// GetPipelineInfo retrieves and returns a valid PipelineInfo from PFS. It does
// the PFS read/unmarshalling of bytes as well as filling in missing fields
func GetPipelineInfo(pachClient *client.APIClient, ptr *pps.EtcdPipelineInfo) (*pps.PipelineInfo, error) {
buf := bytes.Buffer{}
if err := pachClient.GetFile(ppsconsts.SpecRepo, ptr.SpecCommit.ID, ppsconsts.SpecFile, 0, 0, &buf); err != nil {
return nil, fmt.Errorf("could not read existing PipelineInfo from PFS: %v", err)
}
result := &pps.PipelineInfo{}
if err := result.Unmarshal(buf.Bytes()); err != nil {
return nil, fmt.Errorf("could not unmarshal PipelineInfo bytes from PFS: %v", err)
}
result.State = ptr.State
result.Reason = ptr.Reason
result.JobCounts = ptr.JobCounts
result.SpecCommit = ptr.SpecCommit
return result, nil
}
// FailPipeline updates the pipeline's state to failed and sets the failure reason
func FailPipeline(ctx context.Context, etcdClient *etcd.Client, pipelinesCollection col.Collection, pipelineName string, reason string) error {
_, err := col.NewSTM(ctx, etcdClient, func(stm col.STM) error {
pipelines := pipelinesCollection.ReadWrite(stm)
pipelinePtr := new(pps.EtcdPipelineInfo)
if err := pipelines.Get(pipelineName, pipelinePtr); err != nil {
return err
}
pipelinePtr.State = pps.PipelineState_PIPELINE_FAILURE
pipelinePtr.Reason = reason
pipelines.Put(pipelineName, pipelinePtr)
return nil
})
return err
}
// JobInput fills in the commits for a JobInfo
func JobInput(pipelineInfo *pps.PipelineInfo, outputCommitInfo *pfs.CommitInfo) *pps.Input {
// branchToCommit maps strings of the form "<repo>/<branch>" to PFS commits | jobInput := proto.Clone(pipelineInfo.Input).(*pps.Input)
pps.VisitInput(jobInput, func(input *pps.Input) {
if input.Atom != nil {
if commit, ok := branchToCommit[key(input.Atom.Repo, input.Atom.Branch)]; ok {
input.Atom.Commit = commit.ID
}
}
if input.Cron != nil {
if commit, ok := branchToCommit[key(input.Cron.Repo, "master")]; ok {
input.Cron.Commit = commit.ID
}
}
if input.Git != nil {
if commit, ok := branchToCommit[key(input.Git.Name, input.Git.Branch)]; ok {
input.Git.Commit = commit.ID
}
}
})
return jobInput
}
// PipelineReqFromInfo converts a PipelineInfo into a CreatePipelineRequest.
func PipelineReqFromInfo(pipelineInfo *ppsclient.PipelineInfo) *ppsclient.CreatePipelineRequest {
return &ppsclient.CreatePipelineRequest{
Pipeline: pipelineInfo.Pipeline,
Transform: pipelineInfo.Transform,
ParallelismSpec: pipelineInfo.ParallelismSpec,
Egress: pipelineInfo.Egress,
OutputBranch: pipelineInfo.OutputBranch,
ScaleDownThreshold: pipelineInfo.ScaleDownThreshold,
ResourceRequests: pipelineInfo.ResourceRequests,
ResourceLimits: pipelineInfo.ResourceLimits,
Input: pipelineInfo.Input,
Description: pipelineInfo.Description,
Incremental: pipelineInfo.Incremental,
CacheSize: pipelineInfo.CacheSize,
EnableStats: pipelineInfo.EnableStats,
Batch: pipelineInfo.Batch,
MaxQueueSize: pipelineInfo.MaxQueueSize,
Service: pipelineInfo.Service,
ChunkSpec: pipelineInfo.ChunkSpec,
DatumTimeout: pipelineInfo.DatumTimeout,
JobTimeout: pipelineInfo.JobTimeout,
Salt: pipelineInfo.Salt,
}
}
// PipelineManifestReader helps with unmarshalling pipeline configs from JSON. It's used by
// create-pipeline and update-pipeline
type PipelineManifestReader struct {
buf bytes.Buffer
decoder *json.Decoder
}
// NewPipelineManifestReader creates a new manifest reader from a path.
func NewPipelineManifestReader(path string) (result *PipelineManifestReader, retErr error) {
result = &PipelineManifestReader{}
var pipelineReader io.Reader
if path == "-" {
pipelineReader = io.TeeReader(os.Stdin, &result.buf)
fmt.Print("Reading from stdin.\n")
} else if url, err := url.Parse(path); err == nil && url.Scheme != "" {
resp, err := http.Get(url.String())
if err != nil {
return nil, err
}
defer func() {
if err := resp.Body.Close(); err != nil && retErr == nil {
retErr = err
}
}()
rawBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
pipelineReader = io.TeeReader(strings.NewReader(string(rawBytes)), &result.buf)
} else {
rawBytes, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
pipelineReader = io.TeeReader(strings.NewReader(string(rawBytes)), &result.buf)
}
result.decoder = json.NewDecoder(pipelineReader)
return result, nil
}
// NextCreatePipelineRequest gets the next request from the manifest reader.
func (r *PipelineManifestReader) NextCreatePipelineRequest() (*ppsclient.CreatePipelineRequest, error) {
var result ppsclient.CreatePipelineRequest
if err := jsonpb.UnmarshalNext(r.decoder, &result); err != nil {
if err == io.EOF {
return nil, err
}
return nil, fmt.Errorf("malformed pipeline spec: %s", err)
}
return &result, nil
}
// DescribeSyntaxError describes a syntax error encountered parsing json.
func DescribeSyntaxError(originalErr error, parsedBuffer bytes.Buffer) error {
sErr, ok := originalErr.(*json.SyntaxError)
if !ok {
return originalErr
}
buffer := make([]byte, sErr.Offset)
parsedBuffer.Read(buffer)
lineOffset := strings.LastIndex(string(buffer[:len(buffer)-1]), "\n")
if lineOffset == -1 {
lineOffset = 0
}
lines := strings.Split(string(buffer[:len(buffer)-1]), "\n")
lineNumber := len(lines)
descriptiveErrorString := fmt.Sprintf("Syntax Error on line %v:\n%v\n%v^\n%v\n",
lineNumber,
string(buffer[lineOffset:]),
strings.Repeat(" ", int(sErr.Offset)-2-lineOffset),
originalErr,
)
return errors.New(descriptiveErrorString)
}
// IsTerminal returns 'true' if 'state' indicates that the job is done (i.e.
// the state will not change later: SUCCESS, FAILURE, KILLED) and 'false'
// otherwise.
func IsTerminal(state pps.JobState) bool {
switch state {
case pps.JobState_JOB_SUCCESS, pps.JobState_JOB_FAILURE, pps.JobState_JOB_KILLED:
return true
case pps.JobState_JOB_STARTING, pps.JobState_JOB_RUNNING:
return false
default:
panic(fmt.Sprintf("unrecognized job state: %s", state))
}
} | branchToCommit := make(map[string]*pfs.Commit)
key := path.Join
for i, provCommit := range outputCommitInfo.Provenance {
branchToCommit[key(provCommit.Repo.Name, outputCommitInfo.BranchProvenance[i].Name)] = provCommit
} | random_line_split |
util.go | // Package ppsutil contains utilities for various PPS-related tasks, which are
// shared by both the PPS API and the worker binary. These utilities include:
// - Getting the RC name and querying k8s reguarding pipelines
// - Reading and writing pipeline resource requests and limits
// - Reading and writing EtcdPipelineInfos and PipelineInfos[1]
//
// [1] Note that PipelineInfo in particular is complicated because it contains
// fields that are not always set or are stored in multiple places
// ('job_state', for example, is not stored in PFS along with the rest of each
// PipelineInfo, because this field is volatile and we cannot commit to PFS
// every time it changes. 'job_counts' is the same, and 'reason' is in etcd
// because it is only updated alongside 'job_state'). As of 12/7/2017, these
// are the only fields not stored in PFS.
package ppsutil
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"net/url"
"os"
"path"
"strings"
"github.com/gogo/protobuf/jsonpb"
"github.com/gogo/protobuf/proto"
"github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pps"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
col "github.com/pachyderm/pachyderm/src/server/pkg/collection"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts"
etcd "github.com/coreos/etcd/clientv3"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kube "k8s.io/client-go/kubernetes"
)
// PipelineRepo creates a pfs repo for a given pipeline.
func PipelineRepo(pipeline *ppsclient.Pipeline) *pfs.Repo {
return &pfs.Repo{Name: pipeline.Name}
}
// PipelineRcName generates the name of the k8s replication controller that
// manages a pipeline's workers
func PipelineRcName(name string, version uint64) string {
// k8s won't allow RC names that contain upper-case letters
// or underscores
// TODO: deal with name collision
name = strings.Replace(name, "_", "-", -1)
return fmt.Sprintf("pipeline-%s-v%d", strings.ToLower(name), version)
}
// GetRequestsResourceListFromPipeline returns a list of resources that the pipeline,
// minimally requires.
func GetRequestsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceRequests, pipelineInfo.CacheSize)
}
func getResourceListFromSpec(resources *pps.ResourceSpec, cacheSize string) (*v1.ResourceList, error) {
var result v1.ResourceList = make(map[v1.ResourceName]resource.Quantity)
cpuStr := fmt.Sprintf("%f", resources.Cpu)
cpuQuantity, err := resource.ParseQuantity(cpuStr)
if err != nil {
log.Warnf("error parsing cpu string: %s: %+v", cpuStr, err)
} else {
result[v1.ResourceCPU] = cpuQuantity
}
memQuantity, err := resource.ParseQuantity(resources.Memory)
if err != nil {
log.Warnf("error parsing memory string: %s: %+v", resources.Memory, err)
} else {
result[v1.ResourceMemory] = memQuantity
}
// Here we are sanity checking. A pipeline should request at least
// as much memory as it needs for caching.
cacheQuantity, err := resource.ParseQuantity(cacheSize)
if err != nil {
log.Warnf("error parsing cache string: %s: %+v", cacheSize, err)
} else if cacheQuantity.Cmp(memQuantity) > 0 {
result[v1.ResourceMemory] = cacheQuantity
}
if resources.Gpu != 0 {
gpuStr := fmt.Sprintf("%d", resources.Gpu)
gpuQuantity, err := resource.ParseQuantity(gpuStr)
if err != nil {
log.Warnf("error parsing gpu string: %s: %+v", gpuStr, err)
} else {
result[v1.ResourceNvidiaGPU] = gpuQuantity
}
}
return &result, nil
}
// GetLimitsResourceListFromPipeline returns a list of resources that the pipeline,
// maximally is limited to.
func GetLimitsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceLimits, pipelineInfo.CacheSize)
}
// getNumNodes attempts to retrieve the number of nodes in the current k8s
// cluster
func getNumNodes(kubeClient *kube.Clientset) (int, error) {
nodeList, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return 0, fmt.Errorf("unable to retrieve node list from k8s to determine parallelism: %v", err)
}
if len(nodeList.Items) == 0 {
return 0, fmt.Errorf("pachyderm.pps.jobserver: no k8s nodes found")
}
return len(nodeList.Items), nil
}
// GetExpectedNumWorkers computes the expected number of workers that
// pachyderm will start given the ParallelismSpec 'spec'.
//
// This is only exported for testing
func GetExpectedNumWorkers(kubeClient *kube.Clientset, spec *ppsclient.ParallelismSpec) (int, error) {
if spec == nil || (spec.Constant == 0 && spec.Coefficient == 0) {
return 1, nil
} else if spec.Constant > 0 && spec.Coefficient == 0 {
return int(spec.Constant), nil
} else if spec.Constant == 0 && spec.Coefficient > 0 {
// Start ('coefficient' * 'nodes') workers. Determine number of workers
numNodes, err := getNumNodes(kubeClient)
if err != nil {
return 0, err
}
result := math.Floor(spec.Coefficient * float64(numNodes))
return int(math.Max(result, 1)), nil
}
return 0, fmt.Errorf("Unable to interpret ParallelismSpec %+v", spec)
}
// GetPipelineInfo retrieves and returns a valid PipelineInfo from PFS. It does
// the PFS read/unmarshalling of bytes as well as filling in missing fields
func GetPipelineInfo(pachClient *client.APIClient, ptr *pps.EtcdPipelineInfo) (*pps.PipelineInfo, error) |
// FailPipeline updates the pipeline's state to failed and sets the failure reason
func FailPipeline(ctx context.Context, etcdClient *etcd.Client, pipelinesCollection col.Collection, pipelineName string, reason string) error {
_, err := col.NewSTM(ctx, etcdClient, func(stm col.STM) error {
pipelines := pipelinesCollection.ReadWrite(stm)
pipelinePtr := new(pps.EtcdPipelineInfo)
if err := pipelines.Get(pipelineName, pipelinePtr); err != nil {
return err
}
pipelinePtr.State = pps.PipelineState_PIPELINE_FAILURE
pipelinePtr.Reason = reason
pipelines.Put(pipelineName, pipelinePtr)
return nil
})
return err
}
// JobInput fills in the commits for a JobInfo
func JobInput(pipelineInfo *pps.PipelineInfo, outputCommitInfo *pfs.CommitInfo) *pps.Input {
// branchToCommit maps strings of the form "<repo>/<branch>" to PFS commits
branchToCommit := make(map[string]*pfs.Commit)
key := path.Join
for i, provCommit := range outputCommitInfo.Provenance {
branchToCommit[key(provCommit.Repo.Name, outputCommitInfo.BranchProvenance[i].Name)] = provCommit
}
jobInput := proto.Clone(pipelineInfo.Input).(*pps.Input)
pps.VisitInput(jobInput, func(input *pps.Input) {
if input.Atom != nil {
if commit, ok := branchToCommit[key(input.Atom.Repo, input.Atom.Branch)]; ok {
input.Atom.Commit = commit.ID
}
}
if input.Cron != nil {
if commit, ok := branchToCommit[key(input.Cron.Repo, "master")]; ok {
input.Cron.Commit = commit.ID
}
}
if input.Git != nil {
if commit, ok := branchToCommit[key(input.Git.Name, input.Git.Branch)]; ok {
input.Git.Commit = commit.ID
}
}
})
return jobInput
}
// PipelineReqFromInfo converts a PipelineInfo into a CreatePipelineRequest.
func PipelineReqFromInfo(pipelineInfo *ppsclient.PipelineInfo) *ppsclient.CreatePipelineRequest {
return &ppsclient.CreatePipelineRequest{
Pipeline: pipelineInfo.Pipeline,
Transform: pipelineInfo.Transform,
ParallelismSpec: pipelineInfo.ParallelismSpec,
Egress: pipelineInfo.Egress,
OutputBranch: pipelineInfo.OutputBranch,
ScaleDownThreshold: pipelineInfo.ScaleDownThreshold,
ResourceRequests: pipelineInfo.ResourceRequests,
ResourceLimits: pipelineInfo.ResourceLimits,
Input: pipelineInfo.Input,
Description: pipelineInfo.Description,
Incremental: pipelineInfo.Incremental,
CacheSize: pipelineInfo.CacheSize,
EnableStats: pipelineInfo.EnableStats,
Batch: pipelineInfo.Batch,
MaxQueueSize: pipelineInfo.MaxQueueSize,
Service: pipelineInfo.Service,
ChunkSpec: pipelineInfo.ChunkSpec,
DatumTimeout: pipelineInfo.DatumTimeout,
JobTimeout: pipelineInfo.JobTimeout,
Salt: pipelineInfo.Salt,
}
}
// PipelineManifestReader helps with unmarshalling pipeline configs from JSON. It's used by
// create-pipeline and update-pipeline
type PipelineManifestReader struct {
buf bytes.Buffer
decoder *json.Decoder
}
// NewPipelineManifestReader creates a new manifest reader from a path.
func NewPipelineManifestReader(path string) (result *PipelineManifestReader, retErr error) {
result = &PipelineManifestReader{}
var pipelineReader io.Reader
if path == "-" {
pipelineReader = io.TeeReader(os.Stdin, &result.buf)
fmt.Print("Reading from stdin.\n")
} else if url, err := url.Parse(path); err == nil && url.Scheme != "" {
resp, err := http.Get(url.String())
if err != nil {
return nil, err
}
defer func() {
if err := resp.Body.Close(); err != nil && retErr == nil {
retErr = err
}
}()
rawBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
pipelineReader = io.TeeReader(strings.NewReader(string(rawBytes)), &result.buf)
} else {
rawBytes, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
pipelineReader = io.TeeReader(strings.NewReader(string(rawBytes)), &result.buf)
}
result.decoder = json.NewDecoder(pipelineReader)
return result, nil
}
// NextCreatePipelineRequest gets the next request from the manifest reader.
func (r *PipelineManifestReader) NextCreatePipelineRequest() (*ppsclient.CreatePipelineRequest, error) {
var result ppsclient.CreatePipelineRequest
if err := jsonpb.UnmarshalNext(r.decoder, &result); err != nil {
if err == io.EOF {
return nil, err
}
return nil, fmt.Errorf("malformed pipeline spec: %s", err)
}
return &result, nil
}
// DescribeSyntaxError describes a syntax error encountered parsing json.
func DescribeSyntaxError(originalErr error, parsedBuffer bytes.Buffer) error {
sErr, ok := originalErr.(*json.SyntaxError)
if !ok {
return originalErr
}
buffer := make([]byte, sErr.Offset)
parsedBuffer.Read(buffer)
lineOffset := strings.LastIndex(string(buffer[:len(buffer)-1]), "\n")
if lineOffset == -1 {
lineOffset = 0
}
lines := strings.Split(string(buffer[:len(buffer)-1]), "\n")
lineNumber := len(lines)
descriptiveErrorString := fmt.Sprintf("Syntax Error on line %v:\n%v\n%v^\n%v\n",
lineNumber,
string(buffer[lineOffset:]),
strings.Repeat(" ", int(sErr.Offset)-2-lineOffset),
originalErr,
)
return errors.New(descriptiveErrorString)
}
// IsTerminal returns 'true' if 'state' indicates that the job is done (i.e.
// the state will not change later: SUCCESS, FAILURE, KILLED) and 'false'
// otherwise.
func IsTerminal(state pps.JobState) bool {
switch state {
case pps.JobState_JOB_SUCCESS, pps.JobState_JOB_FAILURE, pps.JobState_JOB_KILLED:
return true
case pps.JobState_JOB_STARTING, pps.JobState_JOB_RUNNING:
return false
default:
panic(fmt.Sprintf("unrecognized job state: %s", state))
}
}
| {
buf := bytes.Buffer{}
if err := pachClient.GetFile(ppsconsts.SpecRepo, ptr.SpecCommit.ID, ppsconsts.SpecFile, 0, 0, &buf); err != nil {
return nil, fmt.Errorf("could not read existing PipelineInfo from PFS: %v", err)
}
result := &pps.PipelineInfo{}
if err := result.Unmarshal(buf.Bytes()); err != nil {
return nil, fmt.Errorf("could not unmarshal PipelineInfo bytes from PFS: %v", err)
}
result.State = ptr.State
result.Reason = ptr.Reason
result.JobCounts = ptr.JobCounts
result.SpecCommit = ptr.SpecCommit
return result, nil
} | identifier_body |
util.go | // Package ppsutil contains utilities for various PPS-related tasks, which are
// shared by both the PPS API and the worker binary. These utilities include:
// - Getting the RC name and querying k8s reguarding pipelines
// - Reading and writing pipeline resource requests and limits
// - Reading and writing EtcdPipelineInfos and PipelineInfos[1]
//
// [1] Note that PipelineInfo in particular is complicated because it contains
// fields that are not always set or are stored in multiple places
// ('job_state', for example, is not stored in PFS along with the rest of each
// PipelineInfo, because this field is volatile and we cannot commit to PFS
// every time it changes. 'job_counts' is the same, and 'reason' is in etcd
// because it is only updated alongside 'job_state'). As of 12/7/2017, these
// are the only fields not stored in PFS.
package ppsutil
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"net/url"
"os"
"path"
"strings"
"github.com/gogo/protobuf/jsonpb"
"github.com/gogo/protobuf/proto"
"github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pps"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
col "github.com/pachyderm/pachyderm/src/server/pkg/collection"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts"
etcd "github.com/coreos/etcd/clientv3"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kube "k8s.io/client-go/kubernetes"
)
// PipelineRepo creates a pfs repo for a given pipeline.
func PipelineRepo(pipeline *ppsclient.Pipeline) *pfs.Repo {
return &pfs.Repo{Name: pipeline.Name}
}
// PipelineRcName generates the name of the k8s replication controller that
// manages a pipeline's workers
func PipelineRcName(name string, version uint64) string {
// k8s won't allow RC names that contain upper-case letters
// or underscores
// TODO: deal with name collision
name = strings.Replace(name, "_", "-", -1)
return fmt.Sprintf("pipeline-%s-v%d", strings.ToLower(name), version)
}
// GetRequestsResourceListFromPipeline returns a list of resources that the pipeline,
// minimally requires.
func GetRequestsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceRequests, pipelineInfo.CacheSize)
}
func getResourceListFromSpec(resources *pps.ResourceSpec, cacheSize string) (*v1.ResourceList, error) {
var result v1.ResourceList = make(map[v1.ResourceName]resource.Quantity)
cpuStr := fmt.Sprintf("%f", resources.Cpu)
cpuQuantity, err := resource.ParseQuantity(cpuStr)
if err != nil {
log.Warnf("error parsing cpu string: %s: %+v", cpuStr, err)
} else {
result[v1.ResourceCPU] = cpuQuantity
}
memQuantity, err := resource.ParseQuantity(resources.Memory)
if err != nil {
log.Warnf("error parsing memory string: %s: %+v", resources.Memory, err)
} else {
result[v1.ResourceMemory] = memQuantity
}
// Here we are sanity checking. A pipeline should request at least
// as much memory as it needs for caching.
cacheQuantity, err := resource.ParseQuantity(cacheSize)
if err != nil {
log.Warnf("error parsing cache string: %s: %+v", cacheSize, err)
} else if cacheQuantity.Cmp(memQuantity) > 0 {
result[v1.ResourceMemory] = cacheQuantity
}
if resources.Gpu != 0 {
gpuStr := fmt.Sprintf("%d", resources.Gpu)
gpuQuantity, err := resource.ParseQuantity(gpuStr)
if err != nil {
log.Warnf("error parsing gpu string: %s: %+v", gpuStr, err)
} else {
result[v1.ResourceNvidiaGPU] = gpuQuantity
}
}
return &result, nil
}
// GetLimitsResourceListFromPipeline returns a list of resources that the pipeline,
// maximally is limited to.
func GetLimitsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceLimits, pipelineInfo.CacheSize)
}
// getNumNodes attempts to retrieve the number of nodes in the current k8s
// cluster
func getNumNodes(kubeClient *kube.Clientset) (int, error) {
nodeList, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return 0, fmt.Errorf("unable to retrieve node list from k8s to determine parallelism: %v", err)
}
if len(nodeList.Items) == 0 {
return 0, fmt.Errorf("pachyderm.pps.jobserver: no k8s nodes found")
}
return len(nodeList.Items), nil
}
// GetExpectedNumWorkers computes the expected number of workers that
// pachyderm will start given the ParallelismSpec 'spec'.
//
// This is only exported for testing
func GetExpectedNumWorkers(kubeClient *kube.Clientset, spec *ppsclient.ParallelismSpec) (int, error) {
if spec == nil || (spec.Constant == 0 && spec.Coefficient == 0) {
return 1, nil
} else if spec.Constant > 0 && spec.Coefficient == 0 {
return int(spec.Constant), nil
} else if spec.Constant == 0 && spec.Coefficient > 0 {
// Start ('coefficient' * 'nodes') workers. Determine number of workers
numNodes, err := getNumNodes(kubeClient)
if err != nil {
return 0, err
}
result := math.Floor(spec.Coefficient * float64(numNodes))
return int(math.Max(result, 1)), nil
}
return 0, fmt.Errorf("Unable to interpret ParallelismSpec %+v", spec)
}
// GetPipelineInfo retrieves and returns a valid PipelineInfo from PFS. It does
// the PFS read/unmarshalling of bytes as well as filling in missing fields
func GetPipelineInfo(pachClient *client.APIClient, ptr *pps.EtcdPipelineInfo) (*pps.PipelineInfo, error) {
buf := bytes.Buffer{}
if err := pachClient.GetFile(ppsconsts.SpecRepo, ptr.SpecCommit.ID, ppsconsts.SpecFile, 0, 0, &buf); err != nil {
return nil, fmt.Errorf("could not read existing PipelineInfo from PFS: %v", err)
}
result := &pps.PipelineInfo{}
if err := result.Unmarshal(buf.Bytes()); err != nil {
return nil, fmt.Errorf("could not unmarshal PipelineInfo bytes from PFS: %v", err)
}
result.State = ptr.State
result.Reason = ptr.Reason
result.JobCounts = ptr.JobCounts
result.SpecCommit = ptr.SpecCommit
return result, nil
}
// FailPipeline updates the pipeline's state to failed and sets the failure reason
func FailPipeline(ctx context.Context, etcdClient *etcd.Client, pipelinesCollection col.Collection, pipelineName string, reason string) error {
_, err := col.NewSTM(ctx, etcdClient, func(stm col.STM) error {
pipelines := pipelinesCollection.ReadWrite(stm)
pipelinePtr := new(pps.EtcdPipelineInfo)
if err := pipelines.Get(pipelineName, pipelinePtr); err != nil {
return err
}
pipelinePtr.State = pps.PipelineState_PIPELINE_FAILURE
pipelinePtr.Reason = reason
pipelines.Put(pipelineName, pipelinePtr)
return nil
})
return err
}
// JobInput fills in the commits for a JobInfo
func JobInput(pipelineInfo *pps.PipelineInfo, outputCommitInfo *pfs.CommitInfo) *pps.Input {
// branchToCommit maps strings of the form "<repo>/<branch>" to PFS commits
branchToCommit := make(map[string]*pfs.Commit)
key := path.Join
for i, provCommit := range outputCommitInfo.Provenance {
branchToCommit[key(provCommit.Repo.Name, outputCommitInfo.BranchProvenance[i].Name)] = provCommit
}
jobInput := proto.Clone(pipelineInfo.Input).(*pps.Input)
pps.VisitInput(jobInput, func(input *pps.Input) {
if input.Atom != nil {
if commit, ok := branchToCommit[key(input.Atom.Repo, input.Atom.Branch)]; ok {
input.Atom.Commit = commit.ID
}
}
if input.Cron != nil {
if commit, ok := branchToCommit[key(input.Cron.Repo, "master")]; ok |
}
if input.Git != nil {
if commit, ok := branchToCommit[key(input.Git.Name, input.Git.Branch)]; ok {
input.Git.Commit = commit.ID
}
}
})
return jobInput
}
// PipelineReqFromInfo converts a PipelineInfo into a CreatePipelineRequest.
func PipelineReqFromInfo(pipelineInfo *ppsclient.PipelineInfo) *ppsclient.CreatePipelineRequest {
return &ppsclient.CreatePipelineRequest{
Pipeline: pipelineInfo.Pipeline,
Transform: pipelineInfo.Transform,
ParallelismSpec: pipelineInfo.ParallelismSpec,
Egress: pipelineInfo.Egress,
OutputBranch: pipelineInfo.OutputBranch,
ScaleDownThreshold: pipelineInfo.ScaleDownThreshold,
ResourceRequests: pipelineInfo.ResourceRequests,
ResourceLimits: pipelineInfo.ResourceLimits,
Input: pipelineInfo.Input,
Description: pipelineInfo.Description,
Incremental: pipelineInfo.Incremental,
CacheSize: pipelineInfo.CacheSize,
EnableStats: pipelineInfo.EnableStats,
Batch: pipelineInfo.Batch,
MaxQueueSize: pipelineInfo.MaxQueueSize,
Service: pipelineInfo.Service,
ChunkSpec: pipelineInfo.ChunkSpec,
DatumTimeout: pipelineInfo.DatumTimeout,
JobTimeout: pipelineInfo.JobTimeout,
Salt: pipelineInfo.Salt,
}
}
// PipelineManifestReader helps with unmarshalling pipeline configs from JSON. It's used by
// create-pipeline and update-pipeline
type PipelineManifestReader struct {
buf bytes.Buffer
decoder *json.Decoder
}
// NewPipelineManifestReader creates a new manifest reader from a path.
func NewPipelineManifestReader(path string) (result *PipelineManifestReader, retErr error) {
result = &PipelineManifestReader{}
var pipelineReader io.Reader
if path == "-" {
pipelineReader = io.TeeReader(os.Stdin, &result.buf)
fmt.Print("Reading from stdin.\n")
} else if url, err := url.Parse(path); err == nil && url.Scheme != "" {
resp, err := http.Get(url.String())
if err != nil {
return nil, err
}
defer func() {
if err := resp.Body.Close(); err != nil && retErr == nil {
retErr = err
}
}()
rawBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
pipelineReader = io.TeeReader(strings.NewReader(string(rawBytes)), &result.buf)
} else {
rawBytes, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
pipelineReader = io.TeeReader(strings.NewReader(string(rawBytes)), &result.buf)
}
result.decoder = json.NewDecoder(pipelineReader)
return result, nil
}
// NextCreatePipelineRequest gets the next request from the manifest reader.
func (r *PipelineManifestReader) NextCreatePipelineRequest() (*ppsclient.CreatePipelineRequest, error) {
var result ppsclient.CreatePipelineRequest
if err := jsonpb.UnmarshalNext(r.decoder, &result); err != nil {
if err == io.EOF {
return nil, err
}
return nil, fmt.Errorf("malformed pipeline spec: %s", err)
}
return &result, nil
}
// DescribeSyntaxError describes a syntax error encountered parsing json.
func DescribeSyntaxError(originalErr error, parsedBuffer bytes.Buffer) error {
sErr, ok := originalErr.(*json.SyntaxError)
if !ok {
return originalErr
}
buffer := make([]byte, sErr.Offset)
parsedBuffer.Read(buffer)
lineOffset := strings.LastIndex(string(buffer[:len(buffer)-1]), "\n")
if lineOffset == -1 {
lineOffset = 0
}
lines := strings.Split(string(buffer[:len(buffer)-1]), "\n")
lineNumber := len(lines)
descriptiveErrorString := fmt.Sprintf("Syntax Error on line %v:\n%v\n%v^\n%v\n",
lineNumber,
string(buffer[lineOffset:]),
strings.Repeat(" ", int(sErr.Offset)-2-lineOffset),
originalErr,
)
return errors.New(descriptiveErrorString)
}
// IsTerminal returns 'true' if 'state' indicates that the job is done (i.e.
// the state will not change later: SUCCESS, FAILURE, KILLED) and 'false'
// otherwise.
func IsTerminal(state pps.JobState) bool {
switch state {
case pps.JobState_JOB_SUCCESS, pps.JobState_JOB_FAILURE, pps.JobState_JOB_KILLED:
return true
case pps.JobState_JOB_STARTING, pps.JobState_JOB_RUNNING:
return false
default:
panic(fmt.Sprintf("unrecognized job state: %s", state))
}
}
| {
input.Cron.Commit = commit.ID
} | conditional_block |
util.go | // Package ppsutil contains utilities for various PPS-related tasks, which are
// shared by both the PPS API and the worker binary. These utilities include:
// - Getting the RC name and querying k8s reguarding pipelines
// - Reading and writing pipeline resource requests and limits
// - Reading and writing EtcdPipelineInfos and PipelineInfos[1]
//
// [1] Note that PipelineInfo in particular is complicated because it contains
// fields that are not always set or are stored in multiple places
// ('job_state', for example, is not stored in PFS along with the rest of each
// PipelineInfo, because this field is volatile and we cannot commit to PFS
// every time it changes. 'job_counts' is the same, and 'reason' is in etcd
// because it is only updated alongside 'job_state'). As of 12/7/2017, these
// are the only fields not stored in PFS.
package ppsutil
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"net/url"
"os"
"path"
"strings"
"github.com/gogo/protobuf/jsonpb"
"github.com/gogo/protobuf/proto"
"github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pps"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
col "github.com/pachyderm/pachyderm/src/server/pkg/collection"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts"
etcd "github.com/coreos/etcd/clientv3"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kube "k8s.io/client-go/kubernetes"
)
// PipelineRepo creates a pfs repo for a given pipeline.
func PipelineRepo(pipeline *ppsclient.Pipeline) *pfs.Repo {
return &pfs.Repo{Name: pipeline.Name}
}
// PipelineRcName generates the name of the k8s replication controller that
// manages a pipeline's workers
func PipelineRcName(name string, version uint64) string {
// k8s won't allow RC names that contain upper-case letters
// or underscores
// TODO: deal with name collision
name = strings.Replace(name, "_", "-", -1)
return fmt.Sprintf("pipeline-%s-v%d", strings.ToLower(name), version)
}
// GetRequestsResourceListFromPipeline returns a list of resources that the pipeline,
// minimally requires.
func GetRequestsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceRequests, pipelineInfo.CacheSize)
}
func | (resources *pps.ResourceSpec, cacheSize string) (*v1.ResourceList, error) {
var result v1.ResourceList = make(map[v1.ResourceName]resource.Quantity)
cpuStr := fmt.Sprintf("%f", resources.Cpu)
cpuQuantity, err := resource.ParseQuantity(cpuStr)
if err != nil {
log.Warnf("error parsing cpu string: %s: %+v", cpuStr, err)
} else {
result[v1.ResourceCPU] = cpuQuantity
}
memQuantity, err := resource.ParseQuantity(resources.Memory)
if err != nil {
log.Warnf("error parsing memory string: %s: %+v", resources.Memory, err)
} else {
result[v1.ResourceMemory] = memQuantity
}
// Here we are sanity checking. A pipeline should request at least
// as much memory as it needs for caching.
cacheQuantity, err := resource.ParseQuantity(cacheSize)
if err != nil {
log.Warnf("error parsing cache string: %s: %+v", cacheSize, err)
} else if cacheQuantity.Cmp(memQuantity) > 0 {
result[v1.ResourceMemory] = cacheQuantity
}
if resources.Gpu != 0 {
gpuStr := fmt.Sprintf("%d", resources.Gpu)
gpuQuantity, err := resource.ParseQuantity(gpuStr)
if err != nil {
log.Warnf("error parsing gpu string: %s: %+v", gpuStr, err)
} else {
result[v1.ResourceNvidiaGPU] = gpuQuantity
}
}
return &result, nil
}
// GetLimitsResourceListFromPipeline returns a list of resources that the pipeline,
// maximally is limited to.
func GetLimitsResourceListFromPipeline(pipelineInfo *pps.PipelineInfo) (*v1.ResourceList, error) {
return getResourceListFromSpec(pipelineInfo.ResourceLimits, pipelineInfo.CacheSize)
}
// getNumNodes attempts to retrieve the number of nodes in the current k8s
// cluster
func getNumNodes(kubeClient *kube.Clientset) (int, error) {
nodeList, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return 0, fmt.Errorf("unable to retrieve node list from k8s to determine parallelism: %v", err)
}
if len(nodeList.Items) == 0 {
return 0, fmt.Errorf("pachyderm.pps.jobserver: no k8s nodes found")
}
return len(nodeList.Items), nil
}
// GetExpectedNumWorkers computes the expected number of workers that
// pachyderm will start given the ParallelismSpec 'spec'.
//
// This is only exported for testing
func GetExpectedNumWorkers(kubeClient *kube.Clientset, spec *ppsclient.ParallelismSpec) (int, error) {
if spec == nil || (spec.Constant == 0 && spec.Coefficient == 0) {
return 1, nil
} else if spec.Constant > 0 && spec.Coefficient == 0 {
return int(spec.Constant), nil
} else if spec.Constant == 0 && spec.Coefficient > 0 {
// Start ('coefficient' * 'nodes') workers. Determine number of workers
numNodes, err := getNumNodes(kubeClient)
if err != nil {
return 0, err
}
result := math.Floor(spec.Coefficient * float64(numNodes))
return int(math.Max(result, 1)), nil
}
return 0, fmt.Errorf("Unable to interpret ParallelismSpec %+v", spec)
}
// GetPipelineInfo retrieves and returns a valid PipelineInfo from PFS. It does
// the PFS read/unmarshalling of bytes as well as filling in missing fields
func GetPipelineInfo(pachClient *client.APIClient, ptr *pps.EtcdPipelineInfo) (*pps.PipelineInfo, error) {
buf := bytes.Buffer{}
if err := pachClient.GetFile(ppsconsts.SpecRepo, ptr.SpecCommit.ID, ppsconsts.SpecFile, 0, 0, &buf); err != nil {
return nil, fmt.Errorf("could not read existing PipelineInfo from PFS: %v", err)
}
result := &pps.PipelineInfo{}
if err := result.Unmarshal(buf.Bytes()); err != nil {
return nil, fmt.Errorf("could not unmarshal PipelineInfo bytes from PFS: %v", err)
}
result.State = ptr.State
result.Reason = ptr.Reason
result.JobCounts = ptr.JobCounts
result.SpecCommit = ptr.SpecCommit
return result, nil
}
// FailPipeline updates the pipeline's state to failed and sets the failure reason
func FailPipeline(ctx context.Context, etcdClient *etcd.Client, pipelinesCollection col.Collection, pipelineName string, reason string) error {
_, err := col.NewSTM(ctx, etcdClient, func(stm col.STM) error {
pipelines := pipelinesCollection.ReadWrite(stm)
pipelinePtr := new(pps.EtcdPipelineInfo)
if err := pipelines.Get(pipelineName, pipelinePtr); err != nil {
return err
}
pipelinePtr.State = pps.PipelineState_PIPELINE_FAILURE
pipelinePtr.Reason = reason
pipelines.Put(pipelineName, pipelinePtr)
return nil
})
return err
}
// JobInput fills in the commits for a JobInfo
func JobInput(pipelineInfo *pps.PipelineInfo, outputCommitInfo *pfs.CommitInfo) *pps.Input {
// branchToCommit maps strings of the form "<repo>/<branch>" to PFS commits
branchToCommit := make(map[string]*pfs.Commit)
key := path.Join
for i, provCommit := range outputCommitInfo.Provenance {
branchToCommit[key(provCommit.Repo.Name, outputCommitInfo.BranchProvenance[i].Name)] = provCommit
}
jobInput := proto.Clone(pipelineInfo.Input).(*pps.Input)
pps.VisitInput(jobInput, func(input *pps.Input) {
if input.Atom != nil {
if commit, ok := branchToCommit[key(input.Atom.Repo, input.Atom.Branch)]; ok {
input.Atom.Commit = commit.ID
}
}
if input.Cron != nil {
if commit, ok := branchToCommit[key(input.Cron.Repo, "master")]; ok {
input.Cron.Commit = commit.ID
}
}
if input.Git != nil {
if commit, ok := branchToCommit[key(input.Git.Name, input.Git.Branch)]; ok {
input.Git.Commit = commit.ID
}
}
})
return jobInput
}
// PipelineReqFromInfo converts a PipelineInfo into a CreatePipelineRequest.
func PipelineReqFromInfo(pipelineInfo *ppsclient.PipelineInfo) *ppsclient.CreatePipelineRequest {
return &ppsclient.CreatePipelineRequest{
Pipeline: pipelineInfo.Pipeline,
Transform: pipelineInfo.Transform,
ParallelismSpec: pipelineInfo.ParallelismSpec,
Egress: pipelineInfo.Egress,
OutputBranch: pipelineInfo.OutputBranch,
ScaleDownThreshold: pipelineInfo.ScaleDownThreshold,
ResourceRequests: pipelineInfo.ResourceRequests,
ResourceLimits: pipelineInfo.ResourceLimits,
Input: pipelineInfo.Input,
Description: pipelineInfo.Description,
Incremental: pipelineInfo.Incremental,
CacheSize: pipelineInfo.CacheSize,
EnableStats: pipelineInfo.EnableStats,
Batch: pipelineInfo.Batch,
MaxQueueSize: pipelineInfo.MaxQueueSize,
Service: pipelineInfo.Service,
ChunkSpec: pipelineInfo.ChunkSpec,
DatumTimeout: pipelineInfo.DatumTimeout,
JobTimeout: pipelineInfo.JobTimeout,
Salt: pipelineInfo.Salt,
}
}
// PipelineManifestReader helps with unmarshalling pipeline configs from JSON. It's used by
// create-pipeline and update-pipeline
type PipelineManifestReader struct {
buf bytes.Buffer
decoder *json.Decoder
}
// NewPipelineManifestReader creates a new manifest reader from a path.
func NewPipelineManifestReader(path string) (result *PipelineManifestReader, retErr error) {
result = &PipelineManifestReader{}
var pipelineReader io.Reader
if path == "-" {
pipelineReader = io.TeeReader(os.Stdin, &result.buf)
fmt.Print("Reading from stdin.\n")
} else if url, err := url.Parse(path); err == nil && url.Scheme != "" {
resp, err := http.Get(url.String())
if err != nil {
return nil, err
}
defer func() {
if err := resp.Body.Close(); err != nil && retErr == nil {
retErr = err
}
}()
rawBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
pipelineReader = io.TeeReader(strings.NewReader(string(rawBytes)), &result.buf)
} else {
rawBytes, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
pipelineReader = io.TeeReader(strings.NewReader(string(rawBytes)), &result.buf)
}
result.decoder = json.NewDecoder(pipelineReader)
return result, nil
}
// NextCreatePipelineRequest gets the next request from the manifest reader.
func (r *PipelineManifestReader) NextCreatePipelineRequest() (*ppsclient.CreatePipelineRequest, error) {
var result ppsclient.CreatePipelineRequest
if err := jsonpb.UnmarshalNext(r.decoder, &result); err != nil {
if err == io.EOF {
return nil, err
}
return nil, fmt.Errorf("malformed pipeline spec: %s", err)
}
return &result, nil
}
// DescribeSyntaxError describes a syntax error encountered parsing json.
func DescribeSyntaxError(originalErr error, parsedBuffer bytes.Buffer) error {
sErr, ok := originalErr.(*json.SyntaxError)
if !ok {
return originalErr
}
buffer := make([]byte, sErr.Offset)
parsedBuffer.Read(buffer)
lineOffset := strings.LastIndex(string(buffer[:len(buffer)-1]), "\n")
if lineOffset == -1 {
lineOffset = 0
}
lines := strings.Split(string(buffer[:len(buffer)-1]), "\n")
lineNumber := len(lines)
descriptiveErrorString := fmt.Sprintf("Syntax Error on line %v:\n%v\n%v^\n%v\n",
lineNumber,
string(buffer[lineOffset:]),
strings.Repeat(" ", int(sErr.Offset)-2-lineOffset),
originalErr,
)
return errors.New(descriptiveErrorString)
}
// IsTerminal returns 'true' if 'state' indicates that the job is done (i.e.
// the state will not change later: SUCCESS, FAILURE, KILLED) and 'false'
// otherwise.
func IsTerminal(state pps.JobState) bool {
switch state {
case pps.JobState_JOB_SUCCESS, pps.JobState_JOB_FAILURE, pps.JobState_JOB_KILLED:
return true
case pps.JobState_JOB_STARTING, pps.JobState_JOB_RUNNING:
return false
default:
panic(fmt.Sprintf("unrecognized job state: %s", state))
}
}
| getResourceListFromSpec | identifier_name |
condition_strategy_generators.rs | use crate::ai_utils::playout_result;
use crate::competing_optimizers::StrategyOptimizer;
use crate::condition_strategy::{
Condition, ConditionKind, ConditionStrategy, EvaluatedPriorities, EvaluationData, Rule,
};
use crate::representative_sampling::NewFractalRepresentativeSeedSearch;
use crate::seed_system::{Seed, SingleSeed, SingleSeedGenerator};
use crate::seeds_concrete::CombatChoiceLineagesKind;
use crate::simulation::{Runner, StandardRunner};
use crate::simulation_state::CombatState;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use rand_distr::StandardNormal;
use serde::{Deserialize, Serialize};
use smallvec::alloc::fmt::Formatter;
use std::fmt;
use std::fmt::Display;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
pub type SeedSearch = NewFractalRepresentativeSeedSearch<
ConditionStrategy,
SingleSeed<CombatChoiceLineagesKind>,
SingleSeedGenerator,
>;
pub struct StrategyGeneratorsWithSharedRepresenativeSeeds {
pub seed_search: SeedSearch,
pub generators: Vec<SharingGenerator>,
}
pub struct SharingGenerator {
pub time_used: Duration,
pub generator: GeneratorKind,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum GeneratorKind {
HillClimb {
steps: usize,
num_verification_seeds: usize,
start: HillClimbStart,
kind: HillClimbKind,
},
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbStart {
NewRandom,
FromSeedSearch,
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbKind {
BunchOfRandomChanges,
BunchOfRandomChangesInspired,
OneRelevantRule,
}
impl StrategyGeneratorsWithSharedRepresenativeSeeds {
pub fn new(
starting_state: CombatState,
rng: &mut impl Rng,
) -> StrategyGeneratorsWithSharedRepresenativeSeeds {
let mut generators = Vec::new();
for steps in (0..=8).map(|i| 1 << i) {
for num_verification_seeds in (0..=5).map(|i| 1 << i) {
for &start in &[HillClimbStart::NewRandom, HillClimbStart::FromSeedSearch] {
for &kind in &[
HillClimbKind::BunchOfRandomChanges,
HillClimbKind::BunchOfRandomChangesInspired,
HillClimbKind::OneRelevantRule,
] {
generators.push(SharingGenerator {
time_used: Duration::from_secs(0),
generator: GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
},
});
}
}
}
}
StrategyGeneratorsWithSharedRepresenativeSeeds {
seed_search: NewFractalRepresentativeSeedSearch::new(
starting_state,
SingleSeedGenerator::new(ChaCha8Rng::from_rng(rng).unwrap()),
Default::default(),
),
generators,
}
}
pub fn step(&mut self, rng: &mut impl Rng) {
let generator = self
.generators
.iter_mut()
.min_by_key(|g| g.time_used)
.unwrap();
let start = Instant::now();
let strategy = generator.generator.gen_strategy(&self.seed_search, rng);
let duration = start.elapsed();
generator.time_used += duration;
self.seed_search.consider_strategy(
Arc::new(strategy),
generator.generator.min_playouts_before_culling(),
rng,
);
}
}
pub struct HillClimbSeedInfo<'a> {
pub seed: &'a SingleSeed<CombatChoiceLineagesKind>,
pub current_score: f64,
}
impl GeneratorKind {
pub fn min_playouts_before_culling(&self) -> usize {
match self {
&GeneratorKind::HillClimb { steps, .. } => steps.min(32),
}
}
pub fn gen_strategy(&self, seed_search: &SeedSearch, rng: &mut impl Rng) -> ConditionStrategy {
match self {
&GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
} => {
let mut current = match start {
HillClimbStart::NewRandom => {
ConditionStrategy::fresh_distinctive_candidate(&seed_search.starting_state, rng)
}
HillClimbStart::FromSeedSearch => seed_search
.strategies
.choose(rng)
.unwrap()
.strategy
.deref()
.clone(),
};
let mut verification_seeds: Vec<_> = seed_search
.seeds
.iter()
.take(num_verification_seeds)
.collect();
// hack - the seed search may not have generated this many (or any) seeds yet
let extra_seeds;
if verification_seeds.len() < num_verification_seeds |
let mut verification_seeds: Vec<_> = verification_seeds
.into_iter()
.map(|s| HillClimbSeedInfo {
seed: s,
current_score: playout_result(&seed_search.starting_state, s.view(), ¤t).score,
})
.collect();
let mut improvements = 0;
let mut improvements_on_first = 0;
for _ in 0..steps {
verification_seeds.shuffle(rng);
let (first, rest) = verification_seeds.split_first().unwrap();
let new = kind.hill_climb_candidate(seed_search, ¤t, &verification_seeds, rng);
let first_score =
playout_result(&seed_search.starting_state, first.seed.view(), &new).score;
if first_score <= verification_seeds[0].current_score {
continue;
}
improvements_on_first += 1;
let new_scores: Vec<_> = std::iter::once(first_score)
.chain(
rest
.iter()
.map(|s| playout_result(&seed_search.starting_state, s.seed.view(), &new).score),
)
.collect();
if new_scores.iter().sum::<f64>()
> verification_seeds
.iter()
.map(|s| s.current_score)
.sum::<f64>()
{
current = new;
for (info, new_score) in verification_seeds.iter_mut().zip(new_scores) {
info.current_score = new_score;
}
improvements += 1;
}
}
current.annotation = format!(
"{} + {}/{}/{}",
current.annotation, improvements, improvements_on_first, self
);
current
}
}
}
}
impl HillClimbKind {
fn hill_climb_candidate(
&self,
seed_search: &SeedSearch,
current: &ConditionStrategy,
verification_seeds: &[HillClimbSeedInfo],
rng: &mut impl Rng,
) -> ConditionStrategy {
let (first, _rest) = verification_seeds.split_first().unwrap();
match self {
HillClimbKind::BunchOfRandomChanges => {
current.bunch_of_random_changes(&seed_search.starting_state, rng, &[])
}
HillClimbKind::BunchOfRandomChangesInspired => current.bunch_of_random_changes(
&seed_search.starting_state,
rng,
&seed_search
.strategies
.iter()
.map(|s| &*s.strategy)
.collect::<Vec<_>>(),
),
HillClimbKind::OneRelevantRule => {
let mut state = seed_search.starting_state.clone();
let mut runner = StandardRunner::new(&mut state, first.seed.view());
let mut candidate_rules = Vec::new();
while !runner.state().combat_over() {
let state = runner.state();
let data = EvaluationData::new(state);
let priorities = EvaluatedPriorities::evaluated(¤t.rules, state, &data);
let best_index = priorities.best_index();
for _ in 0..50 {
let condition = Condition::random_generally_relevant_choice_distinguisher(state, rng);
let mut rule = Rule {
conditions: vec![condition],
flat_reward: rng.sample(StandardNormal),
..Default::default()
};
if priorities.best_index_with_extra_rule(&rule, state, &data) != best_index {
for _ in 0..rng.gen_range(0..=2) {
for _ in 0..50 {
let condition =
Condition::random_generally_relevant_state_distinguisher(state, rng);
if condition.evaluate(state, &data.contexts().next().unwrap()) {
rule.conditions.push(condition);
break;
}
}
}
candidate_rules.push(rule);
break;
}
}
let choice = &data.choices[best_index].choice;
runner.apply_choice(&choice);
}
let mut new = current.clone();
if let Some(new_rule) = candidate_rules.choose(rng) {
new.rules.push(new_rule.clone())
}
new
}
}
}
}
impl Display for GeneratorKind {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start: _,
kind,
} => {
write!(f, "{}x{:?}@{}", steps, kind, num_verification_seeds)
}
}
}
}
impl StrategyOptimizer for StrategyGeneratorsWithSharedRepresenativeSeeds {
type Strategy = ConditionStrategy;
fn step(&mut self, _state: &CombatState, rng: &mut ChaCha8Rng) {
self.step(rng);
}
fn report(&self) -> Arc<Self::Strategy> {
let result = self.seed_search.best_strategy();
self.seed_search.report();
println!("StrategyGeneratorsWithSharedRepresenativeSeeds top strategies:");
for strategy in &self.seed_search.strategies {
println!("{}", strategy.strategy.annotation);
}
result
}
}
impl ConditionStrategy {
pub fn bunch_of_random_changes(
&self,
state: &CombatState,
rng: &mut impl Rng,
promising_strategies: &[&ConditionStrategy],
) -> ConditionStrategy {
fn tweak_rules(
rules: &mut Vec<Rule>,
state: &CombatState,
rng: &mut impl Rng,
promising_conditions: &[Condition],
) {
let remove_chance = 0.05f64.min(2.0 / rules.len() as f64);
rules.retain(|_| rng.gen::<f64>() > remove_chance);
for rule in rules.iter_mut() {
if rng.gen() {
if rule.flat_reward != 0.0 {
rule.flat_reward += rng.sample::<f64, _>(StandardNormal) * 0.2;
}
if rule.block_per_energy_reward != 0.0 {
rule.block_per_energy_reward += rng.sample::<f64, _>(StandardNormal) * 0.02;
}
for value in &mut rule.unblocked_damage_per_energy_rewards {
if *value != 0.0 {
*value += rng.sample::<f64, _>(StandardNormal) * 0.01;
}
}
}
}
for _ in 0..rng.gen_range(10..30) {
let condition;
if rng.gen() || promising_conditions.is_empty() {
condition = Condition::random_generally_relevant_state_distinguisher(state, rng);
} else {
condition = promising_conditions.choose(rng).unwrap().clone();
}
if rng.gen() || rules.is_empty() {
rules.push(Rule {
conditions: vec![
Condition::random_generally_relevant_choice_distinguisher(state, rng),
condition,
],
flat_reward: rng.sample(StandardNormal),
..Default::default()
})
} else {
rules.choose_mut(rng).unwrap().conditions.push(condition);
}
}
}
let promising_conditions: Vec<_> = promising_strategies
.iter()
.flat_map(|s| {
s.rules
.iter()
.flat_map(|rule| &rule.conditions)
.filter(|c| {
!matches!(
c.kind,
ConditionKind::PlayCardId(_) | ConditionKind::UsePotionId(_)
)
})
.cloned()
})
.collect();
let mut result = self.clone();
tweak_rules(&mut result.rules, state, rng, &promising_conditions);
result
}
}
| {
extra_seeds = (verification_seeds.len()..num_verification_seeds)
.map(|_| SingleSeed::new(rng))
.collect::<Vec<_>>();
verification_seeds.extend(extra_seeds.iter());
} | conditional_block |
condition_strategy_generators.rs | use crate::ai_utils::playout_result;
use crate::competing_optimizers::StrategyOptimizer;
use crate::condition_strategy::{
Condition, ConditionKind, ConditionStrategy, EvaluatedPriorities, EvaluationData, Rule,
};
use crate::representative_sampling::NewFractalRepresentativeSeedSearch;
use crate::seed_system::{Seed, SingleSeed, SingleSeedGenerator};
use crate::seeds_concrete::CombatChoiceLineagesKind;
use crate::simulation::{Runner, StandardRunner};
use crate::simulation_state::CombatState;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use rand_distr::StandardNormal;
use serde::{Deserialize, Serialize};
use smallvec::alloc::fmt::Formatter;
use std::fmt;
use std::fmt::Display;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
pub type SeedSearch = NewFractalRepresentativeSeedSearch<
ConditionStrategy,
SingleSeed<CombatChoiceLineagesKind>,
SingleSeedGenerator,
>;
pub struct StrategyGeneratorsWithSharedRepresenativeSeeds {
pub seed_search: SeedSearch,
pub generators: Vec<SharingGenerator>,
}
pub struct SharingGenerator {
pub time_used: Duration,
pub generator: GeneratorKind,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum GeneratorKind {
HillClimb {
steps: usize,
num_verification_seeds: usize,
start: HillClimbStart,
kind: HillClimbKind,
},
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbStart {
NewRandom,
FromSeedSearch,
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbKind {
BunchOfRandomChanges,
BunchOfRandomChangesInspired,
OneRelevantRule,
}
impl StrategyGeneratorsWithSharedRepresenativeSeeds {
pub fn new(
starting_state: CombatState,
rng: &mut impl Rng,
) -> StrategyGeneratorsWithSharedRepresenativeSeeds {
let mut generators = Vec::new();
for steps in (0..=8).map(|i| 1 << i) {
for num_verification_seeds in (0..=5).map(|i| 1 << i) {
for &start in &[HillClimbStart::NewRandom, HillClimbStart::FromSeedSearch] {
for &kind in &[
HillClimbKind::BunchOfRandomChanges,
HillClimbKind::BunchOfRandomChangesInspired,
HillClimbKind::OneRelevantRule,
] {
generators.push(SharingGenerator {
time_used: Duration::from_secs(0),
generator: GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
},
});
}
}
}
}
StrategyGeneratorsWithSharedRepresenativeSeeds {
seed_search: NewFractalRepresentativeSeedSearch::new(
starting_state,
SingleSeedGenerator::new(ChaCha8Rng::from_rng(rng).unwrap()),
Default::default(),
),
generators,
}
}
pub fn step(&mut self, rng: &mut impl Rng) {
let generator = self
.generators
.iter_mut()
.min_by_key(|g| g.time_used)
.unwrap();
let start = Instant::now();
let strategy = generator.generator.gen_strategy(&self.seed_search, rng);
let duration = start.elapsed();
generator.time_used += duration;
self.seed_search.consider_strategy(
Arc::new(strategy),
generator.generator.min_playouts_before_culling(),
rng,
);
}
}
pub struct HillClimbSeedInfo<'a> {
pub seed: &'a SingleSeed<CombatChoiceLineagesKind>,
pub current_score: f64,
}
impl GeneratorKind {
pub fn min_playouts_before_culling(&self) -> usize {
match self {
&GeneratorKind::HillClimb { steps, .. } => steps.min(32),
}
}
pub fn gen_strategy(&self, seed_search: &SeedSearch, rng: &mut impl Rng) -> ConditionStrategy {
match self {
&GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
} => {
let mut current = match start {
HillClimbStart::NewRandom => {
ConditionStrategy::fresh_distinctive_candidate(&seed_search.starting_state, rng)
}
HillClimbStart::FromSeedSearch => seed_search
.strategies
.choose(rng)
.unwrap()
.strategy
.deref()
.clone(),
};
let mut verification_seeds: Vec<_> = seed_search
.seeds
.iter()
.take(num_verification_seeds)
.collect();
// hack - the seed search may not have generated this many (or any) seeds yet
let extra_seeds;
if verification_seeds.len() < num_verification_seeds {
extra_seeds = (verification_seeds.len()..num_verification_seeds)
.map(|_| SingleSeed::new(rng))
.collect::<Vec<_>>();
verification_seeds.extend(extra_seeds.iter());
}
let mut verification_seeds: Vec<_> = verification_seeds
.into_iter()
.map(|s| HillClimbSeedInfo {
seed: s,
current_score: playout_result(&seed_search.starting_state, s.view(), ¤t).score,
})
.collect();
let mut improvements = 0;
let mut improvements_on_first = 0;
for _ in 0..steps {
verification_seeds.shuffle(rng);
let (first, rest) = verification_seeds.split_first().unwrap();
let new = kind.hill_climb_candidate(seed_search, ¤t, &verification_seeds, rng);
let first_score =
playout_result(&seed_search.starting_state, first.seed.view(), &new).score;
if first_score <= verification_seeds[0].current_score {
continue;
}
improvements_on_first += 1;
let new_scores: Vec<_> = std::iter::once(first_score)
.chain(
rest
.iter()
.map(|s| playout_result(&seed_search.starting_state, s.seed.view(), &new).score),
)
.collect();
if new_scores.iter().sum::<f64>()
> verification_seeds
.iter()
.map(|s| s.current_score)
.sum::<f64>()
{
current = new;
for (info, new_score) in verification_seeds.iter_mut().zip(new_scores) {
info.current_score = new_score;
}
improvements += 1;
}
}
current.annotation = format!(
"{} + {}/{}/{}",
current.annotation, improvements, improvements_on_first, self
);
current
}
}
}
}
impl HillClimbKind {
fn hill_climb_candidate(
&self,
seed_search: &SeedSearch,
current: &ConditionStrategy,
verification_seeds: &[HillClimbSeedInfo],
rng: &mut impl Rng,
) -> ConditionStrategy {
let (first, _rest) = verification_seeds.split_first().unwrap();
match self {
HillClimbKind::BunchOfRandomChanges => {
current.bunch_of_random_changes(&seed_search.starting_state, rng, &[])
}
HillClimbKind::BunchOfRandomChangesInspired => current.bunch_of_random_changes(
&seed_search.starting_state,
rng,
&seed_search
.strategies
.iter()
.map(|s| &*s.strategy)
.collect::<Vec<_>>(),
),
HillClimbKind::OneRelevantRule => {
let mut state = seed_search.starting_state.clone();
let mut runner = StandardRunner::new(&mut state, first.seed.view());
let mut candidate_rules = Vec::new();
while !runner.state().combat_over() {
let state = runner.state();
let data = EvaluationData::new(state);
let priorities = EvaluatedPriorities::evaluated(¤t.rules, state, &data);
let best_index = priorities.best_index();
for _ in 0..50 {
let condition = Condition::random_generally_relevant_choice_distinguisher(state, rng);
let mut rule = Rule {
conditions: vec![condition],
flat_reward: rng.sample(StandardNormal),
..Default::default()
};
if priorities.best_index_with_extra_rule(&rule, state, &data) != best_index {
for _ in 0..rng.gen_range(0..=2) {
for _ in 0..50 {
let condition =
Condition::random_generally_relevant_state_distinguisher(state, rng);
if condition.evaluate(state, &data.contexts().next().unwrap()) {
rule.conditions.push(condition);
break;
}
}
}
candidate_rules.push(rule);
break;
}
}
let choice = &data.choices[best_index].choice;
runner.apply_choice(&choice);
}
let mut new = current.clone();
if let Some(new_rule) = candidate_rules.choose(rng) {
new.rules.push(new_rule.clone())
}
new
}
}
}
}
impl Display for GeneratorKind {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start: _,
kind,
} => {
write!(f, "{}x{:?}@{}", steps, kind, num_verification_seeds)
}
}
}
}
impl StrategyOptimizer for StrategyGeneratorsWithSharedRepresenativeSeeds {
type Strategy = ConditionStrategy;
fn step(&mut self, _state: &CombatState, rng: &mut ChaCha8Rng) {
self.step(rng);
}
fn report(&self) -> Arc<Self::Strategy> {
let result = self.seed_search.best_strategy();
self.seed_search.report();
println!("StrategyGeneratorsWithSharedRepresenativeSeeds top strategies:");
for strategy in &self.seed_search.strategies {
println!("{}", strategy.strategy.annotation);
}
result
}
}
impl ConditionStrategy {
pub fn bunch_of_random_changes(
&self,
state: &CombatState,
rng: &mut impl Rng,
promising_strategies: &[&ConditionStrategy],
) -> ConditionStrategy {
fn tweak_rules(
rules: &mut Vec<Rule>,
state: &CombatState,
rng: &mut impl Rng,
promising_conditions: &[Condition],
) {
let remove_chance = 0.05f64.min(2.0 / rules.len() as f64);
rules.retain(|_| rng.gen::<f64>() > remove_chance);
for rule in rules.iter_mut() {
if rng.gen() {
if rule.flat_reward != 0.0 {
rule.flat_reward += rng.sample::<f64, _>(StandardNormal) * 0.2;
}
if rule.block_per_energy_reward != 0.0 {
rule.block_per_energy_reward += rng.sample::<f64, _>(StandardNormal) * 0.02;
}
for value in &mut rule.unblocked_damage_per_energy_rewards {
if *value != 0.0 {
*value += rng.sample::<f64, _>(StandardNormal) * 0.01;
}
}
}
}
for _ in 0..rng.gen_range(10..30) {
let condition;
if rng.gen() || promising_conditions.is_empty() {
condition = Condition::random_generally_relevant_state_distinguisher(state, rng);
} else {
condition = promising_conditions.choose(rng).unwrap().clone();
}
if rng.gen() || rules.is_empty() {
rules.push(Rule {
conditions: vec![
Condition::random_generally_relevant_choice_distinguisher(state, rng),
condition,
],
flat_reward: rng.sample(StandardNormal),
..Default::default()
})
} else {
rules.choose_mut(rng).unwrap().conditions.push(condition);
}
}
}
let promising_conditions: Vec<_> = promising_strategies
.iter()
.flat_map(|s| {
s.rules
.iter()
.flat_map(|rule| &rule.conditions)
.filter(|c| {
!matches!(
c.kind,
ConditionKind::PlayCardId(_) | ConditionKind::UsePotionId(_)
)
})
.cloned()
}) | tweak_rules(&mut result.rules, state, rng, &promising_conditions);
result
}
} | .collect();
let mut result = self.clone(); | random_line_split |
condition_strategy_generators.rs | use crate::ai_utils::playout_result;
use crate::competing_optimizers::StrategyOptimizer;
use crate::condition_strategy::{
Condition, ConditionKind, ConditionStrategy, EvaluatedPriorities, EvaluationData, Rule,
};
use crate::representative_sampling::NewFractalRepresentativeSeedSearch;
use crate::seed_system::{Seed, SingleSeed, SingleSeedGenerator};
use crate::seeds_concrete::CombatChoiceLineagesKind;
use crate::simulation::{Runner, StandardRunner};
use crate::simulation_state::CombatState;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use rand_distr::StandardNormal;
use serde::{Deserialize, Serialize};
use smallvec::alloc::fmt::Formatter;
use std::fmt;
use std::fmt::Display;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
pub type SeedSearch = NewFractalRepresentativeSeedSearch<
ConditionStrategy,
SingleSeed<CombatChoiceLineagesKind>,
SingleSeedGenerator,
>;
pub struct StrategyGeneratorsWithSharedRepresenativeSeeds {
pub seed_search: SeedSearch,
pub generators: Vec<SharingGenerator>,
}
pub struct SharingGenerator {
pub time_used: Duration,
pub generator: GeneratorKind,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum GeneratorKind {
HillClimb {
steps: usize,
num_verification_seeds: usize,
start: HillClimbStart,
kind: HillClimbKind,
},
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbStart {
NewRandom,
FromSeedSearch,
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbKind {
BunchOfRandomChanges,
BunchOfRandomChangesInspired,
OneRelevantRule,
}
impl StrategyGeneratorsWithSharedRepresenativeSeeds {
pub fn new(
starting_state: CombatState,
rng: &mut impl Rng,
) -> StrategyGeneratorsWithSharedRepresenativeSeeds |
pub fn step(&mut self, rng: &mut impl Rng) {
let generator = self
.generators
.iter_mut()
.min_by_key(|g| g.time_used)
.unwrap();
let start = Instant::now();
let strategy = generator.generator.gen_strategy(&self.seed_search, rng);
let duration = start.elapsed();
generator.time_used += duration;
self.seed_search.consider_strategy(
Arc::new(strategy),
generator.generator.min_playouts_before_culling(),
rng,
);
}
}
pub struct HillClimbSeedInfo<'a> {
pub seed: &'a SingleSeed<CombatChoiceLineagesKind>,
pub current_score: f64,
}
impl GeneratorKind {
pub fn min_playouts_before_culling(&self) -> usize {
match self {
&GeneratorKind::HillClimb { steps, .. } => steps.min(32),
}
}
pub fn gen_strategy(&self, seed_search: &SeedSearch, rng: &mut impl Rng) -> ConditionStrategy {
match self {
&GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
} => {
let mut current = match start {
HillClimbStart::NewRandom => {
ConditionStrategy::fresh_distinctive_candidate(&seed_search.starting_state, rng)
}
HillClimbStart::FromSeedSearch => seed_search
.strategies
.choose(rng)
.unwrap()
.strategy
.deref()
.clone(),
};
let mut verification_seeds: Vec<_> = seed_search
.seeds
.iter()
.take(num_verification_seeds)
.collect();
// hack - the seed search may not have generated this many (or any) seeds yet
let extra_seeds;
if verification_seeds.len() < num_verification_seeds {
extra_seeds = (verification_seeds.len()..num_verification_seeds)
.map(|_| SingleSeed::new(rng))
.collect::<Vec<_>>();
verification_seeds.extend(extra_seeds.iter());
}
let mut verification_seeds: Vec<_> = verification_seeds
.into_iter()
.map(|s| HillClimbSeedInfo {
seed: s,
current_score: playout_result(&seed_search.starting_state, s.view(), ¤t).score,
})
.collect();
let mut improvements = 0;
let mut improvements_on_first = 0;
for _ in 0..steps {
verification_seeds.shuffle(rng);
let (first, rest) = verification_seeds.split_first().unwrap();
let new = kind.hill_climb_candidate(seed_search, ¤t, &verification_seeds, rng);
let first_score =
playout_result(&seed_search.starting_state, first.seed.view(), &new).score;
if first_score <= verification_seeds[0].current_score {
continue;
}
improvements_on_first += 1;
let new_scores: Vec<_> = std::iter::once(first_score)
.chain(
rest
.iter()
.map(|s| playout_result(&seed_search.starting_state, s.seed.view(), &new).score),
)
.collect();
if new_scores.iter().sum::<f64>()
> verification_seeds
.iter()
.map(|s| s.current_score)
.sum::<f64>()
{
current = new;
for (info, new_score) in verification_seeds.iter_mut().zip(new_scores) {
info.current_score = new_score;
}
improvements += 1;
}
}
current.annotation = format!(
"{} + {}/{}/{}",
current.annotation, improvements, improvements_on_first, self
);
current
}
}
}
}
impl HillClimbKind {
fn hill_climb_candidate(
&self,
seed_search: &SeedSearch,
current: &ConditionStrategy,
verification_seeds: &[HillClimbSeedInfo],
rng: &mut impl Rng,
) -> ConditionStrategy {
let (first, _rest) = verification_seeds.split_first().unwrap();
match self {
HillClimbKind::BunchOfRandomChanges => {
current.bunch_of_random_changes(&seed_search.starting_state, rng, &[])
}
HillClimbKind::BunchOfRandomChangesInspired => current.bunch_of_random_changes(
&seed_search.starting_state,
rng,
&seed_search
.strategies
.iter()
.map(|s| &*s.strategy)
.collect::<Vec<_>>(),
),
HillClimbKind::OneRelevantRule => {
let mut state = seed_search.starting_state.clone();
let mut runner = StandardRunner::new(&mut state, first.seed.view());
let mut candidate_rules = Vec::new();
while !runner.state().combat_over() {
let state = runner.state();
let data = EvaluationData::new(state);
let priorities = EvaluatedPriorities::evaluated(¤t.rules, state, &data);
let best_index = priorities.best_index();
for _ in 0..50 {
let condition = Condition::random_generally_relevant_choice_distinguisher(state, rng);
let mut rule = Rule {
conditions: vec![condition],
flat_reward: rng.sample(StandardNormal),
..Default::default()
};
if priorities.best_index_with_extra_rule(&rule, state, &data) != best_index {
for _ in 0..rng.gen_range(0..=2) {
for _ in 0..50 {
let condition =
Condition::random_generally_relevant_state_distinguisher(state, rng);
if condition.evaluate(state, &data.contexts().next().unwrap()) {
rule.conditions.push(condition);
break;
}
}
}
candidate_rules.push(rule);
break;
}
}
let choice = &data.choices[best_index].choice;
runner.apply_choice(&choice);
}
let mut new = current.clone();
if let Some(new_rule) = candidate_rules.choose(rng) {
new.rules.push(new_rule.clone())
}
new
}
}
}
}
impl Display for GeneratorKind {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start: _,
kind,
} => {
write!(f, "{}x{:?}@{}", steps, kind, num_verification_seeds)
}
}
}
}
impl StrategyOptimizer for StrategyGeneratorsWithSharedRepresenativeSeeds {
type Strategy = ConditionStrategy;
fn step(&mut self, _state: &CombatState, rng: &mut ChaCha8Rng) {
self.step(rng);
}
fn report(&self) -> Arc<Self::Strategy> {
let result = self.seed_search.best_strategy();
self.seed_search.report();
println!("StrategyGeneratorsWithSharedRepresenativeSeeds top strategies:");
for strategy in &self.seed_search.strategies {
println!("{}", strategy.strategy.annotation);
}
result
}
}
impl ConditionStrategy {
pub fn bunch_of_random_changes(
&self,
state: &CombatState,
rng: &mut impl Rng,
promising_strategies: &[&ConditionStrategy],
) -> ConditionStrategy {
fn tweak_rules(
rules: &mut Vec<Rule>,
state: &CombatState,
rng: &mut impl Rng,
promising_conditions: &[Condition],
) {
let remove_chance = 0.05f64.min(2.0 / rules.len() as f64);
rules.retain(|_| rng.gen::<f64>() > remove_chance);
for rule in rules.iter_mut() {
if rng.gen() {
if rule.flat_reward != 0.0 {
rule.flat_reward += rng.sample::<f64, _>(StandardNormal) * 0.2;
}
if rule.block_per_energy_reward != 0.0 {
rule.block_per_energy_reward += rng.sample::<f64, _>(StandardNormal) * 0.02;
}
for value in &mut rule.unblocked_damage_per_energy_rewards {
if *value != 0.0 {
*value += rng.sample::<f64, _>(StandardNormal) * 0.01;
}
}
}
}
for _ in 0..rng.gen_range(10..30) {
let condition;
if rng.gen() || promising_conditions.is_empty() {
condition = Condition::random_generally_relevant_state_distinguisher(state, rng);
} else {
condition = promising_conditions.choose(rng).unwrap().clone();
}
if rng.gen() || rules.is_empty() {
rules.push(Rule {
conditions: vec![
Condition::random_generally_relevant_choice_distinguisher(state, rng),
condition,
],
flat_reward: rng.sample(StandardNormal),
..Default::default()
})
} else {
rules.choose_mut(rng).unwrap().conditions.push(condition);
}
}
}
let promising_conditions: Vec<_> = promising_strategies
.iter()
.flat_map(|s| {
s.rules
.iter()
.flat_map(|rule| &rule.conditions)
.filter(|c| {
!matches!(
c.kind,
ConditionKind::PlayCardId(_) | ConditionKind::UsePotionId(_)
)
})
.cloned()
})
.collect();
let mut result = self.clone();
tweak_rules(&mut result.rules, state, rng, &promising_conditions);
result
}
}
| {
let mut generators = Vec::new();
for steps in (0..=8).map(|i| 1 << i) {
for num_verification_seeds in (0..=5).map(|i| 1 << i) {
for &start in &[HillClimbStart::NewRandom, HillClimbStart::FromSeedSearch] {
for &kind in &[
HillClimbKind::BunchOfRandomChanges,
HillClimbKind::BunchOfRandomChangesInspired,
HillClimbKind::OneRelevantRule,
] {
generators.push(SharingGenerator {
time_used: Duration::from_secs(0),
generator: GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
},
});
}
}
}
}
StrategyGeneratorsWithSharedRepresenativeSeeds {
seed_search: NewFractalRepresentativeSeedSearch::new(
starting_state,
SingleSeedGenerator::new(ChaCha8Rng::from_rng(rng).unwrap()),
Default::default(),
),
generators,
}
} | identifier_body |
condition_strategy_generators.rs | use crate::ai_utils::playout_result;
use crate::competing_optimizers::StrategyOptimizer;
use crate::condition_strategy::{
Condition, ConditionKind, ConditionStrategy, EvaluatedPriorities, EvaluationData, Rule,
};
use crate::representative_sampling::NewFractalRepresentativeSeedSearch;
use crate::seed_system::{Seed, SingleSeed, SingleSeedGenerator};
use crate::seeds_concrete::CombatChoiceLineagesKind;
use crate::simulation::{Runner, StandardRunner};
use crate::simulation_state::CombatState;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use rand_distr::StandardNormal;
use serde::{Deserialize, Serialize};
use smallvec::alloc::fmt::Formatter;
use std::fmt;
use std::fmt::Display;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
pub type SeedSearch = NewFractalRepresentativeSeedSearch<
ConditionStrategy,
SingleSeed<CombatChoiceLineagesKind>,
SingleSeedGenerator,
>;
pub struct StrategyGeneratorsWithSharedRepresenativeSeeds {
pub seed_search: SeedSearch,
pub generators: Vec<SharingGenerator>,
}
pub struct SharingGenerator {
pub time_used: Duration,
pub generator: GeneratorKind,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum GeneratorKind {
HillClimb {
steps: usize,
num_verification_seeds: usize,
start: HillClimbStart,
kind: HillClimbKind,
},
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbStart {
NewRandom,
FromSeedSearch,
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbKind {
BunchOfRandomChanges,
BunchOfRandomChangesInspired,
OneRelevantRule,
}
impl StrategyGeneratorsWithSharedRepresenativeSeeds {
pub fn new(
starting_state: CombatState,
rng: &mut impl Rng,
) -> StrategyGeneratorsWithSharedRepresenativeSeeds {
let mut generators = Vec::new();
for steps in (0..=8).map(|i| 1 << i) {
for num_verification_seeds in (0..=5).map(|i| 1 << i) {
for &start in &[HillClimbStart::NewRandom, HillClimbStart::FromSeedSearch] {
for &kind in &[
HillClimbKind::BunchOfRandomChanges,
HillClimbKind::BunchOfRandomChangesInspired,
HillClimbKind::OneRelevantRule,
] {
generators.push(SharingGenerator {
time_used: Duration::from_secs(0),
generator: GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
},
});
}
}
}
}
StrategyGeneratorsWithSharedRepresenativeSeeds {
seed_search: NewFractalRepresentativeSeedSearch::new(
starting_state,
SingleSeedGenerator::new(ChaCha8Rng::from_rng(rng).unwrap()),
Default::default(),
),
generators,
}
}
pub fn step(&mut self, rng: &mut impl Rng) {
let generator = self
.generators
.iter_mut()
.min_by_key(|g| g.time_used)
.unwrap();
let start = Instant::now();
let strategy = generator.generator.gen_strategy(&self.seed_search, rng);
let duration = start.elapsed();
generator.time_used += duration;
self.seed_search.consider_strategy(
Arc::new(strategy),
generator.generator.min_playouts_before_culling(),
rng,
);
}
}
pub struct | <'a> {
pub seed: &'a SingleSeed<CombatChoiceLineagesKind>,
pub current_score: f64,
}
impl GeneratorKind {
pub fn min_playouts_before_culling(&self) -> usize {
match self {
&GeneratorKind::HillClimb { steps, .. } => steps.min(32),
}
}
pub fn gen_strategy(&self, seed_search: &SeedSearch, rng: &mut impl Rng) -> ConditionStrategy {
match self {
&GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
} => {
let mut current = match start {
HillClimbStart::NewRandom => {
ConditionStrategy::fresh_distinctive_candidate(&seed_search.starting_state, rng)
}
HillClimbStart::FromSeedSearch => seed_search
.strategies
.choose(rng)
.unwrap()
.strategy
.deref()
.clone(),
};
let mut verification_seeds: Vec<_> = seed_search
.seeds
.iter()
.take(num_verification_seeds)
.collect();
// hack - the seed search may not have generated this many (or any) seeds yet
let extra_seeds;
if verification_seeds.len() < num_verification_seeds {
extra_seeds = (verification_seeds.len()..num_verification_seeds)
.map(|_| SingleSeed::new(rng))
.collect::<Vec<_>>();
verification_seeds.extend(extra_seeds.iter());
}
let mut verification_seeds: Vec<_> = verification_seeds
.into_iter()
.map(|s| HillClimbSeedInfo {
seed: s,
current_score: playout_result(&seed_search.starting_state, s.view(), ¤t).score,
})
.collect();
let mut improvements = 0;
let mut improvements_on_first = 0;
for _ in 0..steps {
verification_seeds.shuffle(rng);
let (first, rest) = verification_seeds.split_first().unwrap();
let new = kind.hill_climb_candidate(seed_search, ¤t, &verification_seeds, rng);
let first_score =
playout_result(&seed_search.starting_state, first.seed.view(), &new).score;
if first_score <= verification_seeds[0].current_score {
continue;
}
improvements_on_first += 1;
let new_scores: Vec<_> = std::iter::once(first_score)
.chain(
rest
.iter()
.map(|s| playout_result(&seed_search.starting_state, s.seed.view(), &new).score),
)
.collect();
if new_scores.iter().sum::<f64>()
> verification_seeds
.iter()
.map(|s| s.current_score)
.sum::<f64>()
{
current = new;
for (info, new_score) in verification_seeds.iter_mut().zip(new_scores) {
info.current_score = new_score;
}
improvements += 1;
}
}
current.annotation = format!(
"{} + {}/{}/{}",
current.annotation, improvements, improvements_on_first, self
);
current
}
}
}
}
impl HillClimbKind {
fn hill_climb_candidate(
&self,
seed_search: &SeedSearch,
current: &ConditionStrategy,
verification_seeds: &[HillClimbSeedInfo],
rng: &mut impl Rng,
) -> ConditionStrategy {
let (first, _rest) = verification_seeds.split_first().unwrap();
match self {
HillClimbKind::BunchOfRandomChanges => {
current.bunch_of_random_changes(&seed_search.starting_state, rng, &[])
}
HillClimbKind::BunchOfRandomChangesInspired => current.bunch_of_random_changes(
&seed_search.starting_state,
rng,
&seed_search
.strategies
.iter()
.map(|s| &*s.strategy)
.collect::<Vec<_>>(),
),
HillClimbKind::OneRelevantRule => {
let mut state = seed_search.starting_state.clone();
let mut runner = StandardRunner::new(&mut state, first.seed.view());
let mut candidate_rules = Vec::new();
while !runner.state().combat_over() {
let state = runner.state();
let data = EvaluationData::new(state);
let priorities = EvaluatedPriorities::evaluated(¤t.rules, state, &data);
let best_index = priorities.best_index();
for _ in 0..50 {
let condition = Condition::random_generally_relevant_choice_distinguisher(state, rng);
let mut rule = Rule {
conditions: vec![condition],
flat_reward: rng.sample(StandardNormal),
..Default::default()
};
if priorities.best_index_with_extra_rule(&rule, state, &data) != best_index {
for _ in 0..rng.gen_range(0..=2) {
for _ in 0..50 {
let condition =
Condition::random_generally_relevant_state_distinguisher(state, rng);
if condition.evaluate(state, &data.contexts().next().unwrap()) {
rule.conditions.push(condition);
break;
}
}
}
candidate_rules.push(rule);
break;
}
}
let choice = &data.choices[best_index].choice;
runner.apply_choice(&choice);
}
let mut new = current.clone();
if let Some(new_rule) = candidate_rules.choose(rng) {
new.rules.push(new_rule.clone())
}
new
}
}
}
}
impl Display for GeneratorKind {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start: _,
kind,
} => {
write!(f, "{}x{:?}@{}", steps, kind, num_verification_seeds)
}
}
}
}
impl StrategyOptimizer for StrategyGeneratorsWithSharedRepresenativeSeeds {
type Strategy = ConditionStrategy;
fn step(&mut self, _state: &CombatState, rng: &mut ChaCha8Rng) {
self.step(rng);
}
fn report(&self) -> Arc<Self::Strategy> {
let result = self.seed_search.best_strategy();
self.seed_search.report();
println!("StrategyGeneratorsWithSharedRepresenativeSeeds top strategies:");
for strategy in &self.seed_search.strategies {
println!("{}", strategy.strategy.annotation);
}
result
}
}
impl ConditionStrategy {
pub fn bunch_of_random_changes(
&self,
state: &CombatState,
rng: &mut impl Rng,
promising_strategies: &[&ConditionStrategy],
) -> ConditionStrategy {
fn tweak_rules(
rules: &mut Vec<Rule>,
state: &CombatState,
rng: &mut impl Rng,
promising_conditions: &[Condition],
) {
let remove_chance = 0.05f64.min(2.0 / rules.len() as f64);
rules.retain(|_| rng.gen::<f64>() > remove_chance);
for rule in rules.iter_mut() {
if rng.gen() {
if rule.flat_reward != 0.0 {
rule.flat_reward += rng.sample::<f64, _>(StandardNormal) * 0.2;
}
if rule.block_per_energy_reward != 0.0 {
rule.block_per_energy_reward += rng.sample::<f64, _>(StandardNormal) * 0.02;
}
for value in &mut rule.unblocked_damage_per_energy_rewards {
if *value != 0.0 {
*value += rng.sample::<f64, _>(StandardNormal) * 0.01;
}
}
}
}
for _ in 0..rng.gen_range(10..30) {
let condition;
if rng.gen() || promising_conditions.is_empty() {
condition = Condition::random_generally_relevant_state_distinguisher(state, rng);
} else {
condition = promising_conditions.choose(rng).unwrap().clone();
}
if rng.gen() || rules.is_empty() {
rules.push(Rule {
conditions: vec![
Condition::random_generally_relevant_choice_distinguisher(state, rng),
condition,
],
flat_reward: rng.sample(StandardNormal),
..Default::default()
})
} else {
rules.choose_mut(rng).unwrap().conditions.push(condition);
}
}
}
let promising_conditions: Vec<_> = promising_strategies
.iter()
.flat_map(|s| {
s.rules
.iter()
.flat_map(|rule| &rule.conditions)
.filter(|c| {
!matches!(
c.kind,
ConditionKind::PlayCardId(_) | ConditionKind::UsePotionId(_)
)
})
.cloned()
})
.collect();
let mut result = self.clone();
tweak_rules(&mut result.rules, state, rng, &promising_conditions);
result
}
}
| HillClimbSeedInfo | identifier_name |
helicorder.ts | /*
* Philip Crotwell
* University of South Carolina, 2019
* http://www.seis.sc.edu
*/
import {DateTime, Duration, Interval} from "luxon";
import {removeTrend } from "./filter";
import {Seismogram, SeismogramDisplayData, findMinMaxOverTimeRange} from "./seismogram";
import {SeismogramSegment} from "./seismogramsegment";
import {Seismograph} from "./seismograph";
import {SeismographConfig} from "./seismographconfig";
import {SeisPlotElement} from "./spelement";
import { isDef} from "./util";
export const HELICORDER_ELEMENT = 'sp-helicorder';
/**
* A helicorder-like multi-line seismogram display usually covering 24 hours
*
* @param inSvgParent the parent element, usually a div tag
* @param heliConfig configuration object
* @param seisData the data to display
*/
export class Helicorder extends SeisPlotElement {
constructor(seisData?: Array<SeismogramDisplayData>, seisConfig?: SeismographConfig) {
let heliConfig;
if ( ! seisConfig) {
const timeWindow = Interval.before(DateTime.utc(), Duration.fromObject({hours: 24}));
heliConfig = new HelicorderConfig(timeWindow);
} else if (seisConfig instanceof HelicorderConfig) {
heliConfig = seisConfig;
} else {
heliConfig = HelicorderConfig.fromSeismographConfig(seisConfig);
}
super(seisData, heliConfig);
if (seisData && seisData.length > 1) {
throw new Error(`Helicorder seisData must be length 1, but was ${seisData.length}`);
}
const wrapper = document.createElement('div');
wrapper.setAttribute("class", "wrapper");
this.addStyle(helicorder_css);
this.getShadowRoot().appendChild(wrapper);
// event listener to transform mouse click into time
this.addEventListener("click", evt => {
const detail = this.calcDetailForEvent(evt);
const event = new CustomEvent("heliclick", { detail: detail});
this.dispatchEvent(event);
});
this.addEventListener('mousemove', evt => {
const detail = this.calcDetailForEvent(evt);
const event = new CustomEvent("helimousemove", { detail: detail});
this.dispatchEvent(event);
});
this.addEventListener("helimousemove", hEvent => {
const detail = (hEvent as CustomEvent).detail as HeliMouseEventType;
wrapper.querySelectorAll(`sp-seismograph`).forEach( (seismograph, idx) => {
if (idx === detail.lineNum) | else {
seismograph.shadowRoot?.querySelector("style.selection")?.remove();
}
});
});
}
get heliConfig(): HelicorderConfig {
return this.seismographConfig as HelicorderConfig;
}
set heliConfig(config: HelicorderConfig) {
this.seismographConfig = config;
}
get width(): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.width;
}
get height(): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.height;
}
appendSegment(segment: SeismogramSegment) {
const segMinMax = segment.findMinMax();
const origMinMax = this.heliConfig.fixedAmplitudeScale;
const heliTimeRange = this.heliConfig.fixedTimeScale;
if (!heliTimeRange) { throw new Error("Heli is not fixedTimeScale");}
if (heliTimeRange.end < segment.timeRange.end) {
const lineDuration = Duration.fromMillis(
heliTimeRange.toDuration().toMillis() / this.heliConfig.numLines);
this.heliConfig.fixedTimeScale =
Interval.fromDateTimes(
heliTimeRange.start.plus(lineDuration),
heliTimeRange.end.plus(lineDuration)
);
this.draw();
}
if (this.seisData && this.seisData.length > 0) {
const singleSeisData = this.seisData[0];
singleSeisData.append(segment);
if (heliTimeRange.end < segment.timeRange.end ||
(origMinMax &&
(segMinMax.min < origMinMax[0] ||
origMinMax[1] < segMinMax.max))) {
this.draw(); //redraw because amp changed
} else {
// only redraw overlaping graphs
const seismographList = (this.shadowRoot ? Array.from(this.shadowRoot.querySelectorAll('sp-seismograph')) : []) as Array<Seismograph>;
seismographList.forEach(seisGraph => {
const lineInterval = seisGraph.displayTimeRangeForSeisDisplayData(singleSeisData);
if (segment.timeRange.intersection(lineInterval)) {
// overlaps
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
seisGraph.seisData = [lineSeisData];
}
});
}
} else {
// heli is empty
const sdd = SeismogramDisplayData.fromSeismogram(new Seismogram(segment));
this.seisData = [sdd];
}
}
/**
* draws, or redraws, the helicorder.
*/
draw() {
this.heliConfig.lineSeisConfig.amplitudeMode = this.heliConfig.amplitudeMode;
this.drawSeismograms();
}
/**
* draws or redraws the seismograms in the helicorder
*
* @private
*/
drawSeismograms(): void {
if ( ! this.isConnected) { return; }
const wrapper = (this.getShadowRoot().querySelector('div') as HTMLDivElement);
const timeRange = this.heliConfig.fixedTimeScale;
if (!isDef(timeRange)) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
let maxVariation = 1;
let singleSeisData;
if (this.seisData.length !== 0) {
singleSeisData = this.seisData[0];
} else {
singleSeisData = new SeismogramDisplayData(timeRange);
}
if (singleSeisData.seismogram) {
const mul_percent = 1.01;
if (!this.heliConfig.fixedAmplitudeScale || (
this.heliConfig.fixedAmplitudeScale[0] === 0 && this.heliConfig.fixedAmplitudeScale[1] === 0
)) {
if (this.heliConfig.maxVariation === 0) {
if (singleSeisData.seismogram.timeRange.overlaps(timeRange)) {
const minMax = findMinMaxOverTimeRange([singleSeisData],
timeRange,
false,
this.heliConfig.amplitudeMode);
maxVariation = minMax.expandPercentage(mul_percent).fullWidth;
}
} else {
maxVariation = this.heliConfig.maxVariation;
}
}
}
const startTime = timeRange.start;
const secondsPerLine =
timeRange.toDuration().toMillis() / 1000 / this.heliConfig.numLines;
wrapper.querySelectorAll("sp-seismograph").forEach(e => e.remove());
const lineTimes = this.calcTimesForLines(
startTime,
secondsPerLine,
this.heliConfig.numLines,
);
const margin = this.heliConfig.margin;
const nl = this.heliConfig.numLines;
const maxHeight =
this.heliConfig.maxHeight !== null
? this.heliConfig.maxHeight
: DEFAULT_MAX_HEIGHT;
const baseHeight =
(maxHeight - margin.top - margin.bottom) /
(nl - (nl - 1) * this.heliConfig.overlap);
for (const lineTime of lineTimes) {
const lineNumber = lineTime.lineNumber;
const lineInterval = lineTime.interval;
let startTime = lineTime.interval.start;
const endTime = lineTime.interval.end;
let height = baseHeight;
const marginTop =
lineNumber === 0
? 0
: Math.round(-1.0 * height * this.heliConfig.overlap);
const lineSeisConfig = this.heliConfig.lineSeisConfig.clone();
// don't title lines past the first
lineSeisConfig.title = null;
if (lineNumber === 0) {
lineSeisConfig.title = this.heliConfig.title;
lineSeisConfig.isXAxisTop = this.heliConfig.isXAxisTop;
lineSeisConfig.margin.top += this.heliConfig.margin.top;
height += this.heliConfig.margin.top;
} else if (lineNumber === nl - 1) {
lineSeisConfig.isXAxis = this.heliConfig.isXAxis;
lineSeisConfig.margin.bottom += this.heliConfig.margin.bottom;
height += this.heliConfig.margin.bottom;
}
lineSeisConfig.fixedTimeScale = lineInterval;
lineSeisConfig.yLabel = `${startTime.toFormat("HH:mm")}`;
lineSeisConfig.yLabelRight = `${endTime.toFormat("HH:mm")}`;
lineSeisConfig.lineColors = [
this.heliConfig.lineColors[
lineNumber % this.heliConfig.lineColors.length
],
];
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
if (this.heliConfig.fixedAmplitudeScale && (
this.heliConfig.fixedAmplitudeScale[0] !== 0 || this.heliConfig.fixedAmplitudeScale[1] !== 0
)) {
lineSeisConfig.fixedAmplitudeScale = this.heliConfig.fixedAmplitudeScale;
} else {
lineSeisConfig.fixedAmplitudeScale = [
-1* maxVariation,
maxVariation,
];
}
const seismograph = new Seismograph([lineSeisData], lineSeisConfig);
seismograph.svg.classed(HELICORDER_SELECTOR, true);
seismograph.setAttribute("class", "heliLine");
seismograph.setAttribute("style", `height: ${height}px;margin-top: ${marginTop}px`);
const seismographWrapper = (seismograph.shadowRoot?.querySelector('div') as HTMLDivElement);
const styleEl= document.createElement('style');
const seismographRoot = seismograph.shadowRoot;
if (seismographRoot) {
const helicss = seismographRoot.insertBefore(styleEl, seismographWrapper);
helicss.textContent = `
.yLabel text {
font-size: x-small;
fill: ${lineSeisConfig.lineColors[0]};
}
.utclabels {
position: relative;
font-size: x-small;
width: 100%;
}
.utclabels div {
display: flex;
position: absolute;
left: 0px;
justify-content: space-between;
width: 100%;
z-index: -1;
}
`;
}
wrapper.appendChild(seismograph);
if (lineNumber === 0) {
const utcDiv = document.createElement('div');
utcDiv.setAttribute("class", "utclabels");
const innerDiv = utcDiv.appendChild(document.createElement('div'));
innerDiv.setAttribute("style", `top: ${lineSeisConfig.margin.top}px;`);
const textEl = innerDiv.appendChild(document.createElement('text'));
textEl.textContent = "UTC";
// and to top right
const rightTextEl = innerDiv.appendChild(document.createElement('text'));
rightTextEl.textContent = "UTC";
seismographWrapper.insertBefore(utcDiv, seismographWrapper.firstChild);
}
startTime = endTime;
}
}
cutForLine(singleSeisData: SeismogramDisplayData, lineInterval: Interval): SeismogramDisplayData {
let lineCutSeis = null;
let lineSeisData;
if (singleSeisData.seismogram) {
lineCutSeis = singleSeisData.seismogram.cut(lineInterval);
if (lineCutSeis && this.heliConfig.detrendLines) {
lineCutSeis = removeTrend(lineCutSeis);
}
lineSeisData = singleSeisData.cloneWithNewSeismogram(lineCutSeis);
} else {
// no data in window, but keep seisData in case of markers, etc
lineSeisData = singleSeisData.clone();
}
lineSeisData.timeRange = lineInterval;
return lineSeisData;
}
/**
* Calculates the time range covered by each line of the display
*
* @param startTime start of display
* @param secondsPerLine seconds covered by each line
* @param numberOfLines number of lines
* @returns Array of HeliTimeRange, one per line
*/
calcTimesForLines(
startTime: DateTime,
secondsPerLine: number,
numberOfLines: number,
): Array<HeliTimeRange> {
const out = [];
let s = startTime;
const durationPerLine = Duration.fromMillis(secondsPerLine*1000);
for (let lineNum = 0; lineNum < numberOfLines; lineNum++) {
const startEnd = new HeliTimeRange(s, durationPerLine, lineNum);
out.push(startEnd);
s = startEnd.interval.end;
}
return out;
}
calcDetailForEvent(evt: MouseEvent): HeliMouseEventType {
const heliMargin = this.heliConfig.margin;
const margin = this.heliConfig.lineSeisConfig.margin;
const nl = this.heliConfig.numLines;
const maxHeight =
this.heliConfig.maxHeight !== null
? this.heliConfig.maxHeight
: DEFAULT_MAX_HEIGHT;
const baseHeight =
(maxHeight - (heliMargin.top+heliMargin.bottom )) /
(nl - (nl - 1) * this.heliConfig.overlap);
let clickLine = 0;
if (evt.offsetY < heliMargin.top+baseHeight*(0.5)) {
clickLine = 0;
} else {
clickLine = Math.round(((evt.offsetY-heliMargin.top)-baseHeight*(0.5))/
(baseHeight*(1-this.heliConfig.overlap)));
}
const timeRange = this.heliConfig.fixedTimeScale;
if ( timeRange ) {
const timeLineFraction = (evt.offsetX-margin.left)/(this.width-margin.left-margin.right);
const secondsPerLine =
timeRange.toDuration().toMillis() / 1000 / this.heliConfig.numLines;
const clickTime = timeRange.start.plus(Duration.fromMillis((clickLine+timeLineFraction)*secondsPerLine*1000));
return {
mouseevent: evt,
time: clickTime,
lineNum: clickLine,
};
} else {
throw new Error("Helicorder must be fixedTimeScale");
}
}
}
export const DEFAULT_MAX_HEIGHT = 600;
/**
* Configuration of the helicorder
*
* Note that setting maxVariation=0 and fixedAmplitudeScale=[0,0] will scale the
* data to max
*
* @param timeRange the time range covered by the helicorder, required
*/
export class HelicorderConfig extends SeismographConfig {
lineSeisConfig: SeismographConfig;
overlap: number;
numLines: number;
maxVariation: number;
detrendLines = false;
constructor(timeRange: Interval) {
super();
if (!isDef(timeRange)) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
this.fixedTimeScale = timeRange;
this.maxVariation = 0;
this.maxHeight = DEFAULT_MAX_HEIGHT;
this.xLabel = "";
this.yLabel = "";
this.xSublabel = "";
this.ySublabel = " ";
this.ySublabelIsUnits = false;
this.isXAxis = true;
this.isXAxisTop = true;
this.isYAxis = false;
this.overlap = 0.5;
this.numLines = 12;
this.margin.left = 0;
this.margin.right = 0;
this.margin.top = 40;
this.lineColors = ["skyblue", "olivedrab", "goldenrod"];
this.lineSeisConfig = new SeismographConfig();
this.lineSeisConfig.ySublabel = ` `;
this.lineSeisConfig.xLabel = " ";
this.lineSeisConfig.yLabel = ""; // replace later with `${startTime.toFormat("HH:mm")}`;
this.lineSeisConfig.yLabelOrientation = "horizontal";
this.lineSeisConfig.ySublabelIsUnits = false;
this.lineSeisConfig.isXAxis = false;
this.lineSeisConfig.isYAxis = false;
this.lineSeisConfig.minHeight = 80;
this.lineSeisConfig.margin.top = 0;
this.lineSeisConfig.margin.bottom = 0;
this.lineSeisConfig.margin.left = 37;
this.lineSeisConfig.margin.right = 37;
this.lineSeisConfig.wheelZoom = false;
}
static fromSeismographConfig(seisConfig: SeismographConfig): HelicorderConfig {
if (! seisConfig.fixedTimeScale) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
const heliConfig = new HelicorderConfig(seisConfig.fixedTimeScale);
heliConfig.lineSeisConfig = seisConfig;
heliConfig.lineColors = seisConfig.lineColors;
return heliConfig;
}
}
/**
* Time range for a single line of the helicorder, extends Interval
* to add the line number
*/
export class HeliTimeRange {
lineNumber: number;
interval: Interval;
constructor(
startTime: DateTime,
duration: Duration,
lineNumber: number,
) {
this.interval = Interval.after(startTime, duration);
this.lineNumber = lineNumber;
}
}
/** default styling for helicorder plots. */
export const helicorder_css = `
:host {
display: block;
min-height: 200px;
height: 100%;
cursor: crosshair;
}
`;
export const HELICORDER_SELECTOR = "helicorder";
export const HELI_COLOR_CSS_ID = "helicordercolors";
export type HeliMouseEventType = {
mouseevent: MouseEvent,
time: DateTime,
lineNum: number,
}
customElements.define(HELICORDER_ELEMENT, Helicorder);
| {
let selectedStyle = seismograph.shadowRoot?.querySelector("style.selection");
if ( ! selectedStyle) {
selectedStyle = document.createElement('style');
seismograph.shadowRoot?.insertBefore(selectedStyle, seismograph.shadowRoot?.firstChild);
selectedStyle.setAttribute("class", "selection");
selectedStyle.textContent = `
svg g.yLabel text {
font-weight: bold;
text-decoration: underline;
}
`;
}
} | conditional_block |
helicorder.ts | /*
* Philip Crotwell
* University of South Carolina, 2019
* http://www.seis.sc.edu
*/
import {DateTime, Duration, Interval} from "luxon";
import {removeTrend } from "./filter";
import {Seismogram, SeismogramDisplayData, findMinMaxOverTimeRange} from "./seismogram";
import {SeismogramSegment} from "./seismogramsegment";
import {Seismograph} from "./seismograph";
import {SeismographConfig} from "./seismographconfig";
import {SeisPlotElement} from "./spelement";
import { isDef} from "./util";
export const HELICORDER_ELEMENT = 'sp-helicorder';
/**
* A helicorder-like multi-line seismogram display usually covering 24 hours
*
* @param inSvgParent the parent element, usually a div tag
* @param heliConfig configuration object
* @param seisData the data to display
*/
export class Helicorder extends SeisPlotElement {
constructor(seisData?: Array<SeismogramDisplayData>, seisConfig?: SeismographConfig) {
let heliConfig;
if ( ! seisConfig) {
const timeWindow = Interval.before(DateTime.utc(), Duration.fromObject({hours: 24}));
heliConfig = new HelicorderConfig(timeWindow);
} else if (seisConfig instanceof HelicorderConfig) {
heliConfig = seisConfig;
} else {
heliConfig = HelicorderConfig.fromSeismographConfig(seisConfig);
}
super(seisData, heliConfig);
if (seisData && seisData.length > 1) {
throw new Error(`Helicorder seisData must be length 1, but was ${seisData.length}`);
}
const wrapper = document.createElement('div');
wrapper.setAttribute("class", "wrapper");
this.addStyle(helicorder_css);
this.getShadowRoot().appendChild(wrapper);
// event listener to transform mouse click into time
this.addEventListener("click", evt => {
const detail = this.calcDetailForEvent(evt);
const event = new CustomEvent("heliclick", { detail: detail});
this.dispatchEvent(event);
});
this.addEventListener('mousemove', evt => {
const detail = this.calcDetailForEvent(evt);
const event = new CustomEvent("helimousemove", { detail: detail});
this.dispatchEvent(event);
});
this.addEventListener("helimousemove", hEvent => {
const detail = (hEvent as CustomEvent).detail as HeliMouseEventType;
wrapper.querySelectorAll(`sp-seismograph`).forEach( (seismograph, idx) => {
if (idx === detail.lineNum) {
let selectedStyle = seismograph.shadowRoot?.querySelector("style.selection");
if ( ! selectedStyle) {
selectedStyle = document.createElement('style');
seismograph.shadowRoot?.insertBefore(selectedStyle, seismograph.shadowRoot?.firstChild);
selectedStyle.setAttribute("class", "selection");
selectedStyle.textContent = `
svg g.yLabel text {
font-weight: bold;
text-decoration: underline;
}
`;
}
} else {
seismograph.shadowRoot?.querySelector("style.selection")?.remove();
}
});
});
}
get heliConfig(): HelicorderConfig {
return this.seismographConfig as HelicorderConfig;
}
set heliConfig(config: HelicorderConfig) {
this.seismographConfig = config;
}
get width(): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.width;
}
get height(): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.height;
}
appendSegment(segment: SeismogramSegment) {
const segMinMax = segment.findMinMax();
const origMinMax = this.heliConfig.fixedAmplitudeScale;
const heliTimeRange = this.heliConfig.fixedTimeScale;
if (!heliTimeRange) { throw new Error("Heli is not fixedTimeScale");}
if (heliTimeRange.end < segment.timeRange.end) {
const lineDuration = Duration.fromMillis(
heliTimeRange.toDuration().toMillis() / this.heliConfig.numLines);
this.heliConfig.fixedTimeScale =
Interval.fromDateTimes(
heliTimeRange.start.plus(lineDuration),
heliTimeRange.end.plus(lineDuration)
);
this.draw();
}
if (this.seisData && this.seisData.length > 0) {
const singleSeisData = this.seisData[0];
singleSeisData.append(segment);
if (heliTimeRange.end < segment.timeRange.end ||
(origMinMax &&
(segMinMax.min < origMinMax[0] ||
origMinMax[1] < segMinMax.max))) {
this.draw(); //redraw because amp changed
} else {
// only redraw overlaping graphs
const seismographList = (this.shadowRoot ? Array.from(this.shadowRoot.querySelectorAll('sp-seismograph')) : []) as Array<Seismograph>;
seismographList.forEach(seisGraph => {
const lineInterval = seisGraph.displayTimeRangeForSeisDisplayData(singleSeisData);
if (segment.timeRange.intersection(lineInterval)) {
// overlaps
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
seisGraph.seisData = [lineSeisData];
}
});
}
} else {
// heli is empty
const sdd = SeismogramDisplayData.fromSeismogram(new Seismogram(segment));
this.seisData = [sdd];
}
}
/**
* draws, or redraws, the helicorder.
*/
draw() {
this.heliConfig.lineSeisConfig.amplitudeMode = this.heliConfig.amplitudeMode;
this.drawSeismograms();
}
/**
* draws or redraws the seismograms in the helicorder
*
* @private
*/
drawSeismograms(): void {
if ( ! this.isConnected) { return; }
const wrapper = (this.getShadowRoot().querySelector('div') as HTMLDivElement);
const timeRange = this.heliConfig.fixedTimeScale;
if (!isDef(timeRange)) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
let maxVariation = 1;
let singleSeisData;
if (this.seisData.length !== 0) {
singleSeisData = this.seisData[0];
} else {
singleSeisData = new SeismogramDisplayData(timeRange);
}
if (singleSeisData.seismogram) {
const mul_percent = 1.01;
if (!this.heliConfig.fixedAmplitudeScale || (
this.heliConfig.fixedAmplitudeScale[0] === 0 && this.heliConfig.fixedAmplitudeScale[1] === 0
)) {
if (this.heliConfig.maxVariation === 0) {
if (singleSeisData.seismogram.timeRange.overlaps(timeRange)) {
const minMax = findMinMaxOverTimeRange([singleSeisData],
timeRange,
false,
this.heliConfig.amplitudeMode);
maxVariation = minMax.expandPercentage(mul_percent).fullWidth;
}
} else {
maxVariation = this.heliConfig.maxVariation;
}
}
}
const startTime = timeRange.start;
const secondsPerLine =
timeRange.toDuration().toMillis() / 1000 / this.heliConfig.numLines;
wrapper.querySelectorAll("sp-seismograph").forEach(e => e.remove());
const lineTimes = this.calcTimesForLines(
startTime,
secondsPerLine,
this.heliConfig.numLines,
);
const margin = this.heliConfig.margin;
const nl = this.heliConfig.numLines;
const maxHeight =
this.heliConfig.maxHeight !== null
? this.heliConfig.maxHeight
: DEFAULT_MAX_HEIGHT;
const baseHeight =
(maxHeight - margin.top - margin.bottom) /
(nl - (nl - 1) * this.heliConfig.overlap);
for (const lineTime of lineTimes) {
const lineNumber = lineTime.lineNumber;
const lineInterval = lineTime.interval;
let startTime = lineTime.interval.start;
const endTime = lineTime.interval.end;
let height = baseHeight;
const marginTop =
lineNumber === 0
? 0
: Math.round(-1.0 * height * this.heliConfig.overlap);
const lineSeisConfig = this.heliConfig.lineSeisConfig.clone();
// don't title lines past the first
lineSeisConfig.title = null;
if (lineNumber === 0) {
lineSeisConfig.title = this.heliConfig.title;
lineSeisConfig.isXAxisTop = this.heliConfig.isXAxisTop;
lineSeisConfig.margin.top += this.heliConfig.margin.top;
height += this.heliConfig.margin.top;
} else if (lineNumber === nl - 1) {
lineSeisConfig.isXAxis = this.heliConfig.isXAxis;
lineSeisConfig.margin.bottom += this.heliConfig.margin.bottom;
height += this.heliConfig.margin.bottom;
}
lineSeisConfig.fixedTimeScale = lineInterval;
lineSeisConfig.yLabel = `${startTime.toFormat("HH:mm")}`;
lineSeisConfig.yLabelRight = `${endTime.toFormat("HH:mm")}`;
lineSeisConfig.lineColors = [
this.heliConfig.lineColors[
lineNumber % this.heliConfig.lineColors.length
],
];
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
if (this.heliConfig.fixedAmplitudeScale && (
this.heliConfig.fixedAmplitudeScale[0] !== 0 || this.heliConfig.fixedAmplitudeScale[1] !== 0
)) {
lineSeisConfig.fixedAmplitudeScale = this.heliConfig.fixedAmplitudeScale;
} else {
lineSeisConfig.fixedAmplitudeScale = [
-1* maxVariation,
maxVariation,
];
}
const seismograph = new Seismograph([lineSeisData], lineSeisConfig);
seismograph.svg.classed(HELICORDER_SELECTOR, true);
seismograph.setAttribute("class", "heliLine");
seismograph.setAttribute("style", `height: ${height}px;margin-top: ${marginTop}px`);
const seismographWrapper = (seismograph.shadowRoot?.querySelector('div') as HTMLDivElement);
const styleEl= document.createElement('style');
const seismographRoot = seismograph.shadowRoot;
if (seismographRoot) {
const helicss = seismographRoot.insertBefore(styleEl, seismographWrapper);
helicss.textContent = `
.yLabel text {
font-size: x-small;
fill: ${lineSeisConfig.lineColors[0]};
}
.utclabels {
position: relative;
font-size: x-small;
width: 100%;
}
.utclabels div {
display: flex;
position: absolute;
left: 0px; | }
wrapper.appendChild(seismograph);
if (lineNumber === 0) {
const utcDiv = document.createElement('div');
utcDiv.setAttribute("class", "utclabels");
const innerDiv = utcDiv.appendChild(document.createElement('div'));
innerDiv.setAttribute("style", `top: ${lineSeisConfig.margin.top}px;`);
const textEl = innerDiv.appendChild(document.createElement('text'));
textEl.textContent = "UTC";
// and to top right
const rightTextEl = innerDiv.appendChild(document.createElement('text'));
rightTextEl.textContent = "UTC";
seismographWrapper.insertBefore(utcDiv, seismographWrapper.firstChild);
}
startTime = endTime;
}
}
cutForLine(singleSeisData: SeismogramDisplayData, lineInterval: Interval): SeismogramDisplayData {
let lineCutSeis = null;
let lineSeisData;
if (singleSeisData.seismogram) {
lineCutSeis = singleSeisData.seismogram.cut(lineInterval);
if (lineCutSeis && this.heliConfig.detrendLines) {
lineCutSeis = removeTrend(lineCutSeis);
}
lineSeisData = singleSeisData.cloneWithNewSeismogram(lineCutSeis);
} else {
// no data in window, but keep seisData in case of markers, etc
lineSeisData = singleSeisData.clone();
}
lineSeisData.timeRange = lineInterval;
return lineSeisData;
}
/**
* Calculates the time range covered by each line of the display
*
* @param startTime start of display
* @param secondsPerLine seconds covered by each line
* @param numberOfLines number of lines
* @returns Array of HeliTimeRange, one per line
*/
calcTimesForLines(
startTime: DateTime,
secondsPerLine: number,
numberOfLines: number,
): Array<HeliTimeRange> {
const out = [];
let s = startTime;
const durationPerLine = Duration.fromMillis(secondsPerLine*1000);
for (let lineNum = 0; lineNum < numberOfLines; lineNum++) {
const startEnd = new HeliTimeRange(s, durationPerLine, lineNum);
out.push(startEnd);
s = startEnd.interval.end;
}
return out;
}
calcDetailForEvent(evt: MouseEvent): HeliMouseEventType {
const heliMargin = this.heliConfig.margin;
const margin = this.heliConfig.lineSeisConfig.margin;
const nl = this.heliConfig.numLines;
const maxHeight =
this.heliConfig.maxHeight !== null
? this.heliConfig.maxHeight
: DEFAULT_MAX_HEIGHT;
const baseHeight =
(maxHeight - (heliMargin.top+heliMargin.bottom )) /
(nl - (nl - 1) * this.heliConfig.overlap);
let clickLine = 0;
if (evt.offsetY < heliMargin.top+baseHeight*(0.5)) {
clickLine = 0;
} else {
clickLine = Math.round(((evt.offsetY-heliMargin.top)-baseHeight*(0.5))/
(baseHeight*(1-this.heliConfig.overlap)));
}
const timeRange = this.heliConfig.fixedTimeScale;
if ( timeRange ) {
const timeLineFraction = (evt.offsetX-margin.left)/(this.width-margin.left-margin.right);
const secondsPerLine =
timeRange.toDuration().toMillis() / 1000 / this.heliConfig.numLines;
const clickTime = timeRange.start.plus(Duration.fromMillis((clickLine+timeLineFraction)*secondsPerLine*1000));
return {
mouseevent: evt,
time: clickTime,
lineNum: clickLine,
};
} else {
throw new Error("Helicorder must be fixedTimeScale");
}
}
}
export const DEFAULT_MAX_HEIGHT = 600;
/**
* Configuration of the helicorder
*
* Note that setting maxVariation=0 and fixedAmplitudeScale=[0,0] will scale the
* data to max
*
* @param timeRange the time range covered by the helicorder, required
*/
export class HelicorderConfig extends SeismographConfig {
lineSeisConfig: SeismographConfig;
overlap: number;
numLines: number;
maxVariation: number;
detrendLines = false;
constructor(timeRange: Interval) {
super();
if (!isDef(timeRange)) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
this.fixedTimeScale = timeRange;
this.maxVariation = 0;
this.maxHeight = DEFAULT_MAX_HEIGHT;
this.xLabel = "";
this.yLabel = "";
this.xSublabel = "";
this.ySublabel = " ";
this.ySublabelIsUnits = false;
this.isXAxis = true;
this.isXAxisTop = true;
this.isYAxis = false;
this.overlap = 0.5;
this.numLines = 12;
this.margin.left = 0;
this.margin.right = 0;
this.margin.top = 40;
this.lineColors = ["skyblue", "olivedrab", "goldenrod"];
this.lineSeisConfig = new SeismographConfig();
this.lineSeisConfig.ySublabel = ` `;
this.lineSeisConfig.xLabel = " ";
this.lineSeisConfig.yLabel = ""; // replace later with `${startTime.toFormat("HH:mm")}`;
this.lineSeisConfig.yLabelOrientation = "horizontal";
this.lineSeisConfig.ySublabelIsUnits = false;
this.lineSeisConfig.isXAxis = false;
this.lineSeisConfig.isYAxis = false;
this.lineSeisConfig.minHeight = 80;
this.lineSeisConfig.margin.top = 0;
this.lineSeisConfig.margin.bottom = 0;
this.lineSeisConfig.margin.left = 37;
this.lineSeisConfig.margin.right = 37;
this.lineSeisConfig.wheelZoom = false;
}
static fromSeismographConfig(seisConfig: SeismographConfig): HelicorderConfig {
if (! seisConfig.fixedTimeScale) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
const heliConfig = new HelicorderConfig(seisConfig.fixedTimeScale);
heliConfig.lineSeisConfig = seisConfig;
heliConfig.lineColors = seisConfig.lineColors;
return heliConfig;
}
}
/**
* Time range for a single line of the helicorder, extends Interval
* to add the line number
*/
export class HeliTimeRange {
lineNumber: number;
interval: Interval;
constructor(
startTime: DateTime,
duration: Duration,
lineNumber: number,
) {
this.interval = Interval.after(startTime, duration);
this.lineNumber = lineNumber;
}
}
/** default styling for helicorder plots. */
export const helicorder_css = `
:host {
display: block;
min-height: 200px;
height: 100%;
cursor: crosshair;
}
`;
export const HELICORDER_SELECTOR = "helicorder";
export const HELI_COLOR_CSS_ID = "helicordercolors";
export type HeliMouseEventType = {
mouseevent: MouseEvent,
time: DateTime,
lineNum: number,
}
customElements.define(HELICORDER_ELEMENT, Helicorder); | justify-content: space-between;
width: 100%;
z-index: -1;
}
`; | random_line_split |
helicorder.ts | /*
* Philip Crotwell
* University of South Carolina, 2019
* http://www.seis.sc.edu
*/
import {DateTime, Duration, Interval} from "luxon";
import {removeTrend } from "./filter";
import {Seismogram, SeismogramDisplayData, findMinMaxOverTimeRange} from "./seismogram";
import {SeismogramSegment} from "./seismogramsegment";
import {Seismograph} from "./seismograph";
import {SeismographConfig} from "./seismographconfig";
import {SeisPlotElement} from "./spelement";
import { isDef} from "./util";
export const HELICORDER_ELEMENT = 'sp-helicorder';
/**
* A helicorder-like multi-line seismogram display usually covering 24 hours
*
* @param inSvgParent the parent element, usually a div tag
* @param heliConfig configuration object
* @param seisData the data to display
*/
export class Helicorder extends SeisPlotElement {
constructor(seisData?: Array<SeismogramDisplayData>, seisConfig?: SeismographConfig) {
let heliConfig;
if ( ! seisConfig) {
const timeWindow = Interval.before(DateTime.utc(), Duration.fromObject({hours: 24}));
heliConfig = new HelicorderConfig(timeWindow);
} else if (seisConfig instanceof HelicorderConfig) {
heliConfig = seisConfig;
} else {
heliConfig = HelicorderConfig.fromSeismographConfig(seisConfig);
}
super(seisData, heliConfig);
if (seisData && seisData.length > 1) {
throw new Error(`Helicorder seisData must be length 1, but was ${seisData.length}`);
}
const wrapper = document.createElement('div');
wrapper.setAttribute("class", "wrapper");
this.addStyle(helicorder_css);
this.getShadowRoot().appendChild(wrapper);
// event listener to transform mouse click into time
this.addEventListener("click", evt => {
const detail = this.calcDetailForEvent(evt);
const event = new CustomEvent("heliclick", { detail: detail});
this.dispatchEvent(event);
});
this.addEventListener('mousemove', evt => {
const detail = this.calcDetailForEvent(evt);
const event = new CustomEvent("helimousemove", { detail: detail});
this.dispatchEvent(event);
});
this.addEventListener("helimousemove", hEvent => {
const detail = (hEvent as CustomEvent).detail as HeliMouseEventType;
wrapper.querySelectorAll(`sp-seismograph`).forEach( (seismograph, idx) => {
if (idx === detail.lineNum) {
let selectedStyle = seismograph.shadowRoot?.querySelector("style.selection");
if ( ! selectedStyle) {
selectedStyle = document.createElement('style');
seismograph.shadowRoot?.insertBefore(selectedStyle, seismograph.shadowRoot?.firstChild);
selectedStyle.setAttribute("class", "selection");
selectedStyle.textContent = `
svg g.yLabel text {
font-weight: bold;
text-decoration: underline;
}
`;
}
} else {
seismograph.shadowRoot?.querySelector("style.selection")?.remove();
}
});
});
}
get heliConfig(): HelicorderConfig {
return this.seismographConfig as HelicorderConfig;
}
set heliConfig(config: HelicorderConfig) {
this.seismographConfig = config;
}
get width(): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.width;
}
get | (): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.height;
}
appendSegment(segment: SeismogramSegment) {
const segMinMax = segment.findMinMax();
const origMinMax = this.heliConfig.fixedAmplitudeScale;
const heliTimeRange = this.heliConfig.fixedTimeScale;
if (!heliTimeRange) { throw new Error("Heli is not fixedTimeScale");}
if (heliTimeRange.end < segment.timeRange.end) {
const lineDuration = Duration.fromMillis(
heliTimeRange.toDuration().toMillis() / this.heliConfig.numLines);
this.heliConfig.fixedTimeScale =
Interval.fromDateTimes(
heliTimeRange.start.plus(lineDuration),
heliTimeRange.end.plus(lineDuration)
);
this.draw();
}
if (this.seisData && this.seisData.length > 0) {
const singleSeisData = this.seisData[0];
singleSeisData.append(segment);
if (heliTimeRange.end < segment.timeRange.end ||
(origMinMax &&
(segMinMax.min < origMinMax[0] ||
origMinMax[1] < segMinMax.max))) {
this.draw(); //redraw because amp changed
} else {
// only redraw overlaping graphs
const seismographList = (this.shadowRoot ? Array.from(this.shadowRoot.querySelectorAll('sp-seismograph')) : []) as Array<Seismograph>;
seismographList.forEach(seisGraph => {
const lineInterval = seisGraph.displayTimeRangeForSeisDisplayData(singleSeisData);
if (segment.timeRange.intersection(lineInterval)) {
// overlaps
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
seisGraph.seisData = [lineSeisData];
}
});
}
} else {
// heli is empty
const sdd = SeismogramDisplayData.fromSeismogram(new Seismogram(segment));
this.seisData = [sdd];
}
}
/**
* draws, or redraws, the helicorder.
*/
draw() {
this.heliConfig.lineSeisConfig.amplitudeMode = this.heliConfig.amplitudeMode;
this.drawSeismograms();
}
/**
* draws or redraws the seismograms in the helicorder
*
* @private
*/
drawSeismograms(): void {
if ( ! this.isConnected) { return; }
const wrapper = (this.getShadowRoot().querySelector('div') as HTMLDivElement);
const timeRange = this.heliConfig.fixedTimeScale;
if (!isDef(timeRange)) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
let maxVariation = 1;
let singleSeisData;
if (this.seisData.length !== 0) {
singleSeisData = this.seisData[0];
} else {
singleSeisData = new SeismogramDisplayData(timeRange);
}
if (singleSeisData.seismogram) {
const mul_percent = 1.01;
if (!this.heliConfig.fixedAmplitudeScale || (
this.heliConfig.fixedAmplitudeScale[0] === 0 && this.heliConfig.fixedAmplitudeScale[1] === 0
)) {
if (this.heliConfig.maxVariation === 0) {
if (singleSeisData.seismogram.timeRange.overlaps(timeRange)) {
const minMax = findMinMaxOverTimeRange([singleSeisData],
timeRange,
false,
this.heliConfig.amplitudeMode);
maxVariation = minMax.expandPercentage(mul_percent).fullWidth;
}
} else {
maxVariation = this.heliConfig.maxVariation;
}
}
}
const startTime = timeRange.start;
const secondsPerLine =
timeRange.toDuration().toMillis() / 1000 / this.heliConfig.numLines;
wrapper.querySelectorAll("sp-seismograph").forEach(e => e.remove());
const lineTimes = this.calcTimesForLines(
startTime,
secondsPerLine,
this.heliConfig.numLines,
);
const margin = this.heliConfig.margin;
const nl = this.heliConfig.numLines;
const maxHeight =
this.heliConfig.maxHeight !== null
? this.heliConfig.maxHeight
: DEFAULT_MAX_HEIGHT;
const baseHeight =
(maxHeight - margin.top - margin.bottom) /
(nl - (nl - 1) * this.heliConfig.overlap);
for (const lineTime of lineTimes) {
const lineNumber = lineTime.lineNumber;
const lineInterval = lineTime.interval;
let startTime = lineTime.interval.start;
const endTime = lineTime.interval.end;
let height = baseHeight;
const marginTop =
lineNumber === 0
? 0
: Math.round(-1.0 * height * this.heliConfig.overlap);
const lineSeisConfig = this.heliConfig.lineSeisConfig.clone();
// don't title lines past the first
lineSeisConfig.title = null;
if (lineNumber === 0) {
lineSeisConfig.title = this.heliConfig.title;
lineSeisConfig.isXAxisTop = this.heliConfig.isXAxisTop;
lineSeisConfig.margin.top += this.heliConfig.margin.top;
height += this.heliConfig.margin.top;
} else if (lineNumber === nl - 1) {
lineSeisConfig.isXAxis = this.heliConfig.isXAxis;
lineSeisConfig.margin.bottom += this.heliConfig.margin.bottom;
height += this.heliConfig.margin.bottom;
}
lineSeisConfig.fixedTimeScale = lineInterval;
lineSeisConfig.yLabel = `${startTime.toFormat("HH:mm")}`;
lineSeisConfig.yLabelRight = `${endTime.toFormat("HH:mm")}`;
lineSeisConfig.lineColors = [
this.heliConfig.lineColors[
lineNumber % this.heliConfig.lineColors.length
],
];
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
if (this.heliConfig.fixedAmplitudeScale && (
this.heliConfig.fixedAmplitudeScale[0] !== 0 || this.heliConfig.fixedAmplitudeScale[1] !== 0
)) {
lineSeisConfig.fixedAmplitudeScale = this.heliConfig.fixedAmplitudeScale;
} else {
lineSeisConfig.fixedAmplitudeScale = [
-1* maxVariation,
maxVariation,
];
}
const seismograph = new Seismograph([lineSeisData], lineSeisConfig);
seismograph.svg.classed(HELICORDER_SELECTOR, true);
seismograph.setAttribute("class", "heliLine");
seismograph.setAttribute("style", `height: ${height}px;margin-top: ${marginTop}px`);
const seismographWrapper = (seismograph.shadowRoot?.querySelector('div') as HTMLDivElement);
const styleEl= document.createElement('style');
const seismographRoot = seismograph.shadowRoot;
if (seismographRoot) {
const helicss = seismographRoot.insertBefore(styleEl, seismographWrapper);
helicss.textContent = `
.yLabel text {
font-size: x-small;
fill: ${lineSeisConfig.lineColors[0]};
}
.utclabels {
position: relative;
font-size: x-small;
width: 100%;
}
.utclabels div {
display: flex;
position: absolute;
left: 0px;
justify-content: space-between;
width: 100%;
z-index: -1;
}
`;
}
wrapper.appendChild(seismograph);
if (lineNumber === 0) {
const utcDiv = document.createElement('div');
utcDiv.setAttribute("class", "utclabels");
const innerDiv = utcDiv.appendChild(document.createElement('div'));
innerDiv.setAttribute("style", `top: ${lineSeisConfig.margin.top}px;`);
const textEl = innerDiv.appendChild(document.createElement('text'));
textEl.textContent = "UTC";
// and to top right
const rightTextEl = innerDiv.appendChild(document.createElement('text'));
rightTextEl.textContent = "UTC";
seismographWrapper.insertBefore(utcDiv, seismographWrapper.firstChild);
}
startTime = endTime;
}
}
cutForLine(singleSeisData: SeismogramDisplayData, lineInterval: Interval): SeismogramDisplayData {
let lineCutSeis = null;
let lineSeisData;
if (singleSeisData.seismogram) {
lineCutSeis = singleSeisData.seismogram.cut(lineInterval);
if (lineCutSeis && this.heliConfig.detrendLines) {
lineCutSeis = removeTrend(lineCutSeis);
}
lineSeisData = singleSeisData.cloneWithNewSeismogram(lineCutSeis);
} else {
// no data in window, but keep seisData in case of markers, etc
lineSeisData = singleSeisData.clone();
}
lineSeisData.timeRange = lineInterval;
return lineSeisData;
}
/**
* Calculates the time range covered by each line of the display
*
* @param startTime start of display
* @param secondsPerLine seconds covered by each line
* @param numberOfLines number of lines
* @returns Array of HeliTimeRange, one per line
*/
calcTimesForLines(
startTime: DateTime,
secondsPerLine: number,
numberOfLines: number,
): Array<HeliTimeRange> {
const out = [];
let s = startTime;
const durationPerLine = Duration.fromMillis(secondsPerLine*1000);
for (let lineNum = 0; lineNum < numberOfLines; lineNum++) {
const startEnd = new HeliTimeRange(s, durationPerLine, lineNum);
out.push(startEnd);
s = startEnd.interval.end;
}
return out;
}
calcDetailForEvent(evt: MouseEvent): HeliMouseEventType {
const heliMargin = this.heliConfig.margin;
const margin = this.heliConfig.lineSeisConfig.margin;
const nl = this.heliConfig.numLines;
const maxHeight =
this.heliConfig.maxHeight !== null
? this.heliConfig.maxHeight
: DEFAULT_MAX_HEIGHT;
const baseHeight =
(maxHeight - (heliMargin.top+heliMargin.bottom )) /
(nl - (nl - 1) * this.heliConfig.overlap);
let clickLine = 0;
if (evt.offsetY < heliMargin.top+baseHeight*(0.5)) {
clickLine = 0;
} else {
clickLine = Math.round(((evt.offsetY-heliMargin.top)-baseHeight*(0.5))/
(baseHeight*(1-this.heliConfig.overlap)));
}
const timeRange = this.heliConfig.fixedTimeScale;
if ( timeRange ) {
const timeLineFraction = (evt.offsetX-margin.left)/(this.width-margin.left-margin.right);
const secondsPerLine =
timeRange.toDuration().toMillis() / 1000 / this.heliConfig.numLines;
const clickTime = timeRange.start.plus(Duration.fromMillis((clickLine+timeLineFraction)*secondsPerLine*1000));
return {
mouseevent: evt,
time: clickTime,
lineNum: clickLine,
};
} else {
throw new Error("Helicorder must be fixedTimeScale");
}
}
}
export const DEFAULT_MAX_HEIGHT = 600;
/**
* Configuration of the helicorder
*
* Note that setting maxVariation=0 and fixedAmplitudeScale=[0,0] will scale the
* data to max
*
* @param timeRange the time range covered by the helicorder, required
*/
export class HelicorderConfig extends SeismographConfig {
lineSeisConfig: SeismographConfig;
overlap: number;
numLines: number;
maxVariation: number;
detrendLines = false;
constructor(timeRange: Interval) {
super();
if (!isDef(timeRange)) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
this.fixedTimeScale = timeRange;
this.maxVariation = 0;
this.maxHeight = DEFAULT_MAX_HEIGHT;
this.xLabel = "";
this.yLabel = "";
this.xSublabel = "";
this.ySublabel = " ";
this.ySublabelIsUnits = false;
this.isXAxis = true;
this.isXAxisTop = true;
this.isYAxis = false;
this.overlap = 0.5;
this.numLines = 12;
this.margin.left = 0;
this.margin.right = 0;
this.margin.top = 40;
this.lineColors = ["skyblue", "olivedrab", "goldenrod"];
this.lineSeisConfig = new SeismographConfig();
this.lineSeisConfig.ySublabel = ` `;
this.lineSeisConfig.xLabel = " ";
this.lineSeisConfig.yLabel = ""; // replace later with `${startTime.toFormat("HH:mm")}`;
this.lineSeisConfig.yLabelOrientation = "horizontal";
this.lineSeisConfig.ySublabelIsUnits = false;
this.lineSeisConfig.isXAxis = false;
this.lineSeisConfig.isYAxis = false;
this.lineSeisConfig.minHeight = 80;
this.lineSeisConfig.margin.top = 0;
this.lineSeisConfig.margin.bottom = 0;
this.lineSeisConfig.margin.left = 37;
this.lineSeisConfig.margin.right = 37;
this.lineSeisConfig.wheelZoom = false;
}
static fromSeismographConfig(seisConfig: SeismographConfig): HelicorderConfig {
if (! seisConfig.fixedTimeScale) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
const heliConfig = new HelicorderConfig(seisConfig.fixedTimeScale);
heliConfig.lineSeisConfig = seisConfig;
heliConfig.lineColors = seisConfig.lineColors;
return heliConfig;
}
}
/**
* Time range for a single line of the helicorder, extends Interval
* to add the line number
*/
export class HeliTimeRange {
lineNumber: number;
interval: Interval;
constructor(
startTime: DateTime,
duration: Duration,
lineNumber: number,
) {
this.interval = Interval.after(startTime, duration);
this.lineNumber = lineNumber;
}
}
/** default styling for helicorder plots. */
export const helicorder_css = `
:host {
display: block;
min-height: 200px;
height: 100%;
cursor: crosshair;
}
`;
export const HELICORDER_SELECTOR = "helicorder";
export const HELI_COLOR_CSS_ID = "helicordercolors";
export type HeliMouseEventType = {
mouseevent: MouseEvent,
time: DateTime,
lineNum: number,
}
customElements.define(HELICORDER_ELEMENT, Helicorder);
| height | identifier_name |
helicorder.ts | /*
* Philip Crotwell
* University of South Carolina, 2019
* http://www.seis.sc.edu
*/
import {DateTime, Duration, Interval} from "luxon";
import {removeTrend } from "./filter";
import {Seismogram, SeismogramDisplayData, findMinMaxOverTimeRange} from "./seismogram";
import {SeismogramSegment} from "./seismogramsegment";
import {Seismograph} from "./seismograph";
import {SeismographConfig} from "./seismographconfig";
import {SeisPlotElement} from "./spelement";
import { isDef} from "./util";
export const HELICORDER_ELEMENT = 'sp-helicorder';
/**
* A helicorder-like multi-line seismogram display usually covering 24 hours
*
* @param inSvgParent the parent element, usually a div tag
* @param heliConfig configuration object
* @param seisData the data to display
*/
export class Helicorder extends SeisPlotElement {
constructor(seisData?: Array<SeismogramDisplayData>, seisConfig?: SeismographConfig) {
let heliConfig;
if ( ! seisConfig) {
const timeWindow = Interval.before(DateTime.utc(), Duration.fromObject({hours: 24}));
heliConfig = new HelicorderConfig(timeWindow);
} else if (seisConfig instanceof HelicorderConfig) {
heliConfig = seisConfig;
} else {
heliConfig = HelicorderConfig.fromSeismographConfig(seisConfig);
}
super(seisData, heliConfig);
if (seisData && seisData.length > 1) {
throw new Error(`Helicorder seisData must be length 1, but was ${seisData.length}`);
}
const wrapper = document.createElement('div');
wrapper.setAttribute("class", "wrapper");
this.addStyle(helicorder_css);
this.getShadowRoot().appendChild(wrapper);
// event listener to transform mouse click into time
this.addEventListener("click", evt => {
const detail = this.calcDetailForEvent(evt);
const event = new CustomEvent("heliclick", { detail: detail});
this.dispatchEvent(event);
});
this.addEventListener('mousemove', evt => {
const detail = this.calcDetailForEvent(evt);
const event = new CustomEvent("helimousemove", { detail: detail});
this.dispatchEvent(event);
});
this.addEventListener("helimousemove", hEvent => {
const detail = (hEvent as CustomEvent).detail as HeliMouseEventType;
wrapper.querySelectorAll(`sp-seismograph`).forEach( (seismograph, idx) => {
if (idx === detail.lineNum) {
let selectedStyle = seismograph.shadowRoot?.querySelector("style.selection");
if ( ! selectedStyle) {
selectedStyle = document.createElement('style');
seismograph.shadowRoot?.insertBefore(selectedStyle, seismograph.shadowRoot?.firstChild);
selectedStyle.setAttribute("class", "selection");
selectedStyle.textContent = `
svg g.yLabel text {
font-weight: bold;
text-decoration: underline;
}
`;
}
} else {
seismograph.shadowRoot?.querySelector("style.selection")?.remove();
}
});
});
}
get heliConfig(): HelicorderConfig {
return this.seismographConfig as HelicorderConfig;
}
set heliConfig(config: HelicorderConfig) {
this.seismographConfig = config;
}
get width(): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.width;
}
get height(): number {
const wrapper = (this.getShadowRoot().querySelector('div.wrapper') as HTMLDivElement);
const rect = wrapper.getBoundingClientRect();
return rect.height;
}
appendSegment(segment: SeismogramSegment) {
const segMinMax = segment.findMinMax();
const origMinMax = this.heliConfig.fixedAmplitudeScale;
const heliTimeRange = this.heliConfig.fixedTimeScale;
if (!heliTimeRange) { throw new Error("Heli is not fixedTimeScale");}
if (heliTimeRange.end < segment.timeRange.end) {
const lineDuration = Duration.fromMillis(
heliTimeRange.toDuration().toMillis() / this.heliConfig.numLines);
this.heliConfig.fixedTimeScale =
Interval.fromDateTimes(
heliTimeRange.start.plus(lineDuration),
heliTimeRange.end.plus(lineDuration)
);
this.draw();
}
if (this.seisData && this.seisData.length > 0) {
const singleSeisData = this.seisData[0];
singleSeisData.append(segment);
if (heliTimeRange.end < segment.timeRange.end ||
(origMinMax &&
(segMinMax.min < origMinMax[0] ||
origMinMax[1] < segMinMax.max))) {
this.draw(); //redraw because amp changed
} else {
// only redraw overlaping graphs
const seismographList = (this.shadowRoot ? Array.from(this.shadowRoot.querySelectorAll('sp-seismograph')) : []) as Array<Seismograph>;
seismographList.forEach(seisGraph => {
const lineInterval = seisGraph.displayTimeRangeForSeisDisplayData(singleSeisData);
if (segment.timeRange.intersection(lineInterval)) {
// overlaps
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
seisGraph.seisData = [lineSeisData];
}
});
}
} else {
// heli is empty
const sdd = SeismogramDisplayData.fromSeismogram(new Seismogram(segment));
this.seisData = [sdd];
}
}
/**
* draws, or redraws, the helicorder.
*/
draw() {
this.heliConfig.lineSeisConfig.amplitudeMode = this.heliConfig.amplitudeMode;
this.drawSeismograms();
}
/**
* draws or redraws the seismograms in the helicorder
*
* @private
*/
drawSeismograms(): void {
if ( ! this.isConnected) { return; }
const wrapper = (this.getShadowRoot().querySelector('div') as HTMLDivElement);
const timeRange = this.heliConfig.fixedTimeScale;
if (!isDef(timeRange)) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
let maxVariation = 1;
let singleSeisData;
if (this.seisData.length !== 0) {
singleSeisData = this.seisData[0];
} else {
singleSeisData = new SeismogramDisplayData(timeRange);
}
if (singleSeisData.seismogram) {
const mul_percent = 1.01;
if (!this.heliConfig.fixedAmplitudeScale || (
this.heliConfig.fixedAmplitudeScale[0] === 0 && this.heliConfig.fixedAmplitudeScale[1] === 0
)) {
if (this.heliConfig.maxVariation === 0) {
if (singleSeisData.seismogram.timeRange.overlaps(timeRange)) {
const minMax = findMinMaxOverTimeRange([singleSeisData],
timeRange,
false,
this.heliConfig.amplitudeMode);
maxVariation = minMax.expandPercentage(mul_percent).fullWidth;
}
} else {
maxVariation = this.heliConfig.maxVariation;
}
}
}
const startTime = timeRange.start;
const secondsPerLine =
timeRange.toDuration().toMillis() / 1000 / this.heliConfig.numLines;
wrapper.querySelectorAll("sp-seismograph").forEach(e => e.remove());
const lineTimes = this.calcTimesForLines(
startTime,
secondsPerLine,
this.heliConfig.numLines,
);
const margin = this.heliConfig.margin;
const nl = this.heliConfig.numLines;
const maxHeight =
this.heliConfig.maxHeight !== null
? this.heliConfig.maxHeight
: DEFAULT_MAX_HEIGHT;
const baseHeight =
(maxHeight - margin.top - margin.bottom) /
(nl - (nl - 1) * this.heliConfig.overlap);
for (const lineTime of lineTimes) {
const lineNumber = lineTime.lineNumber;
const lineInterval = lineTime.interval;
let startTime = lineTime.interval.start;
const endTime = lineTime.interval.end;
let height = baseHeight;
const marginTop =
lineNumber === 0
? 0
: Math.round(-1.0 * height * this.heliConfig.overlap);
const lineSeisConfig = this.heliConfig.lineSeisConfig.clone();
// don't title lines past the first
lineSeisConfig.title = null;
if (lineNumber === 0) {
lineSeisConfig.title = this.heliConfig.title;
lineSeisConfig.isXAxisTop = this.heliConfig.isXAxisTop;
lineSeisConfig.margin.top += this.heliConfig.margin.top;
height += this.heliConfig.margin.top;
} else if (lineNumber === nl - 1) {
lineSeisConfig.isXAxis = this.heliConfig.isXAxis;
lineSeisConfig.margin.bottom += this.heliConfig.margin.bottom;
height += this.heliConfig.margin.bottom;
}
lineSeisConfig.fixedTimeScale = lineInterval;
lineSeisConfig.yLabel = `${startTime.toFormat("HH:mm")}`;
lineSeisConfig.yLabelRight = `${endTime.toFormat("HH:mm")}`;
lineSeisConfig.lineColors = [
this.heliConfig.lineColors[
lineNumber % this.heliConfig.lineColors.length
],
];
const lineSeisData = this.cutForLine(singleSeisData, lineInterval);
if (this.heliConfig.fixedAmplitudeScale && (
this.heliConfig.fixedAmplitudeScale[0] !== 0 || this.heliConfig.fixedAmplitudeScale[1] !== 0
)) {
lineSeisConfig.fixedAmplitudeScale = this.heliConfig.fixedAmplitudeScale;
} else {
lineSeisConfig.fixedAmplitudeScale = [
-1* maxVariation,
maxVariation,
];
}
const seismograph = new Seismograph([lineSeisData], lineSeisConfig);
seismograph.svg.classed(HELICORDER_SELECTOR, true);
seismograph.setAttribute("class", "heliLine");
seismograph.setAttribute("style", `height: ${height}px;margin-top: ${marginTop}px`);
const seismographWrapper = (seismograph.shadowRoot?.querySelector('div') as HTMLDivElement);
const styleEl= document.createElement('style');
const seismographRoot = seismograph.shadowRoot;
if (seismographRoot) {
const helicss = seismographRoot.insertBefore(styleEl, seismographWrapper);
helicss.textContent = `
.yLabel text {
font-size: x-small;
fill: ${lineSeisConfig.lineColors[0]};
}
.utclabels {
position: relative;
font-size: x-small;
width: 100%;
}
.utclabels div {
display: flex;
position: absolute;
left: 0px;
justify-content: space-between;
width: 100%;
z-index: -1;
}
`;
}
wrapper.appendChild(seismograph);
if (lineNumber === 0) {
const utcDiv = document.createElement('div');
utcDiv.setAttribute("class", "utclabels");
const innerDiv = utcDiv.appendChild(document.createElement('div'));
innerDiv.setAttribute("style", `top: ${lineSeisConfig.margin.top}px;`);
const textEl = innerDiv.appendChild(document.createElement('text'));
textEl.textContent = "UTC";
// and to top right
const rightTextEl = innerDiv.appendChild(document.createElement('text'));
rightTextEl.textContent = "UTC";
seismographWrapper.insertBefore(utcDiv, seismographWrapper.firstChild);
}
startTime = endTime;
}
}
cutForLine(singleSeisData: SeismogramDisplayData, lineInterval: Interval): SeismogramDisplayData {
let lineCutSeis = null;
let lineSeisData;
if (singleSeisData.seismogram) {
lineCutSeis = singleSeisData.seismogram.cut(lineInterval);
if (lineCutSeis && this.heliConfig.detrendLines) {
lineCutSeis = removeTrend(lineCutSeis);
}
lineSeisData = singleSeisData.cloneWithNewSeismogram(lineCutSeis);
} else {
// no data in window, but keep seisData in case of markers, etc
lineSeisData = singleSeisData.clone();
}
lineSeisData.timeRange = lineInterval;
return lineSeisData;
}
/**
* Calculates the time range covered by each line of the display
*
* @param startTime start of display
* @param secondsPerLine seconds covered by each line
* @param numberOfLines number of lines
* @returns Array of HeliTimeRange, one per line
*/
calcTimesForLines(
startTime: DateTime,
secondsPerLine: number,
numberOfLines: number,
): Array<HeliTimeRange> {
const out = [];
let s = startTime;
const durationPerLine = Duration.fromMillis(secondsPerLine*1000);
for (let lineNum = 0; lineNum < numberOfLines; lineNum++) {
const startEnd = new HeliTimeRange(s, durationPerLine, lineNum);
out.push(startEnd);
s = startEnd.interval.end;
}
return out;
}
calcDetailForEvent(evt: MouseEvent): HeliMouseEventType {
const heliMargin = this.heliConfig.margin;
const margin = this.heliConfig.lineSeisConfig.margin;
const nl = this.heliConfig.numLines;
const maxHeight =
this.heliConfig.maxHeight !== null
? this.heliConfig.maxHeight
: DEFAULT_MAX_HEIGHT;
const baseHeight =
(maxHeight - (heliMargin.top+heliMargin.bottom )) /
(nl - (nl - 1) * this.heliConfig.overlap);
let clickLine = 0;
if (evt.offsetY < heliMargin.top+baseHeight*(0.5)) {
clickLine = 0;
} else {
clickLine = Math.round(((evt.offsetY-heliMargin.top)-baseHeight*(0.5))/
(baseHeight*(1-this.heliConfig.overlap)));
}
const timeRange = this.heliConfig.fixedTimeScale;
if ( timeRange ) {
const timeLineFraction = (evt.offsetX-margin.left)/(this.width-margin.left-margin.right);
const secondsPerLine =
timeRange.toDuration().toMillis() / 1000 / this.heliConfig.numLines;
const clickTime = timeRange.start.plus(Duration.fromMillis((clickLine+timeLineFraction)*secondsPerLine*1000));
return {
mouseevent: evt,
time: clickTime,
lineNum: clickLine,
};
} else {
throw new Error("Helicorder must be fixedTimeScale");
}
}
}
export const DEFAULT_MAX_HEIGHT = 600;
/**
* Configuration of the helicorder
*
* Note that setting maxVariation=0 and fixedAmplitudeScale=[0,0] will scale the
* data to max
*
* @param timeRange the time range covered by the helicorder, required
*/
export class HelicorderConfig extends SeismographConfig {
lineSeisConfig: SeismographConfig;
overlap: number;
numLines: number;
maxVariation: number;
detrendLines = false;
constructor(timeRange: Interval) {
super();
if (!isDef(timeRange)) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
this.fixedTimeScale = timeRange;
this.maxVariation = 0;
this.maxHeight = DEFAULT_MAX_HEIGHT;
this.xLabel = "";
this.yLabel = "";
this.xSublabel = "";
this.ySublabel = " ";
this.ySublabelIsUnits = false;
this.isXAxis = true;
this.isXAxisTop = true;
this.isYAxis = false;
this.overlap = 0.5;
this.numLines = 12;
this.margin.left = 0;
this.margin.right = 0;
this.margin.top = 40;
this.lineColors = ["skyblue", "olivedrab", "goldenrod"];
this.lineSeisConfig = new SeismographConfig();
this.lineSeisConfig.ySublabel = ` `;
this.lineSeisConfig.xLabel = " ";
this.lineSeisConfig.yLabel = ""; // replace later with `${startTime.toFormat("HH:mm")}`;
this.lineSeisConfig.yLabelOrientation = "horizontal";
this.lineSeisConfig.ySublabelIsUnits = false;
this.lineSeisConfig.isXAxis = false;
this.lineSeisConfig.isYAxis = false;
this.lineSeisConfig.minHeight = 80;
this.lineSeisConfig.margin.top = 0;
this.lineSeisConfig.margin.bottom = 0;
this.lineSeisConfig.margin.left = 37;
this.lineSeisConfig.margin.right = 37;
this.lineSeisConfig.wheelZoom = false;
}
static fromSeismographConfig(seisConfig: SeismographConfig): HelicorderConfig {
if (! seisConfig.fixedTimeScale) {
throw new Error("Helicorder config must have fixedTimeScale set");
}
const heliConfig = new HelicorderConfig(seisConfig.fixedTimeScale);
heliConfig.lineSeisConfig = seisConfig;
heliConfig.lineColors = seisConfig.lineColors;
return heliConfig;
}
}
/**
* Time range for a single line of the helicorder, extends Interval
* to add the line number
*/
export class HeliTimeRange {
lineNumber: number;
interval: Interval;
constructor(
startTime: DateTime,
duration: Duration,
lineNumber: number,
) |
}
/** default styling for helicorder plots. */
export const helicorder_css = `
:host {
display: block;
min-height: 200px;
height: 100%;
cursor: crosshair;
}
`;
export const HELICORDER_SELECTOR = "helicorder";
export const HELI_COLOR_CSS_ID = "helicordercolors";
export type HeliMouseEventType = {
mouseevent: MouseEvent,
time: DateTime,
lineNum: number,
}
customElements.define(HELICORDER_ELEMENT, Helicorder);
| {
this.interval = Interval.after(startTime, duration);
this.lineNumber = lineNumber;
} | identifier_body |
testkeras.py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
import glob
from root_numpy import root2array
from numpy.lib.recfunctions import stack_arrays
def root2pandas(files_path, tree_name, **kwargs):
'''
Args:
-----
files_path: a string like './data/*.root', for example
tree_name: a string like 'Collection_Tree' corresponding to the name of the folder inside the root
file that we want to open
kwargs: arguments taken by root2array, such as branches to consider, start, stop, step, etc
Returns:
--------
output_panda: a pandas dataframe like allbkg_df in which all the info from the root file will be stored
Note:
-----
if you are working with .root files that contain different branches, you might have to mask your data
in that case, return pd.DataFrame(ss.data)
'''
# -- create list of .root files to process | ss = stack_arrays([root2array(fpath, tree_name, **kwargs).view(np.recarray) for fpath in files])
try:
return pd.DataFrame(ss)
except Exception:
return pd.DataFrame(ss.data)
def flatten(column):
'''
Args:
-----
column: a column of a pandas df whose entries are lists (or regular entries -- in which case nothing is done)
e.g.: my_df['some_variable']
Returns:
--------
flattened out version of the column.
For example, it will turn:
[1791, 2719, 1891]
[1717, 1, 0, 171, 9181, 537, 12]
[82, 11]
...
into:
1791, 2719, 1891, 1717, 1, 0, 171, 9181, 537, 12, 82, 11, ...
'''
try:
return np.array([v for e in column for v in e])
except (TypeError, ValueError):
return column
########################################################################################
fiSig = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410980*/410980.root'
fiBkg = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410470.PhPy8EG.DAOD_TOPQ1.e6337_s3126_r9364_p3629.Oct18-v2_output_root/user.jbarkelo.15859005._000010.output.root'
fiBkg = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410470.PhPy8EG.DAOD_TOPQ1.e6337_s3126_r9364_p3629.Oct18-v2_output_root/user.jbarkelo.15859005._00001*.root'
fiSig2 = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.41098*/410981.root'
#sig = root2pandas(fiSig,'nominal',selection = 'ejets_2015 >0||ejets_2016>0||ejets_2017>0')
#sigFriend = root2pandas(fiSig+'FCNCFriend','nominalFCNCFriend')
SRSelect = '(ejets_2015||ejets_2016||ejets_2017)&&(ph_pt[0]>50000)&&(len(jet_e)>=2)'
sig = pd.concat([root2pandas(fiSig,'nominal'),root2pandas(fiSig2,'nominal')])
sigFriend = pd.concat([root2pandas(fiSig+'FCNCFriend','nominalFCNCFriend'),root2pandas(fiSig2+'FCNCFriend', 'nominalFCNCFriend')])
sig_df = pd.concat([sig,sigFriend], axis=1,join_axes=[sig.index])
#Now to go through and do a specific selection on the data frame itself, this is like a selection for ejets
sig_df = sig_df[sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017'] ==1]
sig_df.reset_index(drop=True)
sig_df = sig_df.assign(ph_e0=pd.Series([i[0] for i in sig_df['ph_e']],index=sig_df.index))
sig_df = sig_df.assign(ph_pt0=pd.Series([i[0] for i in sig_df['ph_pt']],index=sig_df.index))
sig_df = sig_df.assign(ph_eta0=pd.Series([i[0] for i in sig_df['ph_eta']],index=sig_df.index))
sig_df = sig_df.assign(ph_phi0=pd.Series([i[0] for i in sig_df['ph_phi']],index=sig_df.index))
#Not an ideal way to do this but it will work for now, grabs leading photon information
bkg = root2pandas(fiBkg,'nominal',selection = 'ejets_2015 >0||ejets_2016>0||ejets_2017>0')
bkgFriend = root2pandas(fiBkg+'FCNCFriend','nominalFCNCFriend')
bkg_df = pd.concat([bkg,bkgFriend], axis=1,join_axes=[bkg.index])
#Now to go through and do a specific selection on the data frame itself, this is like a selection for ejets
bkg_df = bkg_df[bkg_df.loc[:,'ejets_2015']+ bkg_df.loc[:,'ejets_2016']+bkg_df.loc[:,'ejets_2017'] ==1]
bkg_df.reset_index(drop=True)
bkg_df = bkg_df.assign(ph_e0=pd.Series([i[0] for i in bkg_df['ph_e']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_pt0=pd.Series([i[0] for i in bkg_df['ph_pt']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_eta0=pd.Series([i[0] for i in bkg_df['ph_eta']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_phi0=pd.Series([i[0] for i in bkg_df['ph_phi']],index=bkg_df.index))
#Weight placeholders
#bkgw =bkg_df.loc[:,'mujets_2015']+ bkg_df.loc[:,'mujets_2016']+bkg_df.loc[:,'mujets_2017'] #mu just place holder to set bkgweights to 0 for testing!!! Barkeloo
#sigw =sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017']
#w=pd.concat((sigw,bkgw),ignore_index=True).values
w = pd.concat((sig_df['weight_mc'],bkg_df['weight_mc']),ignore_index=True).values
##can run something like b = root2pandas(fiSig,'nominal', selection = 'ejets_2015 >0||ejets_2016>0') for a selection like in http://scikit-hep.org/root_numpy/start.html#a-quick-tutorial
print sig.keys()
## Names of some event-level branches
npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2','ph_phi0','ph_pt0','ph_eta0','ph_e0']#,'m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2']
#npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','ph_phi0','ph_pt0','ph_eta0','ph_e0']
'''
for key in npart: # loop through the event-level branches and plot them on separate histograms
# -- set font and canvas size (optional)
matplotlib.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(8,8), dpi=100)
# -- declare common binning strategy (otherwise every histogram will have its own binning)
bins = np.linspace(min(sig_df[key]), max(sig_df[key]) + 1, 30)
# plot!
_ = plt.hist(sig_df[key], histtype='step', normed=False, bins=bins, label=r'FCNC', linewidth=2)
_ = plt.hist(bkg_df[key], histtype='step', normed=False, bins=bins, label=r'ttbar')
plt.xlabel(key)
plt.yscale('log')
plt.legend(loc='best')
plt.savefig(str(key)+'.png')
plt.clf()
'''
df_full = pd.concat((sig_df,bkg_df), ignore_index=True)
df = pd.concat((sig_df[npart],bkg_df[npart]),ignore_index=True)
X=df.values#as_matrix()
type(X)
X.shape
#w=pd.concat((sig_df['ejets_2015'],sig_df['ejets_2016'],sig_df['ejets_2017'],bkg_df['ejets_2015'],bkg_df['ejets_2016'],bkg['ejets_2017']),ignore_index=True).values
type(w)
#Generate an array of truth labels yo distinguish among different classes in the problem
y=[]
for _df, ID in [(sig_df,1),(bkg_df,0)]:
y.extend([ID] * _df.shape[0])
y=np.array(y)
y.shape
ix = range(X.shape[0]) # array of indices, just to keep track of them for safety reasons and future checks
#X_train, X_test, y_train, y_test, w_train, w_test, ix_train, ix_test = train_test_split(X, y, w, ix, train_size=0.8)
######## 80% Train+Validate, 20% test
X_train, X_test, \
y_train, y_test, \
ix_train, ix_test\
= train_test_split(X, y, ix, test_size=0.2)
# 64% train, 16% validate, 20% of original 80%
X_train, X_val,\
y_train, y_val,\
ix_train, ix_val\
=train_test_split(X_train,y_train,ix_train,test_size=0.2)
print "Scaling \n"
from sklearn.preprocessing import StandardScaler, RobustScaler
scaler = StandardScaler()
#scaler = RobustScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
from keras.models import Model
from keras.layers import Dense, Dropout, Input
inputs = Input(shape=(X_train.shape[1], )) # placeholder
n = X_train.shape[1]
hidden = Dense(n+1, activation='relu')(inputs)
hidden = Dropout(0.2)(hidden)
hidden = Dense(2*n+2, activation='relu')(hidden)
hidden = Dropout(0.2)(hidden)
#hidden = Dense(4*n+4, activation='relu')(hidden)
#hidden = Dropout(0.2)(hidden)
outputs = Dense(1, activation='sigmoid')(hidden)
#outputs = Dense(2, activation='softmax')(hidden) #needs as many
# last layer has to have the same dimensionality as the number of classes we want to predict, here 2
model = Model(inputs, outputs)
model.summary()
from keras.utils.vis_utils import plot_model
#plot_model(model, 'temp.png', show_shapes=True)
#model.compile('adam','sparse_categorical_crossentropy', metrics=['acc'])
model.compile('adam','binary_crossentropy', metrics=['acc'])
from keras.callbacks import EarlyStopping, ModelCheckpoint
from collections import Counter
print "NEvents to train over: ", Counter(y_train)
print "NEvents to test over: ", Counter(y_test)
print "Training: "
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight('balanced',
np.unique(y_train),
y_train)
class_weight_dict = dict(enumerate(class_weights))
try:
model.fit(
X_train, y_train,# class_weight= class_weight_dict,# class_weight={ # rebalance class representation
# 0 : 0.70 * (float(len(y)) / (y == 0).sum()),
# 1 : 0.30 * (float(len(y)) / (y == 1).sum()) #These are some sort of weights. seems weird to have to do this, basically what youre training on I think
### 2 : 0.40 * (float(len(y)) / (y == 2).sum())
# },
callbacks = [
EarlyStopping(verbose=True, patience=15, monitor='val_loss'),
ModelCheckpoint('./models/tutorial-progress.h5', monitor='val_loss', verbose=True, save_best_only=True)
],
epochs=200,
validation_data=(X_val, y_val)
# validation_split = 0.3,
# verbose=True
)
except KeyboardInterrupt:
print 'Training ended early.'
model.load_weights('./models/tutorial-progress.h5')
#################
# Visualization of model history
history = model.history.history
print "history keys: ", history.keys()
#Accuracy plot
plt.plot(100 * np.array(history['acc']), label='training')
plt.plot(100 * np.array(history['val_acc']), label='validation')
plt.xlim(0)
plt.xlabel('epoch')
plt.ylabel('accuracy %')
plt.legend(loc='lower right', fontsize=20)
plt.savefig('accuarcy.png')
plt.close()
#loss plot
plt.plot(100 * np.array(history['loss']), label='training')
plt.plot(100 * np.array(history['val_loss']), label='validation')
plt.xlim(0)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='upper right', fontsize=20)
# the line indicate the epoch corresponding to the best performance on the validation set
# plt.vlines(np.argmin(history['val_loss']), 45, 56, linestyle='dashed', linewidth=0.5)
plt.savefig('loss.png')
plt.close()
print 'Loss estimate on unseen examples (from validation set) = {0:.3f}'.format(np.min(history['val_loss']))
############################################################
###############
# -- Save network weights and structure
print 'Saving model...'
model.save_weights('./models/tutorial.h5', overwrite=True)
json_string = model.to_json()
open('./models/tutorial.json', 'w').write(json_string)
print 'Done'
print 'Testing...'
yhat = model.predict(X_test, verbose = True, batch_size = 512)
print "yhat: ", yhat
yhat_cls = np.argmax(yhat, axis=1)
import itertools
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
'''
#compute confusion matrix
cnf_matrix = confusion_matrix(y_test, yhat_cls, sample_weight=w_test)
np.set_printoptions(precision=4)
plot_confusion_matrix(cnf_matrix, classes=['Sig', 'Bkg'],
normalize=True,
title='Normalized confusion matrix')
# signal eff = weighted tpr --> out of all signal events, what % for we classify as signal?
print 'Signal efficiency:', w_test[(y_test == 1) & (yhat_cls == 1)].sum() / w_test[y_test == 1].sum()
# bkg eff = weighted fpr --> out of all bkg events, what % do we classify as signal?
b_eff = w_test[(y_test != 0) & (yhat_cls == 0)].sum() / w_test[y_test != 0].sum()
print 'Background efficiency:', b_eff
print 'Background rej:', 1 / b_eff
'''
# -- events that got assigned to class 0
predicted_sig = df_full.iloc[np.array(ix_test)[yhat_cls == 0]]
predicted_sig['true'] = y_test[yhat_cls == 0]
print predicted_sig.head()
plt.clf()
bins = np.linspace(0, 1, 20)
#For normalization
wes = np.ones_like(yhat[y_test==1])/len(yhat[y_test==1])
web = np.ones_like(yhat[y_test==0])/len(yhat[y_test==0])
_ = plt.hist(yhat[y_test==1], histtype='stepfilled', alpha=0.5, color='red', label=r"Signal", bins=bins, weights=wes)
_ = plt.hist(yhat[y_test==0], histtype='stepfilled', alpha=0.5, color='blue', label=r'Background', bins=bins, weights=web)
#_ = plt.hist(yhat[y_test==1], histtype='stepfilled', alpha=0.5, color='red', label=r"Signal", bins=bins)
#_ = plt.hist(yhat[y_test==0], histtype='stepfilled', alpha=0.5, color='blue', label=r'Background', bins=bins)
plt.legend(loc='upper center')
plt.xlabel('P(signal) assigned by the model')
plt.tight_layout()
plt.savefig('sigbkg.png')
plt.close('all')
print "Sum of weights of first layer mapped to input variable: "
we = model.layers[1].get_weights()
for i in range(len(we[0])):
print npart[i], " : ", sum(we[0][i])
print "Making ROC Curves. . ."
from sklearn.metrics import roc_curve,roc_auc_score
#fpr = false positive, tpr = true positive
fpr, tpr,thresholds = roc_curve(y_test,yhat)
auc = roc_auc_score(y_test,yhat)
plt.figure(figsize=(10,10))
#plt.grid(b = True, which = 'minor')
#plt.grid(b = True, which = 'major')
_=plt.plot(tpr,1.-fpr, label='Model: AUC=%.3f' %auc)
plt.legend()
plt.xlim(0.,1.2)
plt.ylim(0.,1.4)
#plt.yscale('log')
plt.savefig('roc.png')
plt.clf()
print "Donezo" | files = glob.glob(files_path)
# -- process ntuples into rec arrays | random_line_split |
testkeras.py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
import glob
from root_numpy import root2array
from numpy.lib.recfunctions import stack_arrays
def root2pandas(files_path, tree_name, **kwargs):
'''
Args:
-----
files_path: a string like './data/*.root', for example
tree_name: a string like 'Collection_Tree' corresponding to the name of the folder inside the root
file that we want to open
kwargs: arguments taken by root2array, such as branches to consider, start, stop, step, etc
Returns:
--------
output_panda: a pandas dataframe like allbkg_df in which all the info from the root file will be stored
Note:
-----
if you are working with .root files that contain different branches, you might have to mask your data
in that case, return pd.DataFrame(ss.data)
'''
# -- create list of .root files to process
files = glob.glob(files_path)
# -- process ntuples into rec arrays
ss = stack_arrays([root2array(fpath, tree_name, **kwargs).view(np.recarray) for fpath in files])
try:
return pd.DataFrame(ss)
except Exception:
return pd.DataFrame(ss.data)
def flatten(column):
'''
Args:
-----
column: a column of a pandas df whose entries are lists (or regular entries -- in which case nothing is done)
e.g.: my_df['some_variable']
Returns:
--------
flattened out version of the column.
For example, it will turn:
[1791, 2719, 1891]
[1717, 1, 0, 171, 9181, 537, 12]
[82, 11]
...
into:
1791, 2719, 1891, 1717, 1, 0, 171, 9181, 537, 12, 82, 11, ...
'''
try:
return np.array([v for e in column for v in e])
except (TypeError, ValueError):
return column
########################################################################################
fiSig = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410980*/410980.root'
fiBkg = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410470.PhPy8EG.DAOD_TOPQ1.e6337_s3126_r9364_p3629.Oct18-v2_output_root/user.jbarkelo.15859005._000010.output.root'
fiBkg = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410470.PhPy8EG.DAOD_TOPQ1.e6337_s3126_r9364_p3629.Oct18-v2_output_root/user.jbarkelo.15859005._00001*.root'
fiSig2 = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.41098*/410981.root'
#sig = root2pandas(fiSig,'nominal',selection = 'ejets_2015 >0||ejets_2016>0||ejets_2017>0')
#sigFriend = root2pandas(fiSig+'FCNCFriend','nominalFCNCFriend')
SRSelect = '(ejets_2015||ejets_2016||ejets_2017)&&(ph_pt[0]>50000)&&(len(jet_e)>=2)'
sig = pd.concat([root2pandas(fiSig,'nominal'),root2pandas(fiSig2,'nominal')])
sigFriend = pd.concat([root2pandas(fiSig+'FCNCFriend','nominalFCNCFriend'),root2pandas(fiSig2+'FCNCFriend', 'nominalFCNCFriend')])
sig_df = pd.concat([sig,sigFriend], axis=1,join_axes=[sig.index])
#Now to go through and do a specific selection on the data frame itself, this is like a selection for ejets
sig_df = sig_df[sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017'] ==1]
sig_df.reset_index(drop=True)
sig_df = sig_df.assign(ph_e0=pd.Series([i[0] for i in sig_df['ph_e']],index=sig_df.index))
sig_df = sig_df.assign(ph_pt0=pd.Series([i[0] for i in sig_df['ph_pt']],index=sig_df.index))
sig_df = sig_df.assign(ph_eta0=pd.Series([i[0] for i in sig_df['ph_eta']],index=sig_df.index))
sig_df = sig_df.assign(ph_phi0=pd.Series([i[0] for i in sig_df['ph_phi']],index=sig_df.index))
#Not an ideal way to do this but it will work for now, grabs leading photon information
bkg = root2pandas(fiBkg,'nominal',selection = 'ejets_2015 >0||ejets_2016>0||ejets_2017>0')
bkgFriend = root2pandas(fiBkg+'FCNCFriend','nominalFCNCFriend')
bkg_df = pd.concat([bkg,bkgFriend], axis=1,join_axes=[bkg.index])
#Now to go through and do a specific selection on the data frame itself, this is like a selection for ejets
bkg_df = bkg_df[bkg_df.loc[:,'ejets_2015']+ bkg_df.loc[:,'ejets_2016']+bkg_df.loc[:,'ejets_2017'] ==1]
bkg_df.reset_index(drop=True)
bkg_df = bkg_df.assign(ph_e0=pd.Series([i[0] for i in bkg_df['ph_e']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_pt0=pd.Series([i[0] for i in bkg_df['ph_pt']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_eta0=pd.Series([i[0] for i in bkg_df['ph_eta']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_phi0=pd.Series([i[0] for i in bkg_df['ph_phi']],index=bkg_df.index))
#Weight placeholders
#bkgw =bkg_df.loc[:,'mujets_2015']+ bkg_df.loc[:,'mujets_2016']+bkg_df.loc[:,'mujets_2017'] #mu just place holder to set bkgweights to 0 for testing!!! Barkeloo
#sigw =sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017']
#w=pd.concat((sigw,bkgw),ignore_index=True).values
w = pd.concat((sig_df['weight_mc'],bkg_df['weight_mc']),ignore_index=True).values
##can run something like b = root2pandas(fiSig,'nominal', selection = 'ejets_2015 >0||ejets_2016>0') for a selection like in http://scikit-hep.org/root_numpy/start.html#a-quick-tutorial
print sig.keys()
## Names of some event-level branches
npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2','ph_phi0','ph_pt0','ph_eta0','ph_e0']#,'m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2']
#npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','ph_phi0','ph_pt0','ph_eta0','ph_e0']
'''
for key in npart: # loop through the event-level branches and plot them on separate histograms
# -- set font and canvas size (optional)
matplotlib.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(8,8), dpi=100)
# -- declare common binning strategy (otherwise every histogram will have its own binning)
bins = np.linspace(min(sig_df[key]), max(sig_df[key]) + 1, 30)
# plot!
_ = plt.hist(sig_df[key], histtype='step', normed=False, bins=bins, label=r'FCNC', linewidth=2)
_ = plt.hist(bkg_df[key], histtype='step', normed=False, bins=bins, label=r'ttbar')
plt.xlabel(key)
plt.yscale('log')
plt.legend(loc='best')
plt.savefig(str(key)+'.png')
plt.clf()
'''
df_full = pd.concat((sig_df,bkg_df), ignore_index=True)
df = pd.concat((sig_df[npart],bkg_df[npart]),ignore_index=True)
X=df.values#as_matrix()
type(X)
X.shape
#w=pd.concat((sig_df['ejets_2015'],sig_df['ejets_2016'],sig_df['ejets_2017'],bkg_df['ejets_2015'],bkg_df['ejets_2016'],bkg['ejets_2017']),ignore_index=True).values
type(w)
#Generate an array of truth labels yo distinguish among different classes in the problem
y=[]
for _df, ID in [(sig_df,1),(bkg_df,0)]:
y.extend([ID] * _df.shape[0])
y=np.array(y)
y.shape
ix = range(X.shape[0]) # array of indices, just to keep track of them for safety reasons and future checks
#X_train, X_test, y_train, y_test, w_train, w_test, ix_train, ix_test = train_test_split(X, y, w, ix, train_size=0.8)
######## 80% Train+Validate, 20% test
X_train, X_test, \
y_train, y_test, \
ix_train, ix_test\
= train_test_split(X, y, ix, test_size=0.2)
# 64% train, 16% validate, 20% of original 80%
X_train, X_val,\
y_train, y_val,\
ix_train, ix_val\
=train_test_split(X_train,y_train,ix_train,test_size=0.2)
print "Scaling \n"
from sklearn.preprocessing import StandardScaler, RobustScaler
scaler = StandardScaler()
#scaler = RobustScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
from keras.models import Model
from keras.layers import Dense, Dropout, Input
inputs = Input(shape=(X_train.shape[1], )) # placeholder
n = X_train.shape[1]
hidden = Dense(n+1, activation='relu')(inputs)
hidden = Dropout(0.2)(hidden)
hidden = Dense(2*n+2, activation='relu')(hidden)
hidden = Dropout(0.2)(hidden)
#hidden = Dense(4*n+4, activation='relu')(hidden)
#hidden = Dropout(0.2)(hidden)
outputs = Dense(1, activation='sigmoid')(hidden)
#outputs = Dense(2, activation='softmax')(hidden) #needs as many
# last layer has to have the same dimensionality as the number of classes we want to predict, here 2
model = Model(inputs, outputs)
model.summary()
from keras.utils.vis_utils import plot_model
#plot_model(model, 'temp.png', show_shapes=True)
#model.compile('adam','sparse_categorical_crossentropy', metrics=['acc'])
model.compile('adam','binary_crossentropy', metrics=['acc'])
from keras.callbacks import EarlyStopping, ModelCheckpoint
from collections import Counter
print "NEvents to train over: ", Counter(y_train)
print "NEvents to test over: ", Counter(y_test)
print "Training: "
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight('balanced',
np.unique(y_train),
y_train)
class_weight_dict = dict(enumerate(class_weights))
try:
model.fit(
X_train, y_train,# class_weight= class_weight_dict,# class_weight={ # rebalance class representation
# 0 : 0.70 * (float(len(y)) / (y == 0).sum()),
# 1 : 0.30 * (float(len(y)) / (y == 1).sum()) #These are some sort of weights. seems weird to have to do this, basically what youre training on I think
### 2 : 0.40 * (float(len(y)) / (y == 2).sum())
# },
callbacks = [
EarlyStopping(verbose=True, patience=15, monitor='val_loss'),
ModelCheckpoint('./models/tutorial-progress.h5', monitor='val_loss', verbose=True, save_best_only=True)
],
epochs=200,
validation_data=(X_val, y_val)
# validation_split = 0.3,
# verbose=True
)
except KeyboardInterrupt:
print 'Training ended early.'
model.load_weights('./models/tutorial-progress.h5')
#################
# Visualization of model history
history = model.history.history
print "history keys: ", history.keys()
#Accuracy plot
plt.plot(100 * np.array(history['acc']), label='training')
plt.plot(100 * np.array(history['val_acc']), label='validation')
plt.xlim(0)
plt.xlabel('epoch')
plt.ylabel('accuracy %')
plt.legend(loc='lower right', fontsize=20)
plt.savefig('accuarcy.png')
plt.close()
#loss plot
plt.plot(100 * np.array(history['loss']), label='training')
plt.plot(100 * np.array(history['val_loss']), label='validation')
plt.xlim(0)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='upper right', fontsize=20)
# the line indicate the epoch corresponding to the best performance on the validation set
# plt.vlines(np.argmin(history['val_loss']), 45, 56, linestyle='dashed', linewidth=0.5)
plt.savefig('loss.png')
plt.close()
print 'Loss estimate on unseen examples (from validation set) = {0:.3f}'.format(np.min(history['val_loss']))
############################################################
###############
# -- Save network weights and structure
print 'Saving model...'
model.save_weights('./models/tutorial.h5', overwrite=True)
json_string = model.to_json()
open('./models/tutorial.json', 'w').write(json_string)
print 'Done'
print 'Testing...'
yhat = model.predict(X_test, verbose = True, batch_size = 512)
print "yhat: ", yhat
yhat_cls = np.argmax(yhat, axis=1)
import itertools
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
|
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
'''
#compute confusion matrix
cnf_matrix = confusion_matrix(y_test, yhat_cls, sample_weight=w_test)
np.set_printoptions(precision=4)
plot_confusion_matrix(cnf_matrix, classes=['Sig', 'Bkg'],
normalize=True,
title='Normalized confusion matrix')
# signal eff = weighted tpr --> out of all signal events, what % for we classify as signal?
print 'Signal efficiency:', w_test[(y_test == 1) & (yhat_cls == 1)].sum() / w_test[y_test == 1].sum()
# bkg eff = weighted fpr --> out of all bkg events, what % do we classify as signal?
b_eff = w_test[(y_test != 0) & (yhat_cls == 0)].sum() / w_test[y_test != 0].sum()
print 'Background efficiency:', b_eff
print 'Background rej:', 1 / b_eff
'''
# -- events that got assigned to class 0
predicted_sig = df_full.iloc[np.array(ix_test)[yhat_cls == 0]]
predicted_sig['true'] = y_test[yhat_cls == 0]
print predicted_sig.head()
plt.clf()
bins = np.linspace(0, 1, 20)
#For normalization
wes = np.ones_like(yhat[y_test==1])/len(yhat[y_test==1])
web = np.ones_like(yhat[y_test==0])/len(yhat[y_test==0])
_ = plt.hist(yhat[y_test==1], histtype='stepfilled', alpha=0.5, color='red', label=r"Signal", bins=bins, weights=wes)
_ = plt.hist(yhat[y_test==0], histtype='stepfilled', alpha=0.5, color='blue', label=r'Background', bins=bins, weights=web)
#_ = plt.hist(yhat[y_test==1], histtype='stepfilled', alpha=0.5, color='red', label=r"Signal", bins=bins)
#_ = plt.hist(yhat[y_test==0], histtype='stepfilled', alpha=0.5, color='blue', label=r'Background', bins=bins)
plt.legend(loc='upper center')
plt.xlabel('P(signal) assigned by the model')
plt.tight_layout()
plt.savefig('sigbkg.png')
plt.close('all')
print "Sum of weights of first layer mapped to input variable: "
we = model.layers[1].get_weights()
for i in range(len(we[0])):
print npart[i], " : ", sum(we[0][i])
print "Making ROC Curves. . ."
from sklearn.metrics import roc_curve,roc_auc_score
#fpr = false positive, tpr = true positive
fpr, tpr,thresholds = roc_curve(y_test,yhat)
auc = roc_auc_score(y_test,yhat)
plt.figure(figsize=(10,10))
#plt.grid(b = True, which = 'minor')
#plt.grid(b = True, which = 'major')
_=plt.plot(tpr,1.-fpr, label='Model: AUC=%.3f' %auc)
plt.legend()
plt.xlim(0.,1.2)
plt.ylim(0.,1.4)
#plt.yscale('log')
plt.savefig('roc.png')
plt.clf()
print "Donezo"
| plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black") | conditional_block |
testkeras.py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
import glob
from root_numpy import root2array
from numpy.lib.recfunctions import stack_arrays
def root2pandas(files_path, tree_name, **kwargs):
'''
Args:
-----
files_path: a string like './data/*.root', for example
tree_name: a string like 'Collection_Tree' corresponding to the name of the folder inside the root
file that we want to open
kwargs: arguments taken by root2array, such as branches to consider, start, stop, step, etc
Returns:
--------
output_panda: a pandas dataframe like allbkg_df in which all the info from the root file will be stored
Note:
-----
if you are working with .root files that contain different branches, you might have to mask your data
in that case, return pd.DataFrame(ss.data)
'''
# -- create list of .root files to process
files = glob.glob(files_path)
# -- process ntuples into rec arrays
ss = stack_arrays([root2array(fpath, tree_name, **kwargs).view(np.recarray) for fpath in files])
try:
return pd.DataFrame(ss)
except Exception:
return pd.DataFrame(ss.data)
def flatten(column):
|
try:
return np.array([v for e in column for v in e])
except (TypeError, ValueError):
return column
########################################################################################
fiSig = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410980*/410980.root'
fiBkg = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410470.PhPy8EG.DAOD_TOPQ1.e6337_s3126_r9364_p3629.Oct18-v2_output_root/user.jbarkelo.15859005._000010.output.root'
fiBkg = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410470.PhPy8EG.DAOD_TOPQ1.e6337_s3126_r9364_p3629.Oct18-v2_output_root/user.jbarkelo.15859005._00001*.root'
fiSig2 = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.41098*/410981.root'
#sig = root2pandas(fiSig,'nominal',selection = 'ejets_2015 >0||ejets_2016>0||ejets_2017>0')
#sigFriend = root2pandas(fiSig+'FCNCFriend','nominalFCNCFriend')
SRSelect = '(ejets_2015||ejets_2016||ejets_2017)&&(ph_pt[0]>50000)&&(len(jet_e)>=2)'
sig = pd.concat([root2pandas(fiSig,'nominal'),root2pandas(fiSig2,'nominal')])
sigFriend = pd.concat([root2pandas(fiSig+'FCNCFriend','nominalFCNCFriend'),root2pandas(fiSig2+'FCNCFriend', 'nominalFCNCFriend')])
sig_df = pd.concat([sig,sigFriend], axis=1,join_axes=[sig.index])
#Now to go through and do a specific selection on the data frame itself, this is like a selection for ejets
sig_df = sig_df[sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017'] ==1]
sig_df.reset_index(drop=True)
sig_df = sig_df.assign(ph_e0=pd.Series([i[0] for i in sig_df['ph_e']],index=sig_df.index))
sig_df = sig_df.assign(ph_pt0=pd.Series([i[0] for i in sig_df['ph_pt']],index=sig_df.index))
sig_df = sig_df.assign(ph_eta0=pd.Series([i[0] for i in sig_df['ph_eta']],index=sig_df.index))
sig_df = sig_df.assign(ph_phi0=pd.Series([i[0] for i in sig_df['ph_phi']],index=sig_df.index))
#Not an ideal way to do this but it will work for now, grabs leading photon information
bkg = root2pandas(fiBkg,'nominal',selection = 'ejets_2015 >0||ejets_2016>0||ejets_2017>0')
bkgFriend = root2pandas(fiBkg+'FCNCFriend','nominalFCNCFriend')
bkg_df = pd.concat([bkg,bkgFriend], axis=1,join_axes=[bkg.index])
#Now to go through and do a specific selection on the data frame itself, this is like a selection for ejets
bkg_df = bkg_df[bkg_df.loc[:,'ejets_2015']+ bkg_df.loc[:,'ejets_2016']+bkg_df.loc[:,'ejets_2017'] ==1]
bkg_df.reset_index(drop=True)
bkg_df = bkg_df.assign(ph_e0=pd.Series([i[0] for i in bkg_df['ph_e']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_pt0=pd.Series([i[0] for i in bkg_df['ph_pt']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_eta0=pd.Series([i[0] for i in bkg_df['ph_eta']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_phi0=pd.Series([i[0] for i in bkg_df['ph_phi']],index=bkg_df.index))
#Weight placeholders
#bkgw =bkg_df.loc[:,'mujets_2015']+ bkg_df.loc[:,'mujets_2016']+bkg_df.loc[:,'mujets_2017'] #mu just place holder to set bkgweights to 0 for testing!!! Barkeloo
#sigw =sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017']
#w=pd.concat((sigw,bkgw),ignore_index=True).values
w = pd.concat((sig_df['weight_mc'],bkg_df['weight_mc']),ignore_index=True).values
##can run something like b = root2pandas(fiSig,'nominal', selection = 'ejets_2015 >0||ejets_2016>0') for a selection like in http://scikit-hep.org/root_numpy/start.html#a-quick-tutorial
print sig.keys()
## Names of some event-level branches
npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2','ph_phi0','ph_pt0','ph_eta0','ph_e0']#,'m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2']
#npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','ph_phi0','ph_pt0','ph_eta0','ph_e0']
'''
for key in npart: # loop through the event-level branches and plot them on separate histograms
# -- set font and canvas size (optional)
matplotlib.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(8,8), dpi=100)
# -- declare common binning strategy (otherwise every histogram will have its own binning)
bins = np.linspace(min(sig_df[key]), max(sig_df[key]) + 1, 30)
# plot!
_ = plt.hist(sig_df[key], histtype='step', normed=False, bins=bins, label=r'FCNC', linewidth=2)
_ = plt.hist(bkg_df[key], histtype='step', normed=False, bins=bins, label=r'ttbar')
plt.xlabel(key)
plt.yscale('log')
plt.legend(loc='best')
plt.savefig(str(key)+'.png')
plt.clf()
'''
df_full = pd.concat((sig_df,bkg_df), ignore_index=True)
df = pd.concat((sig_df[npart],bkg_df[npart]),ignore_index=True)
X=df.values#as_matrix()
type(X)
X.shape
#w=pd.concat((sig_df['ejets_2015'],sig_df['ejets_2016'],sig_df['ejets_2017'],bkg_df['ejets_2015'],bkg_df['ejets_2016'],bkg['ejets_2017']),ignore_index=True).values
type(w)
#Generate an array of truth labels yo distinguish among different classes in the problem
y=[]
for _df, ID in [(sig_df,1),(bkg_df,0)]:
y.extend([ID] * _df.shape[0])
y=np.array(y)
y.shape
ix = range(X.shape[0]) # array of indices, just to keep track of them for safety reasons and future checks
#X_train, X_test, y_train, y_test, w_train, w_test, ix_train, ix_test = train_test_split(X, y, w, ix, train_size=0.8)
######## 80% Train+Validate, 20% test
X_train, X_test, \
y_train, y_test, \
ix_train, ix_test\
= train_test_split(X, y, ix, test_size=0.2)
# 64% train, 16% validate, 20% of original 80%
X_train, X_val,\
y_train, y_val,\
ix_train, ix_val\
=train_test_split(X_train,y_train,ix_train,test_size=0.2)
print "Scaling \n"
from sklearn.preprocessing import StandardScaler, RobustScaler
scaler = StandardScaler()
#scaler = RobustScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
from keras.models import Model
from keras.layers import Dense, Dropout, Input
inputs = Input(shape=(X_train.shape[1], )) # placeholder
n = X_train.shape[1]
hidden = Dense(n+1, activation='relu')(inputs)
hidden = Dropout(0.2)(hidden)
hidden = Dense(2*n+2, activation='relu')(hidden)
hidden = Dropout(0.2)(hidden)
#hidden = Dense(4*n+4, activation='relu')(hidden)
#hidden = Dropout(0.2)(hidden)
outputs = Dense(1, activation='sigmoid')(hidden)
#outputs = Dense(2, activation='softmax')(hidden) #needs as many
# last layer has to have the same dimensionality as the number of classes we want to predict, here 2
model = Model(inputs, outputs)
model.summary()
from keras.utils.vis_utils import plot_model
#plot_model(model, 'temp.png', show_shapes=True)
#model.compile('adam','sparse_categorical_crossentropy', metrics=['acc'])
model.compile('adam','binary_crossentropy', metrics=['acc'])
from keras.callbacks import EarlyStopping, ModelCheckpoint
from collections import Counter
print "NEvents to train over: ", Counter(y_train)
print "NEvents to test over: ", Counter(y_test)
print "Training: "
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight('balanced',
np.unique(y_train),
y_train)
class_weight_dict = dict(enumerate(class_weights))
try:
model.fit(
X_train, y_train,# class_weight= class_weight_dict,# class_weight={ # rebalance class representation
# 0 : 0.70 * (float(len(y)) / (y == 0).sum()),
# 1 : 0.30 * (float(len(y)) / (y == 1).sum()) #These are some sort of weights. seems weird to have to do this, basically what youre training on I think
### 2 : 0.40 * (float(len(y)) / (y == 2).sum())
# },
callbacks = [
EarlyStopping(verbose=True, patience=15, monitor='val_loss'),
ModelCheckpoint('./models/tutorial-progress.h5', monitor='val_loss', verbose=True, save_best_only=True)
],
epochs=200,
validation_data=(X_val, y_val)
# validation_split = 0.3,
# verbose=True
)
except KeyboardInterrupt:
print 'Training ended early.'
model.load_weights('./models/tutorial-progress.h5')
#################
# Visualization of model history
history = model.history.history
print "history keys: ", history.keys()
#Accuracy plot
plt.plot(100 * np.array(history['acc']), label='training')
plt.plot(100 * np.array(history['val_acc']), label='validation')
plt.xlim(0)
plt.xlabel('epoch')
plt.ylabel('accuracy %')
plt.legend(loc='lower right', fontsize=20)
plt.savefig('accuarcy.png')
plt.close()
#loss plot
plt.plot(100 * np.array(history['loss']), label='training')
plt.plot(100 * np.array(history['val_loss']), label='validation')
plt.xlim(0)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='upper right', fontsize=20)
# the line indicate the epoch corresponding to the best performance on the validation set
# plt.vlines(np.argmin(history['val_loss']), 45, 56, linestyle='dashed', linewidth=0.5)
plt.savefig('loss.png')
plt.close()
print 'Loss estimate on unseen examples (from validation set) = {0:.3f}'.format(np.min(history['val_loss']))
############################################################
###############
# -- Save network weights and structure
print 'Saving model...'
model.save_weights('./models/tutorial.h5', overwrite=True)
json_string = model.to_json()
open('./models/tutorial.json', 'w').write(json_string)
print 'Done'
print 'Testing...'
yhat = model.predict(X_test, verbose = True, batch_size = 512)
print "yhat: ", yhat
yhat_cls = np.argmax(yhat, axis=1)
import itertools
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
'''
#compute confusion matrix
cnf_matrix = confusion_matrix(y_test, yhat_cls, sample_weight=w_test)
np.set_printoptions(precision=4)
plot_confusion_matrix(cnf_matrix, classes=['Sig', 'Bkg'],
normalize=True,
title='Normalized confusion matrix')
# signal eff = weighted tpr --> out of all signal events, what % for we classify as signal?
print 'Signal efficiency:', w_test[(y_test == 1) & (yhat_cls == 1)].sum() / w_test[y_test == 1].sum()
# bkg eff = weighted fpr --> out of all bkg events, what % do we classify as signal?
b_eff = w_test[(y_test != 0) & (yhat_cls == 0)].sum() / w_test[y_test != 0].sum()
print 'Background efficiency:', b_eff
print 'Background rej:', 1 / b_eff
'''
# -- events that got assigned to class 0
predicted_sig = df_full.iloc[np.array(ix_test)[yhat_cls == 0]]
predicted_sig['true'] = y_test[yhat_cls == 0]
print predicted_sig.head()
plt.clf()
bins = np.linspace(0, 1, 20)
#For normalization
wes = np.ones_like(yhat[y_test==1])/len(yhat[y_test==1])
web = np.ones_like(yhat[y_test==0])/len(yhat[y_test==0])
_ = plt.hist(yhat[y_test==1], histtype='stepfilled', alpha=0.5, color='red', label=r"Signal", bins=bins, weights=wes)
_ = plt.hist(yhat[y_test==0], histtype='stepfilled', alpha=0.5, color='blue', label=r'Background', bins=bins, weights=web)
#_ = plt.hist(yhat[y_test==1], histtype='stepfilled', alpha=0.5, color='red', label=r"Signal", bins=bins)
#_ = plt.hist(yhat[y_test==0], histtype='stepfilled', alpha=0.5, color='blue', label=r'Background', bins=bins)
plt.legend(loc='upper center')
plt.xlabel('P(signal) assigned by the model')
plt.tight_layout()
plt.savefig('sigbkg.png')
plt.close('all')
print "Sum of weights of first layer mapped to input variable: "
we = model.layers[1].get_weights()
for i in range(len(we[0])):
print npart[i], " : ", sum(we[0][i])
print "Making ROC Curves. . ."
from sklearn.metrics import roc_curve,roc_auc_score
#fpr = false positive, tpr = true positive
fpr, tpr,thresholds = roc_curve(y_test,yhat)
auc = roc_auc_score(y_test,yhat)
plt.figure(figsize=(10,10))
#plt.grid(b = True, which = 'minor')
#plt.grid(b = True, which = 'major')
_=plt.plot(tpr,1.-fpr, label='Model: AUC=%.3f' %auc)
plt.legend()
plt.xlim(0.,1.2)
plt.ylim(0.,1.4)
#plt.yscale('log')
plt.savefig('roc.png')
plt.clf()
print "Donezo"
| '''
Args:
-----
column: a column of a pandas df whose entries are lists (or regular entries -- in which case nothing is done)
e.g.: my_df['some_variable']
Returns:
--------
flattened out version of the column.
For example, it will turn:
[1791, 2719, 1891]
[1717, 1, 0, 171, 9181, 537, 12]
[82, 11]
...
into:
1791, 2719, 1891, 1717, 1, 0, 171, 9181, 537, 12, 82, 11, ...
''' | identifier_body |
testkeras.py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
import glob
from root_numpy import root2array
from numpy.lib.recfunctions import stack_arrays
def root2pandas(files_path, tree_name, **kwargs):
'''
Args:
-----
files_path: a string like './data/*.root', for example
tree_name: a string like 'Collection_Tree' corresponding to the name of the folder inside the root
file that we want to open
kwargs: arguments taken by root2array, such as branches to consider, start, stop, step, etc
Returns:
--------
output_panda: a pandas dataframe like allbkg_df in which all the info from the root file will be stored
Note:
-----
if you are working with .root files that contain different branches, you might have to mask your data
in that case, return pd.DataFrame(ss.data)
'''
# -- create list of .root files to process
files = glob.glob(files_path)
# -- process ntuples into rec arrays
ss = stack_arrays([root2array(fpath, tree_name, **kwargs).view(np.recarray) for fpath in files])
try:
return pd.DataFrame(ss)
except Exception:
return pd.DataFrame(ss.data)
def flatten(column):
'''
Args:
-----
column: a column of a pandas df whose entries are lists (or regular entries -- in which case nothing is done)
e.g.: my_df['some_variable']
Returns:
--------
flattened out version of the column.
For example, it will turn:
[1791, 2719, 1891]
[1717, 1, 0, 171, 9181, 537, 12]
[82, 11]
...
into:
1791, 2719, 1891, 1717, 1, 0, 171, 9181, 537, 12, 82, 11, ...
'''
try:
return np.array([v for e in column for v in e])
except (TypeError, ValueError):
return column
########################################################################################
fiSig = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410980*/410980.root'
fiBkg = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410470.PhPy8EG.DAOD_TOPQ1.e6337_s3126_r9364_p3629.Oct18-v2_output_root/user.jbarkelo.15859005._000010.output.root'
fiBkg = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.410470.PhPy8EG.DAOD_TOPQ1.e6337_s3126_r9364_p3629.Oct18-v2_output_root/user.jbarkelo.15859005._00001*.root'
fiSig2 = '/scratch/jbarkelo/20181026Ntuples/user.jbarkelo.41098*/410981.root'
#sig = root2pandas(fiSig,'nominal',selection = 'ejets_2015 >0||ejets_2016>0||ejets_2017>0')
#sigFriend = root2pandas(fiSig+'FCNCFriend','nominalFCNCFriend')
SRSelect = '(ejets_2015||ejets_2016||ejets_2017)&&(ph_pt[0]>50000)&&(len(jet_e)>=2)'
sig = pd.concat([root2pandas(fiSig,'nominal'),root2pandas(fiSig2,'nominal')])
sigFriend = pd.concat([root2pandas(fiSig+'FCNCFriend','nominalFCNCFriend'),root2pandas(fiSig2+'FCNCFriend', 'nominalFCNCFriend')])
sig_df = pd.concat([sig,sigFriend], axis=1,join_axes=[sig.index])
#Now to go through and do a specific selection on the data frame itself, this is like a selection for ejets
sig_df = sig_df[sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017'] ==1]
sig_df.reset_index(drop=True)
sig_df = sig_df.assign(ph_e0=pd.Series([i[0] for i in sig_df['ph_e']],index=sig_df.index))
sig_df = sig_df.assign(ph_pt0=pd.Series([i[0] for i in sig_df['ph_pt']],index=sig_df.index))
sig_df = sig_df.assign(ph_eta0=pd.Series([i[0] for i in sig_df['ph_eta']],index=sig_df.index))
sig_df = sig_df.assign(ph_phi0=pd.Series([i[0] for i in sig_df['ph_phi']],index=sig_df.index))
#Not an ideal way to do this but it will work for now, grabs leading photon information
bkg = root2pandas(fiBkg,'nominal',selection = 'ejets_2015 >0||ejets_2016>0||ejets_2017>0')
bkgFriend = root2pandas(fiBkg+'FCNCFriend','nominalFCNCFriend')
bkg_df = pd.concat([bkg,bkgFriend], axis=1,join_axes=[bkg.index])
#Now to go through and do a specific selection on the data frame itself, this is like a selection for ejets
bkg_df = bkg_df[bkg_df.loc[:,'ejets_2015']+ bkg_df.loc[:,'ejets_2016']+bkg_df.loc[:,'ejets_2017'] ==1]
bkg_df.reset_index(drop=True)
bkg_df = bkg_df.assign(ph_e0=pd.Series([i[0] for i in bkg_df['ph_e']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_pt0=pd.Series([i[0] for i in bkg_df['ph_pt']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_eta0=pd.Series([i[0] for i in bkg_df['ph_eta']],index=bkg_df.index))
bkg_df = bkg_df.assign(ph_phi0=pd.Series([i[0] for i in bkg_df['ph_phi']],index=bkg_df.index))
#Weight placeholders
#bkgw =bkg_df.loc[:,'mujets_2015']+ bkg_df.loc[:,'mujets_2016']+bkg_df.loc[:,'mujets_2017'] #mu just place holder to set bkgweights to 0 for testing!!! Barkeloo
#sigw =sig_df.loc[:,'ejets_2015']+ sig_df.loc[:,'ejets_2016']+sig_df.loc[:,'ejets_2017']
#w=pd.concat((sigw,bkgw),ignore_index=True).values
w = pd.concat((sig_df['weight_mc'],bkg_df['weight_mc']),ignore_index=True).values
##can run something like b = root2pandas(fiSig,'nominal', selection = 'ejets_2015 >0||ejets_2016>0') for a selection like in http://scikit-hep.org/root_numpy/start.html#a-quick-tutorial
print sig.keys()
## Names of some event-level branches
npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2','ph_phi0','ph_pt0','ph_eta0','ph_e0']#,'m_lgam','m_tSM','m_qgam','nu_chi2','sm_chi2','w_chi2']
#npart = ['el_e','el_eta','el_phi','el_pt','el_charge','el_ptvarcone20','el_topoetcone20','met_met','met_phi','ph_phi0','ph_pt0','ph_eta0','ph_e0']
'''
for key in npart: # loop through the event-level branches and plot them on separate histograms
# -- set font and canvas size (optional)
matplotlib.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(8,8), dpi=100)
# -- declare common binning strategy (otherwise every histogram will have its own binning)
bins = np.linspace(min(sig_df[key]), max(sig_df[key]) + 1, 30)
# plot!
_ = plt.hist(sig_df[key], histtype='step', normed=False, bins=bins, label=r'FCNC', linewidth=2)
_ = plt.hist(bkg_df[key], histtype='step', normed=False, bins=bins, label=r'ttbar')
plt.xlabel(key)
plt.yscale('log')
plt.legend(loc='best')
plt.savefig(str(key)+'.png')
plt.clf()
'''
df_full = pd.concat((sig_df,bkg_df), ignore_index=True)
df = pd.concat((sig_df[npart],bkg_df[npart]),ignore_index=True)
X=df.values#as_matrix()
type(X)
X.shape
#w=pd.concat((sig_df['ejets_2015'],sig_df['ejets_2016'],sig_df['ejets_2017'],bkg_df['ejets_2015'],bkg_df['ejets_2016'],bkg['ejets_2017']),ignore_index=True).values
type(w)
#Generate an array of truth labels yo distinguish among different classes in the problem
y=[]
for _df, ID in [(sig_df,1),(bkg_df,0)]:
y.extend([ID] * _df.shape[0])
y=np.array(y)
y.shape
ix = range(X.shape[0]) # array of indices, just to keep track of them for safety reasons and future checks
#X_train, X_test, y_train, y_test, w_train, w_test, ix_train, ix_test = train_test_split(X, y, w, ix, train_size=0.8)
######## 80% Train+Validate, 20% test
X_train, X_test, \
y_train, y_test, \
ix_train, ix_test\
= train_test_split(X, y, ix, test_size=0.2)
# 64% train, 16% validate, 20% of original 80%
X_train, X_val,\
y_train, y_val,\
ix_train, ix_val\
=train_test_split(X_train,y_train,ix_train,test_size=0.2)
print "Scaling \n"
from sklearn.preprocessing import StandardScaler, RobustScaler
scaler = StandardScaler()
#scaler = RobustScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
from keras.models import Model
from keras.layers import Dense, Dropout, Input
inputs = Input(shape=(X_train.shape[1], )) # placeholder
n = X_train.shape[1]
hidden = Dense(n+1, activation='relu')(inputs)
hidden = Dropout(0.2)(hidden)
hidden = Dense(2*n+2, activation='relu')(hidden)
hidden = Dropout(0.2)(hidden)
#hidden = Dense(4*n+4, activation='relu')(hidden)
#hidden = Dropout(0.2)(hidden)
outputs = Dense(1, activation='sigmoid')(hidden)
#outputs = Dense(2, activation='softmax')(hidden) #needs as many
# last layer has to have the same dimensionality as the number of classes we want to predict, here 2
model = Model(inputs, outputs)
model.summary()
from keras.utils.vis_utils import plot_model
#plot_model(model, 'temp.png', show_shapes=True)
#model.compile('adam','sparse_categorical_crossentropy', metrics=['acc'])
model.compile('adam','binary_crossentropy', metrics=['acc'])
from keras.callbacks import EarlyStopping, ModelCheckpoint
from collections import Counter
print "NEvents to train over: ", Counter(y_train)
print "NEvents to test over: ", Counter(y_test)
print "Training: "
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight('balanced',
np.unique(y_train),
y_train)
class_weight_dict = dict(enumerate(class_weights))
try:
model.fit(
X_train, y_train,# class_weight= class_weight_dict,# class_weight={ # rebalance class representation
# 0 : 0.70 * (float(len(y)) / (y == 0).sum()),
# 1 : 0.30 * (float(len(y)) / (y == 1).sum()) #These are some sort of weights. seems weird to have to do this, basically what youre training on I think
### 2 : 0.40 * (float(len(y)) / (y == 2).sum())
# },
callbacks = [
EarlyStopping(verbose=True, patience=15, monitor='val_loss'),
ModelCheckpoint('./models/tutorial-progress.h5', monitor='val_loss', verbose=True, save_best_only=True)
],
epochs=200,
validation_data=(X_val, y_val)
# validation_split = 0.3,
# verbose=True
)
except KeyboardInterrupt:
print 'Training ended early.'
model.load_weights('./models/tutorial-progress.h5')
#################
# Visualization of model history
history = model.history.history
print "history keys: ", history.keys()
#Accuracy plot
plt.plot(100 * np.array(history['acc']), label='training')
plt.plot(100 * np.array(history['val_acc']), label='validation')
plt.xlim(0)
plt.xlabel('epoch')
plt.ylabel('accuracy %')
plt.legend(loc='lower right', fontsize=20)
plt.savefig('accuarcy.png')
plt.close()
#loss plot
plt.plot(100 * np.array(history['loss']), label='training')
plt.plot(100 * np.array(history['val_loss']), label='validation')
plt.xlim(0)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='upper right', fontsize=20)
# the line indicate the epoch corresponding to the best performance on the validation set
# plt.vlines(np.argmin(history['val_loss']), 45, 56, linestyle='dashed', linewidth=0.5)
plt.savefig('loss.png')
plt.close()
print 'Loss estimate on unseen examples (from validation set) = {0:.3f}'.format(np.min(history['val_loss']))
############################################################
###############
# -- Save network weights and structure
print 'Saving model...'
model.save_weights('./models/tutorial.h5', overwrite=True)
json_string = model.to_json()
open('./models/tutorial.json', 'w').write(json_string)
print 'Done'
print 'Testing...'
yhat = model.predict(X_test, verbose = True, batch_size = 512)
print "yhat: ", yhat
yhat_cls = np.argmax(yhat, axis=1)
import itertools
from sklearn.metrics import confusion_matrix
def | (cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
'''
#compute confusion matrix
cnf_matrix = confusion_matrix(y_test, yhat_cls, sample_weight=w_test)
np.set_printoptions(precision=4)
plot_confusion_matrix(cnf_matrix, classes=['Sig', 'Bkg'],
normalize=True,
title='Normalized confusion matrix')
# signal eff = weighted tpr --> out of all signal events, what % for we classify as signal?
print 'Signal efficiency:', w_test[(y_test == 1) & (yhat_cls == 1)].sum() / w_test[y_test == 1].sum()
# bkg eff = weighted fpr --> out of all bkg events, what % do we classify as signal?
b_eff = w_test[(y_test != 0) & (yhat_cls == 0)].sum() / w_test[y_test != 0].sum()
print 'Background efficiency:', b_eff
print 'Background rej:', 1 / b_eff
'''
# -- events that got assigned to class 0
predicted_sig = df_full.iloc[np.array(ix_test)[yhat_cls == 0]]
predicted_sig['true'] = y_test[yhat_cls == 0]
print predicted_sig.head()
plt.clf()
bins = np.linspace(0, 1, 20)
#For normalization
wes = np.ones_like(yhat[y_test==1])/len(yhat[y_test==1])
web = np.ones_like(yhat[y_test==0])/len(yhat[y_test==0])
_ = plt.hist(yhat[y_test==1], histtype='stepfilled', alpha=0.5, color='red', label=r"Signal", bins=bins, weights=wes)
_ = plt.hist(yhat[y_test==0], histtype='stepfilled', alpha=0.5, color='blue', label=r'Background', bins=bins, weights=web)
#_ = plt.hist(yhat[y_test==1], histtype='stepfilled', alpha=0.5, color='red', label=r"Signal", bins=bins)
#_ = plt.hist(yhat[y_test==0], histtype='stepfilled', alpha=0.5, color='blue', label=r'Background', bins=bins)
plt.legend(loc='upper center')
plt.xlabel('P(signal) assigned by the model')
plt.tight_layout()
plt.savefig('sigbkg.png')
plt.close('all')
print "Sum of weights of first layer mapped to input variable: "
we = model.layers[1].get_weights()
for i in range(len(we[0])):
print npart[i], " : ", sum(we[0][i])
print "Making ROC Curves. . ."
from sklearn.metrics import roc_curve,roc_auc_score
#fpr = false positive, tpr = true positive
fpr, tpr,thresholds = roc_curve(y_test,yhat)
auc = roc_auc_score(y_test,yhat)
plt.figure(figsize=(10,10))
#plt.grid(b = True, which = 'minor')
#plt.grid(b = True, which = 'major')
_=plt.plot(tpr,1.-fpr, label='Model: AUC=%.3f' %auc)
plt.legend()
plt.xlim(0.,1.2)
plt.ylim(0.,1.4)
#plt.yscale('log')
plt.savefig('roc.png')
plt.clf()
print "Donezo"
| plot_confusion_matrix | identifier_name |
runner.rs | use core::fmt::{Display, Formatter, Result as FmtResult};
use std::error::Error as StdError;
use std::sync::mpsc::{sync_channel, SyncSender, TryRecvError, TrySendError};
use std::thread::{sleep, spawn};
use std::time::{Duration, Instant};
use std::{cell::RefCell, sync::mpsc::Receiver};
use skulpin_renderer::{ash, LogicalSize, RendererBuilder};
use ash::vk::Result as VkResult;
use crate::skia::{Color, Matrix, Picture, PictureRecorder, Point, Rect, Size};
use super::input::{EventHandleResult, InputState};
use super::time::TimeState;
use super::Game;
use super::{default_font_set::DefaultFontSet, FontSet};
use sdl2::event::Event as Sdl2Event;
use skulpin_renderer_sdl2::{sdl2, Sdl2Window};
enum Event {
Sdl2Event(Sdl2Event),
Crash(Error),
}
enum FeedbackEvent {
Exit,
}
#[derive(Debug)]
pub enum Error {
RendererError(VkResult),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match self {
Error::RendererError(e) => e.fmt(f),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::RendererError(e) => Some(e),
}
}
}
impl From<VkResult> for Error {
fn from(result: VkResult) -> Self {
Error::RendererError(result)
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct ID(u64);
impl ID {
pub fn next() -> Self {
Self(State::with_mut(|x| {
let id = x.id_keeper;
x.id_keeper += 1;
id
}))
}
}
pub struct State {
pub input_state: InputState,
pub time_state: TimeState,
pub time_state_draw: TimeState,
pub font_set: Box<dyn FontSet>,
id_keeper: u64,
}
impl State {
const PANIC_MESSAGE: &'static str = "Attempt to get game state while game is uninitialised";
thread_local!(pub static STATE: RefCell<Option<State>> = RefCell::new(None));
#[inline]
pub fn with<F, R>(f: F) -> R
where | #[inline]
pub fn with_mut<F, R>(f: F) -> R
where
F: FnOnce(&mut State) -> R,
{
Self::STATE.with(|x| f(x.borrow_mut().as_mut().expect(Self::PANIC_MESSAGE)))
}
pub fn last_update_time() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.last_update_time()
})
}
pub fn elapsed() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.elapsed()
})
}
pub fn last_update_time_draw() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state_draw
.last_update_time()
})
}
pub fn mouse_position() -> Point {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.input_state
.mouse_position
})
}
}
pub struct Runner;
impl Runner {
pub const PIC_QUEUE_LENGTH: usize = 1;
pub const EVENT_QUEUE_SIZE: usize = 8;
pub const FEEDBACK_QUEUE_SIZE: usize = 1;
pub const BACKGROUND: Color = Color::from_argb(255, 10, 10, 10);
pub fn run<F, T>(
game: F,
inner_size: LogicalSize,
window_title: &str,
renderer_builder: RendererBuilder,
) where
F: 'static + Send + FnOnce() -> T,
T: Game,
{
let sdl_context = sdl2::init().expect("Failed to initialize SDL2");
let video_subsystem = sdl_context
.video()
.expect("Failed to create SDL2 video subsystem");
let sdl_window = video_subsystem
.window(window_title, inner_size.width, inner_size.height)
.resizable()
.build()
.expect("Failed to create game window");
let window = Sdl2Window::new(&sdl_window);
sdl_context.mouse().show_cursor(false);
let (pic_tx, pic_rx) = sync_channel(Self::PIC_QUEUE_LENGTH);
let (event_tx, event_rx) = sync_channel(Self::EVENT_QUEUE_SIZE);
let (feedback_tx, feedback_rx) = sync_channel(Self::FEEDBACK_QUEUE_SIZE);
spawn(move || {
gstreamer::init().expect("Failed to initialize GStreamer");
let input_state = InputState::new(inner_size);
let time_state = TimeState::new();
let time_state_draw = TimeState::new();
State::STATE.with(|x| {
*x.borrow_mut() = Some(State {
input_state,
time_state,
time_state_draw,
font_set: Box::new(DefaultFontSet::new()),
id_keeper: 0,
});
});
let mut game = game();
game.set_size(
State::STATE.with(|x| x.borrow().as_ref().unwrap().input_state.window_size),
);
Self::game_thread(game, event_rx, pic_tx, feedback_tx);
});
let mut renderer = renderer_builder
.build(&window)
.expect("Failed to create renderer");
let mut event_pump = sdl_context
.event_pump()
.expect("Failed to create SDL2 event pump");
'events: loop {
match feedback_rx.try_recv() {
Ok(event) => match event {
FeedbackEvent::Exit => {
break 'events;
}
},
Err(e) => match e {
TryRecvError::Empty => {
for event in event_pump.poll_iter() {
if event_tx.send(Event::Sdl2Event(event)).is_err() {
break 'events;
}
}
match pic_rx.try_recv() {
Ok(pic) => {
if let Err(e) = renderer.draw(&window, |canvas, _| {
canvas.clear(Self::BACKGROUND);
canvas.draw_picture(pic, Some(&Matrix::default()), None);
}) {
let _ = event_tx.send(Event::Crash(e.into()));
break 'events;
}
}
Err(e) => match e {
TryRecvError::Empty => sleep(Duration::MILLISECOND),
TryRecvError::Disconnected => break 'events,
},
}
}
TryRecvError::Disconnected => break 'events,
},
}
}
}
fn game_thread(
mut game: impl Game,
event_rx: Receiver<Event>,
pic_tx: SyncSender<Picture>,
feedback_tx: SyncSender<FeedbackEvent>,
) {
let target_update_time = Duration::MILLISECOND; // 1000 fps
let target_frame_time = Duration::MILLISECOND * 8; // 120 fps
let mut last_frame = Instant::now();
loop {
game.update();
let mut is_redraw = false;
// TODO: is this loop the cause of bad VSync?
loop {
match event_rx.try_recv() {
Ok(event) => {
if Self::handle_event(&mut game, event, &feedback_tx) {
return;
}
}
Err(e) => match e {
TryRecvError::Empty => break,
TryRecvError::Disconnected => return,
},
}
}
let frame_time = last_frame.elapsed();
if frame_time > target_frame_time {
last_frame = Instant::now() - (frame_time - target_frame_time);
is_redraw = true;
let mut rec = PictureRecorder::new();
let bounds = Rect::from_size(State::with(|x| {
let w = x.input_state.window_size;
(w.width, w.height)
}));
let canvas = rec.begin_recording(bounds, None);
game.draw(canvas);
if let Err(why) = pic_tx.try_send(
rec.finish_recording_as_picture(None)
.expect("Failed to finish recording picture while rendering"),
) {
match why {
// Skip any unsent frames, just in case the renderer
// fails to catch up, and to prevent lockups.
TrySendError::Full(_) => {}
TrySendError::Disconnected(_) => {
panic!("Failed to send canvas to draw thread (disconnected channel)")
}
}
}
State::with_mut(|x| x.time_state_draw.update());
}
State::with_mut(|state| {
if !is_redraw {
let update_time = state.time_state.last_update().elapsed();
if target_update_time > update_time {
sleep(target_update_time - update_time);
}
}
state.time_state.update();
});
}
}
fn handle_event(
game: &mut impl Game,
event: Event,
feedback_tx: &SyncSender<FeedbackEvent>,
) -> bool {
match event {
Event::Sdl2Event(event) => {
if let Some(r) = State::with_mut(|x| x.input_state.handle_event(&event)) {
match r {
EventHandleResult::Input(event) => game.input(event),
EventHandleResult::Resized(size) => {
game.set_size(Size::new(size.width, size.height))
}
EventHandleResult::Exit => {
game.close();
feedback_tx
.send(FeedbackEvent::Exit)
.expect("Failed to send feedback event to draw thread");
return true;
}
}
}
}
Event::Crash(e) => {
game.crash(e);
feedback_tx
.send(FeedbackEvent::Exit)
.expect("Failed to send feedback event to draw thread");
return true;
}
}
false
}
} | F: FnOnce(&State) -> R,
{
Self::STATE.with(|x| f(x.borrow().as_ref().expect(Self::PANIC_MESSAGE)))
}
| random_line_split |
runner.rs | use core::fmt::{Display, Formatter, Result as FmtResult};
use std::error::Error as StdError;
use std::sync::mpsc::{sync_channel, SyncSender, TryRecvError, TrySendError};
use std::thread::{sleep, spawn};
use std::time::{Duration, Instant};
use std::{cell::RefCell, sync::mpsc::Receiver};
use skulpin_renderer::{ash, LogicalSize, RendererBuilder};
use ash::vk::Result as VkResult;
use crate::skia::{Color, Matrix, Picture, PictureRecorder, Point, Rect, Size};
use super::input::{EventHandleResult, InputState};
use super::time::TimeState;
use super::Game;
use super::{default_font_set::DefaultFontSet, FontSet};
use sdl2::event::Event as Sdl2Event;
use skulpin_renderer_sdl2::{sdl2, Sdl2Window};
enum Event {
Sdl2Event(Sdl2Event),
Crash(Error),
}
enum FeedbackEvent {
Exit,
}
#[derive(Debug)]
pub enum Error {
RendererError(VkResult),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match self {
Error::RendererError(e) => e.fmt(f),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> |
}
impl From<VkResult> for Error {
fn from(result: VkResult) -> Self {
Error::RendererError(result)
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct ID(u64);
impl ID {
pub fn next() -> Self {
Self(State::with_mut(|x| {
let id = x.id_keeper;
x.id_keeper += 1;
id
}))
}
}
pub struct State {
pub input_state: InputState,
pub time_state: TimeState,
pub time_state_draw: TimeState,
pub font_set: Box<dyn FontSet>,
id_keeper: u64,
}
impl State {
const PANIC_MESSAGE: &'static str = "Attempt to get game state while game is uninitialised";
thread_local!(pub static STATE: RefCell<Option<State>> = RefCell::new(None));
#[inline]
pub fn with<F, R>(f: F) -> R
where
F: FnOnce(&State) -> R,
{
Self::STATE.with(|x| f(x.borrow().as_ref().expect(Self::PANIC_MESSAGE)))
}
#[inline]
pub fn with_mut<F, R>(f: F) -> R
where
F: FnOnce(&mut State) -> R,
{
Self::STATE.with(|x| f(x.borrow_mut().as_mut().expect(Self::PANIC_MESSAGE)))
}
pub fn last_update_time() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.last_update_time()
})
}
pub fn elapsed() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.elapsed()
})
}
pub fn last_update_time_draw() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state_draw
.last_update_time()
})
}
pub fn mouse_position() -> Point {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.input_state
.mouse_position
})
}
}
pub struct Runner;
impl Runner {
pub const PIC_QUEUE_LENGTH: usize = 1;
pub const EVENT_QUEUE_SIZE: usize = 8;
pub const FEEDBACK_QUEUE_SIZE: usize = 1;
pub const BACKGROUND: Color = Color::from_argb(255, 10, 10, 10);
pub fn run<F, T>(
game: F,
inner_size: LogicalSize,
window_title: &str,
renderer_builder: RendererBuilder,
) where
F: 'static + Send + FnOnce() -> T,
T: Game,
{
let sdl_context = sdl2::init().expect("Failed to initialize SDL2");
let video_subsystem = sdl_context
.video()
.expect("Failed to create SDL2 video subsystem");
let sdl_window = video_subsystem
.window(window_title, inner_size.width, inner_size.height)
.resizable()
.build()
.expect("Failed to create game window");
let window = Sdl2Window::new(&sdl_window);
sdl_context.mouse().show_cursor(false);
let (pic_tx, pic_rx) = sync_channel(Self::PIC_QUEUE_LENGTH);
let (event_tx, event_rx) = sync_channel(Self::EVENT_QUEUE_SIZE);
let (feedback_tx, feedback_rx) = sync_channel(Self::FEEDBACK_QUEUE_SIZE);
spawn(move || {
gstreamer::init().expect("Failed to initialize GStreamer");
let input_state = InputState::new(inner_size);
let time_state = TimeState::new();
let time_state_draw = TimeState::new();
State::STATE.with(|x| {
*x.borrow_mut() = Some(State {
input_state,
time_state,
time_state_draw,
font_set: Box::new(DefaultFontSet::new()),
id_keeper: 0,
});
});
let mut game = game();
game.set_size(
State::STATE.with(|x| x.borrow().as_ref().unwrap().input_state.window_size),
);
Self::game_thread(game, event_rx, pic_tx, feedback_tx);
});
let mut renderer = renderer_builder
.build(&window)
.expect("Failed to create renderer");
let mut event_pump = sdl_context
.event_pump()
.expect("Failed to create SDL2 event pump");
'events: loop {
match feedback_rx.try_recv() {
Ok(event) => match event {
FeedbackEvent::Exit => {
break 'events;
}
},
Err(e) => match e {
TryRecvError::Empty => {
for event in event_pump.poll_iter() {
if event_tx.send(Event::Sdl2Event(event)).is_err() {
break 'events;
}
}
match pic_rx.try_recv() {
Ok(pic) => {
if let Err(e) = renderer.draw(&window, |canvas, _| {
canvas.clear(Self::BACKGROUND);
canvas.draw_picture(pic, Some(&Matrix::default()), None);
}) {
let _ = event_tx.send(Event::Crash(e.into()));
break 'events;
}
}
Err(e) => match e {
TryRecvError::Empty => sleep(Duration::MILLISECOND),
TryRecvError::Disconnected => break 'events,
},
}
}
TryRecvError::Disconnected => break 'events,
},
}
}
}
fn game_thread(
mut game: impl Game,
event_rx: Receiver<Event>,
pic_tx: SyncSender<Picture>,
feedback_tx: SyncSender<FeedbackEvent>,
) {
let target_update_time = Duration::MILLISECOND; // 1000 fps
let target_frame_time = Duration::MILLISECOND * 8; // 120 fps
let mut last_frame = Instant::now();
loop {
game.update();
let mut is_redraw = false;
// TODO: is this loop the cause of bad VSync?
loop {
match event_rx.try_recv() {
Ok(event) => {
if Self::handle_event(&mut game, event, &feedback_tx) {
return;
}
}
Err(e) => match e {
TryRecvError::Empty => break,
TryRecvError::Disconnected => return,
},
}
}
let frame_time = last_frame.elapsed();
if frame_time > target_frame_time {
last_frame = Instant::now() - (frame_time - target_frame_time);
is_redraw = true;
let mut rec = PictureRecorder::new();
let bounds = Rect::from_size(State::with(|x| {
let w = x.input_state.window_size;
(w.width, w.height)
}));
let canvas = rec.begin_recording(bounds, None);
game.draw(canvas);
if let Err(why) = pic_tx.try_send(
rec.finish_recording_as_picture(None)
.expect("Failed to finish recording picture while rendering"),
) {
match why {
// Skip any unsent frames, just in case the renderer
// fails to catch up, and to prevent lockups.
TrySendError::Full(_) => {}
TrySendError::Disconnected(_) => {
panic!("Failed to send canvas to draw thread (disconnected channel)")
}
}
}
State::with_mut(|x| x.time_state_draw.update());
}
State::with_mut(|state| {
if !is_redraw {
let update_time = state.time_state.last_update().elapsed();
if target_update_time > update_time {
sleep(target_update_time - update_time);
}
}
state.time_state.update();
});
}
}
fn handle_event(
game: &mut impl Game,
event: Event,
feedback_tx: &SyncSender<FeedbackEvent>,
) -> bool {
match event {
Event::Sdl2Event(event) => {
if let Some(r) = State::with_mut(|x| x.input_state.handle_event(&event)) {
match r {
EventHandleResult::Input(event) => game.input(event),
EventHandleResult::Resized(size) => {
game.set_size(Size::new(size.width, size.height))
}
EventHandleResult::Exit => {
game.close();
feedback_tx
.send(FeedbackEvent::Exit)
.expect("Failed to send feedback event to draw thread");
return true;
}
}
}
}
Event::Crash(e) => {
game.crash(e);
feedback_tx
.send(FeedbackEvent::Exit)
.expect("Failed to send feedback event to draw thread");
return true;
}
}
false
}
}
| {
match self {
Error::RendererError(e) => Some(e),
}
} | identifier_body |
runner.rs | use core::fmt::{Display, Formatter, Result as FmtResult};
use std::error::Error as StdError;
use std::sync::mpsc::{sync_channel, SyncSender, TryRecvError, TrySendError};
use std::thread::{sleep, spawn};
use std::time::{Duration, Instant};
use std::{cell::RefCell, sync::mpsc::Receiver};
use skulpin_renderer::{ash, LogicalSize, RendererBuilder};
use ash::vk::Result as VkResult;
use crate::skia::{Color, Matrix, Picture, PictureRecorder, Point, Rect, Size};
use super::input::{EventHandleResult, InputState};
use super::time::TimeState;
use super::Game;
use super::{default_font_set::DefaultFontSet, FontSet};
use sdl2::event::Event as Sdl2Event;
use skulpin_renderer_sdl2::{sdl2, Sdl2Window};
enum Event {
Sdl2Event(Sdl2Event),
Crash(Error),
}
enum FeedbackEvent {
Exit,
}
#[derive(Debug)]
pub enum Error {
RendererError(VkResult),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match self {
Error::RendererError(e) => e.fmt(f),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::RendererError(e) => Some(e),
}
}
}
impl From<VkResult> for Error {
fn from(result: VkResult) -> Self {
Error::RendererError(result)
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct ID(u64);
impl ID {
pub fn next() -> Self {
Self(State::with_mut(|x| {
let id = x.id_keeper;
x.id_keeper += 1;
id
}))
}
}
pub struct State {
pub input_state: InputState,
pub time_state: TimeState,
pub time_state_draw: TimeState,
pub font_set: Box<dyn FontSet>,
id_keeper: u64,
}
impl State {
const PANIC_MESSAGE: &'static str = "Attempt to get game state while game is uninitialised";
thread_local!(pub static STATE: RefCell<Option<State>> = RefCell::new(None));
#[inline]
pub fn with<F, R>(f: F) -> R
where
F: FnOnce(&State) -> R,
{
Self::STATE.with(|x| f(x.borrow().as_ref().expect(Self::PANIC_MESSAGE)))
}
#[inline]
pub fn with_mut<F, R>(f: F) -> R
where
F: FnOnce(&mut State) -> R,
{
Self::STATE.with(|x| f(x.borrow_mut().as_mut().expect(Self::PANIC_MESSAGE)))
}
pub fn last_update_time() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.last_update_time()
})
}
pub fn | () -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.elapsed()
})
}
pub fn last_update_time_draw() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state_draw
.last_update_time()
})
}
pub fn mouse_position() -> Point {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.input_state
.mouse_position
})
}
}
pub struct Runner;
impl Runner {
pub const PIC_QUEUE_LENGTH: usize = 1;
pub const EVENT_QUEUE_SIZE: usize = 8;
pub const FEEDBACK_QUEUE_SIZE: usize = 1;
pub const BACKGROUND: Color = Color::from_argb(255, 10, 10, 10);
pub fn run<F, T>(
game: F,
inner_size: LogicalSize,
window_title: &str,
renderer_builder: RendererBuilder,
) where
F: 'static + Send + FnOnce() -> T,
T: Game,
{
let sdl_context = sdl2::init().expect("Failed to initialize SDL2");
let video_subsystem = sdl_context
.video()
.expect("Failed to create SDL2 video subsystem");
let sdl_window = video_subsystem
.window(window_title, inner_size.width, inner_size.height)
.resizable()
.build()
.expect("Failed to create game window");
let window = Sdl2Window::new(&sdl_window);
sdl_context.mouse().show_cursor(false);
let (pic_tx, pic_rx) = sync_channel(Self::PIC_QUEUE_LENGTH);
let (event_tx, event_rx) = sync_channel(Self::EVENT_QUEUE_SIZE);
let (feedback_tx, feedback_rx) = sync_channel(Self::FEEDBACK_QUEUE_SIZE);
spawn(move || {
gstreamer::init().expect("Failed to initialize GStreamer");
let input_state = InputState::new(inner_size);
let time_state = TimeState::new();
let time_state_draw = TimeState::new();
State::STATE.with(|x| {
*x.borrow_mut() = Some(State {
input_state,
time_state,
time_state_draw,
font_set: Box::new(DefaultFontSet::new()),
id_keeper: 0,
});
});
let mut game = game();
game.set_size(
State::STATE.with(|x| x.borrow().as_ref().unwrap().input_state.window_size),
);
Self::game_thread(game, event_rx, pic_tx, feedback_tx);
});
let mut renderer = renderer_builder
.build(&window)
.expect("Failed to create renderer");
let mut event_pump = sdl_context
.event_pump()
.expect("Failed to create SDL2 event pump");
'events: loop {
match feedback_rx.try_recv() {
Ok(event) => match event {
FeedbackEvent::Exit => {
break 'events;
}
},
Err(e) => match e {
TryRecvError::Empty => {
for event in event_pump.poll_iter() {
if event_tx.send(Event::Sdl2Event(event)).is_err() {
break 'events;
}
}
match pic_rx.try_recv() {
Ok(pic) => {
if let Err(e) = renderer.draw(&window, |canvas, _| {
canvas.clear(Self::BACKGROUND);
canvas.draw_picture(pic, Some(&Matrix::default()), None);
}) {
let _ = event_tx.send(Event::Crash(e.into()));
break 'events;
}
}
Err(e) => match e {
TryRecvError::Empty => sleep(Duration::MILLISECOND),
TryRecvError::Disconnected => break 'events,
},
}
}
TryRecvError::Disconnected => break 'events,
},
}
}
}
fn game_thread(
mut game: impl Game,
event_rx: Receiver<Event>,
pic_tx: SyncSender<Picture>,
feedback_tx: SyncSender<FeedbackEvent>,
) {
let target_update_time = Duration::MILLISECOND; // 1000 fps
let target_frame_time = Duration::MILLISECOND * 8; // 120 fps
let mut last_frame = Instant::now();
loop {
game.update();
let mut is_redraw = false;
// TODO: is this loop the cause of bad VSync?
loop {
match event_rx.try_recv() {
Ok(event) => {
if Self::handle_event(&mut game, event, &feedback_tx) {
return;
}
}
Err(e) => match e {
TryRecvError::Empty => break,
TryRecvError::Disconnected => return,
},
}
}
let frame_time = last_frame.elapsed();
if frame_time > target_frame_time {
last_frame = Instant::now() - (frame_time - target_frame_time);
is_redraw = true;
let mut rec = PictureRecorder::new();
let bounds = Rect::from_size(State::with(|x| {
let w = x.input_state.window_size;
(w.width, w.height)
}));
let canvas = rec.begin_recording(bounds, None);
game.draw(canvas);
if let Err(why) = pic_tx.try_send(
rec.finish_recording_as_picture(None)
.expect("Failed to finish recording picture while rendering"),
) {
match why {
// Skip any unsent frames, just in case the renderer
// fails to catch up, and to prevent lockups.
TrySendError::Full(_) => {}
TrySendError::Disconnected(_) => {
panic!("Failed to send canvas to draw thread (disconnected channel)")
}
}
}
State::with_mut(|x| x.time_state_draw.update());
}
State::with_mut(|state| {
if !is_redraw {
let update_time = state.time_state.last_update().elapsed();
if target_update_time > update_time {
sleep(target_update_time - update_time);
}
}
state.time_state.update();
});
}
}
fn handle_event(
game: &mut impl Game,
event: Event,
feedback_tx: &SyncSender<FeedbackEvent>,
) -> bool {
match event {
Event::Sdl2Event(event) => {
if let Some(r) = State::with_mut(|x| x.input_state.handle_event(&event)) {
match r {
EventHandleResult::Input(event) => game.input(event),
EventHandleResult::Resized(size) => {
game.set_size(Size::new(size.width, size.height))
}
EventHandleResult::Exit => {
game.close();
feedback_tx
.send(FeedbackEvent::Exit)
.expect("Failed to send feedback event to draw thread");
return true;
}
}
}
}
Event::Crash(e) => {
game.crash(e);
feedback_tx
.send(FeedbackEvent::Exit)
.expect("Failed to send feedback event to draw thread");
return true;
}
}
false
}
}
| elapsed | identifier_name |
humantoken.rs | // Copyright 2019-2023 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
//! This module defines a [parser](parse()) and
//! [pretty-printer](TokenAmountPretty::pretty) for
//! `TokenAmount`
//!
//! See the `si` module source for supported prefixes.
pub use parse::parse;
pub use print::TokenAmountPretty;
/// SI prefix definitions
mod si {
use bigdecimal::BigDecimal;
// Use a struct as a table row instead of an enum
// to make our code less macro-heavy
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Prefix {
/// `"micro"`
pub name: &'static str,
/// `[ "μ", "u" ]`
pub units: &'static [&'static str],
/// `-6`
pub exponent: i8,
/// `"0.000001"`
pub multiplier: &'static str,
}
impl Prefix {
// ENHANCE(aatifsyed): could memoize this if it's called in a hot loop
pub fn multiplier(&self) -> BigDecimal {
self.multiplier.parse().unwrap()
}
}
/// Biggest first
macro_rules! define_prefixes {
($($name:ident $symbol:ident$(or $alt_symbol:ident)* $base_10:literal $decimal:literal),* $(,)?) =>
{
// Define constants
$(
#[allow(non_upper_case_globals)]
pub const $name: Prefix = Prefix {
name: stringify!($name),
units: &[stringify!($symbol) $(, stringify!($alt_symbol))* ],
exponent: $base_10,
multiplier: stringify!($decimal),
};
)*
/// Biggest first
// Define top level array
pub const SUPPORTED_PREFIXES: &[Prefix] =
&[
$(
$name
,)*
];
};
}
define_prefixes! {
quetta Q 30 1000000000000000000000000000000,
ronna R 27 1000000000000000000000000000,
yotta Y 24 1000000000000000000000000,
zetta Z 21 1000000000000000000000,
exa E 18 1000000000000000000,
peta P 15 1000000000000000,
tera T 12 1000000000000,
giga G 9 1000000000,
mega M 6 1000000,
kilo k 3 1000,
// Leave this out because
// - it simplifies our printing logic
// - these are not commonly used
// - it's more consistent with lotus
//
// hecto h 2 100,
// deca da 1 10,
// deci d -1 0.1,
// centi c -2 0.01,
milli m -3 0.001,
micro μ or u -6 0.000001,
nano n -9 0.000000001,
pico p -12 0.000000000001,
femto f -15 0.000000000000001,
atto a -18 0.000000000000000001,
zepto z -21 0.000000000000000000001,
yocto y -24 0.000000000000000000000001,
ronto r -27 0.000000000000000000000000001,
quecto q -30 0.000000000000000000000000000001,
}
#[test]
fn sorted() {
let is_sorted_biggest_first = SUPPORTED_PREFIXES
.windows(2)
.all(|pair| pair[0].multiplier() > pair[1].multiplier());
assert!(is_sorted_biggest_first)
}
}
mod parse {
// ENHANCE(aatifsyed): could accept pairs like "1 nano 1 atto"
use crate::shim::econ::TokenAmount;
use anyhow::{anyhow, bail};
use bigdecimal::{BigDecimal, ParseBigDecimalError};
use nom::{
bytes::complete::tag,
character::complete::multispace0,
combinator::{map_res, opt},
error::{FromExternalError, ParseError},
number::complete::recognize_float,
sequence::terminated,
IResult,
};
use super::si;
/// Parse token amounts as floats with SI prefixed-units.
/// ```
/// # use forest_filecoin::doctest_private::{TokenAmount, parse};
/// fn assert_attos(input: &str, attos: u64) {
/// let expected = TokenAmount::from_atto(attos);
/// let actual = parse(input).unwrap();
/// assert_eq!(expected, actual);
/// }
/// assert_attos("1a", 1);
/// assert_attos("1aFIL", 1);
/// assert_attos("1 femtoFIL", 1000);
/// assert_attos("1.1 f", 1100);
/// assert_attos("1.0e3 attofil", 1000);
/// ```
///
/// # Known bugs
/// - `1efil` will not parse as an exa (`10^18`), because we'll try and
/// parse it as a exponent in the float. Instead use `1 efil`.
pub fn parse(input: &str) -> anyhow::Result<TokenAmount> {
let (mut big_decimal, scale) = parse_big_decimal_and_scale(input)?;
if let Some(scale) = scale {
big_decimal *= scale.multiplier();
}
let fil = big_decimal;
let attos = fil * si::atto.multiplier().inverse();
if !attos.is_integer() {
bail!("sub-atto amounts are not allowed");
}
let (attos, scale) = attos.with_scale(0).into_bigint_and_exponent();
assert_eq!(scale, 0, "we've just set the scale!");
Ok(TokenAmount::from_atto(attos))
}
fn nom2anyhow(e: nom::Err<nom::error::VerboseError<&str>>) -> anyhow::Error {
anyhow!("parse error: {e}")
}
fn parse_big_decimal_and_scale(
input: &str,
) -> anyhow::Result<(BigDecimal, Option<si::Prefix>)> {
// Strip `fil` or `FIL` at most once from the end
let input = match (input.strip_suffix("FIL"), input.strip_suffix("fil")) {
// remove whitespace before the units if there was any
(Some(stripped), _) => stripped.trim_end(),
(_, Some(stripped)) => stripped.trim_end(),
_ => input,
};
let (input, big_decimal) = permit_trailing_ws(bigdecimal)(input).map_err(nom2anyhow)?;
let (input, scale) = opt(permit_trailing_ws(si_scale))(input).map_err(nom2anyhow)?;
if !input.is_empty() {
bail!("Unexpected trailing input: {input}")
}
Ok((big_decimal, scale))
}
fn permit_trailing_ws<'a, F, O, E: ParseError<&'a str>>(
inner: F,
) -> impl FnMut(&'a str) -> IResult<&'a str, O, E>
where
F: FnMut(&'a str) -> IResult<&'a str, O, E>,
{
terminated(inner, multispace0)
}
/// Take an [si::Prefix] from the front of `input`
fn si_scale<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, si::Prefix, E> {
// Try the longest matches first, so we don't e.g match `a` instead of `atto`,
// leaving `tto`.
let mut scales = si::SUPPORTED_PREFIXES
.iter()
.flat_map(|scale| {
std::iter::once(&scale.name)
.chain(scale.units)
.map(move |prefix| (*prefix, scale))
})
.collect::<Vec<_>>();
scales.sort_by_key(|(prefix, _)| std::cmp::Reverse(*prefix));
for (prefix, scale) in scales {
if let Ok((rem, _prefix)) = tag::<_, _, E>(prefix)(input) {
return Ok((rem, *scale));
}
}
Err(nom::Err::Error(E::from_error_kind(
input,
nom::error::ErrorKind::Alt,
)))
}
/// Take a float from the front of `input`
fn bigdecimal<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, BigDecimal, E>
where
E: FromExternalError<&'a str, ParseBigDecimalError>,
{
map_res(recognize_float, str::parse)(input)
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::{BigInt, One as _};
use super::*;
#[test]
fn cover_scales() {
for scale in si::SUPPORTED_PREFIXES {
let _did_not_panic = scale.multiplier();
}
}
#[test]
fn parse_bigdecimal() {
fn do_test(input: &str, expected: &str) {
let expected = BigDecimal::from_str(expected).unwrap();
let (rem, actual) = bigdecimal::<nom::error::VerboseError<_>>(input).unwrap();
assert_eq!(expected, actual);
assert!(rem.is_empty());
}
do_test("1", "1");
do_test("0.1", "0.1");
do_test(".1", ".1");
do_test("1e1", "10");
do_test("1.", "1");
}
fn test_dec_scale(
input: &str,
expected_amount: &str,
expected_scale: impl Into<Option<si::Prefix>>,
) {
let expected_amount = BigDecimal::from_str(expected_amount).unwrap();
let expected_scale = expected_scale.into();
let (actual_amount, actual_scale) = parse_big_decimal_and_scale(input).unwrap();
assert_eq!(expected_amount, actual_amount, "{input}");
assert_eq!(expected_scale, actual_scale, "{input}");
}
#[test]
fn basic_bigdecimal_and_scale() {
// plain
test_dec_scale("1", "1", None);
// include unit
test_dec_scale("1 FIL", "1", None);
test_dec_scale("1FIL", "1", None);
test_dec_scale("1 fil", "1", None);
test_dec_scale("1fil", "1", None);
let possible_units = ["", "fil", "FIL", " fil", " FIL"];
let possible_prefixes = ["atto", "a", " atto", " a"];
for unit in possible_units {
for prefix in possible_prefixes {
let input = format!("1{prefix}{unit}");
test_dec_scale(&input, "1", si::atto)
}
}
}
#[test]
fn parse_exa_and_exponent() {
test_dec_scale("1 E", "1", si::exa);
test_dec_scale("1e0E", "1", si::exa);
// ENHANCE(aatifsyed): this should be parsed as 1 exa, but that
// would probably require an entirely custom float parser with
// lookahead - users will have to include a space for now
// do_test("1E", "1", exa);
}
#[test]
fn more_than_96_bits() {
use std::iter::{once, repeat};
// The previous rust_decimal implementation had at most 96 bits of precision
// we should be able to exceed that
let test_str = once('1')
.chain(repeat('0').take(98))
.chain(['1'])
.collect::<String>();
test_dec_scale(&test_str, &test_str, None);
}
#[test]
fn disallow_too_small() {
parse("1 atto").unwrap();
assert_eq!(
parse("0.1 atto").unwrap_err().to_string(),
"sub-atto amounts are not allowed"
)
}
#[test]
fn some_values() {
let one_atto = TokenAmount::from_atto(BigInt::one());
let one_nano = TokenAmount::from_nano(BigInt::one());
assert_eq!(one_atto, parse("1 atto").unwrap());
assert_eq!(one_atto, parse("1000 zepto").unwrap());
assert_eq!(one_nano, parse("1 nano").unwrap());
}
#[test]
fn all_possible_prefixes() {
for scale in si::SUPPORTED_PREFIXES {
for prefix in scale.units.iter().chain([&scale.name]) {
// Need a space here because of the exa ambiguity
test_dec_scale(&format!("1 {prefix}"), "1", *scale);
}
}
}
}
}
mod print {
use std::fmt;
use crate::shim::econ::TokenAmount;
use bigdecimal::BigDecimal;
use num::{BigInt, Zero as _};
use super::si;
fn scale(n: BigDecimal) -> (BigDecimal, Option<si::Prefix>) {
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent > 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
if n.is_integer() {
return (n, None);
}
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent < 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
let smallest_prefix = si::SUPPORTED_PREFIXES.last().unwrap();
(n / smallest_prefix.multiplier(), Some(*smallest_prefix))
}
pub struct Pr |
attos: BigInt,
}
impl From<&TokenAmount> for Pretty {
fn from(value: &TokenAmount) -> Self {
Self {
attos: value.atto().clone(),
}
}
}
pub trait TokenAmountPretty {
fn pretty(&self) -> Pretty;
}
impl TokenAmountPretty for TokenAmount {
/// Note the following format specifiers:
/// - `{:#}`: print number of FIL, not e.g `milliFIL`
/// - `{:.4}`: round to 4 significant figures
/// - `{:.#4}`: both
///
/// ```
/// # use forest_filecoin::doctest_private::{TokenAmountPretty as _, TokenAmount};
///
/// let amount = TokenAmount::from_nano(1500);
///
/// // Defaults to precise, with SI prefix
/// assert_eq!("1500 nanoFIL", format!("{}", amount.pretty()));
///
/// // Rounded to 1 s.f
/// assert_eq!("~2 microFIL", format!("{:.1}", amount.pretty()));
///
/// // Show absolute FIL
/// assert_eq!("0.0000015 FIL", format!("{:#}", amount.pretty()));
///
/// // Rounded absolute FIL
/// assert_eq!("~0.000002 FIL", format!("{:#.1}", amount.pretty()));
///
/// // We only indicate lost precision when relevant
/// assert_eq!("1500 nanoFIL", format!("{:.2}", amount.pretty()));
/// ```
///
/// # Formatting
/// - We select the most diminutive SI prefix (or not!) that allows us
/// to display an integer amount.
// RUST(aatifsyed): this should be -> impl fmt::Display
//
// Users shouldn't be able to name `Pretty` anyway
fn pretty(&self) -> Pretty {
Pretty::from(self)
}
}
impl fmt::Display for Pretty {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let actual_fil = &self.attos * si::atto.multiplier();
// rounding
let fil_for_printing = match f.precision() {
None => actual_fil.normalized(),
Some(prec) => actual_fil
.with_prec(u64::try_from(prec).expect("requested precision is absurd"))
.normalized(),
};
let precision_was_lost = fil_for_printing != actual_fil;
if precision_was_lost {
f.write_str("~")?;
}
// units or whole
let (print_me, prefix) = match f.alternate() {
true => (fil_for_printing, None),
false => scale(fil_for_printing),
};
// write the string
match print_me.is_zero() {
true => f.write_str("0 FIL"),
false => match prefix {
Some(prefix) => f.write_fmt(format_args!("{print_me} {}FIL", prefix.name)),
None => f.write_fmt(format_args!("{print_me} FIL")),
},
}
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::One as _;
use pretty_assertions::assert_eq;
use super::*;
#[test]
fn prefixes_represent_themselves() {
for prefix in si::SUPPORTED_PREFIXES {
let input = BigDecimal::from_str(prefix.multiplier).unwrap();
assert_eq!((BigDecimal::one(), Some(*prefix)), scale(input));
}
}
#[test]
fn very_large() {
let mut one_thousand_quettas = String::from(si::quetta.multiplier);
one_thousand_quettas.push_str("000");
test_scale(&one_thousand_quettas, "1000", si::quetta);
}
#[test]
fn very_small() {
let mut one_thousanth_of_a_quecto = String::from(si::quecto.multiplier);
one_thousanth_of_a_quecto.pop();
one_thousanth_of_a_quecto.push_str("0001");
test_scale(&one_thousanth_of_a_quecto, "0.001", si::quecto);
}
fn test_scale(
input: &str,
expected_value: &str,
expected_prefix: impl Into<Option<si::Prefix>>,
) {
let input = BigDecimal::from_str(input).unwrap();
let expected_value = BigDecimal::from_str(expected_value).unwrap();
let expected_prefix = expected_prefix.into();
assert_eq!((expected_value, expected_prefix), scale(input))
}
#[test]
fn simple() {
test_scale("1000000", "1", si::mega);
test_scale("100000", "100", si::kilo);
test_scale("10000", "10", si::kilo);
test_scale("1000", "1", si::kilo);
test_scale("100", "100", None);
test_scale("10", "10", None);
test_scale("1", "1", None);
test_scale("0.1", "100", si::milli);
test_scale("0.01", "10", si::milli);
test_scale("0.001", "1", si::milli);
test_scale("0.0001", "100", si::micro);
}
#[test]
fn trailing_one() {
test_scale("10001000", "10001", si::kilo);
test_scale("10001", "10001", None);
test_scale("1000.1", "1000100", si::milli);
}
fn attos(input: &str) -> TokenAmount {
TokenAmount::from_atto(BigInt::from_str(input).unwrap())
}
fn fils(input: &str) -> TokenAmount {
TokenAmount::from_whole(BigInt::from_str(input).unwrap())
}
#[test]
fn test_display() {
assert_eq!("0 FIL", format!("{}", attos("0").pretty()));
// Absolute works
assert_eq!("1 attoFIL", format!("{}", attos("1").pretty()));
assert_eq!(
"0.000000000000000001 FIL",
format!("{:#}", attos("1").pretty())
);
// We select the right suffix
assert_eq!("1 femtoFIL", format!("{}", attos("1000").pretty()));
assert_eq!("1001 attoFIL", format!("{}", attos("1001").pretty()));
// If you ask for 0 precision, you get it
assert_eq!("~0 FIL", format!("{:.0}", attos("1001").pretty()));
// Rounding without a prefix
assert_eq!("~10 FIL", format!("{:.1}", fils("11").pretty()));
// Rounding with absolute
assert_eq!(
"~0.000000000000002 FIL",
format!("{:#.1}", attos("1940").pretty())
);
assert_eq!(
"~0.0000000000000019 FIL",
format!("{:#.2}", attos("1940").pretty())
);
assert_eq!(
"0.00000000000000194 FIL",
format!("{:#.3}", attos("1940").pretty())
);
// Small numbers with a gap then a trailing one are rounded down
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1001").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.2}", attos("1001").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.3}", attos("1001").pretty()));
assert_eq!("1001 attoFIL", format!("{:.4}", attos("1001").pretty()));
assert_eq!("1001 attoFIL", format!("{:.5}", attos("1001").pretty()));
// Small numbers with trailing numbers are rounded down
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1234").pretty()));
assert_eq!("~1200 attoFIL", format!("{:.2}", attos("1234").pretty()));
assert_eq!("~1230 attoFIL", format!("{:.3}", attos("1234").pretty()));
assert_eq!("1234 attoFIL", format!("{:.4}", attos("1234").pretty()));
assert_eq!("1234 attoFIL", format!("{:.5}", attos("1234").pretty()));
// Small numbers are rounded appropriately
assert_eq!("~2 femtoFIL", format!("{:.1}", attos("1900").pretty()));
assert_eq!("~2 femtoFIL", format!("{:.1}", attos("1500").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1400").pretty()));
// Big numbers with a gap then a trailing one are rounded down
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1001").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.2}", fils("1001").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.3}", fils("1001").pretty()));
assert_eq!("1001 FIL", format!("{:.4}", fils("1001").pretty()));
assert_eq!("1001 FIL", format!("{:.5}", fils("1001").pretty()));
// Big numbers with trailing numbers are rounded down
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1234").pretty()));
assert_eq!("~1200 FIL", format!("{:.2}", fils("1234").pretty()));
assert_eq!("~1230 FIL", format!("{:.3}", fils("1234").pretty()));
assert_eq!("1234 FIL", format!("{:.4}", fils("1234").pretty()));
assert_eq!("1234 FIL", format!("{:.5}", fils("1234").pretty()));
// Big numbers are rounded appropriately
assert_eq!("~2 kiloFIL", format!("{:.1}", fils("1900").pretty()));
assert_eq!("~2 kiloFIL", format!("{:.1}", fils("1500").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1400").pretty()));
}
}
}
#[cfg(test)]
mod fuzz {
use quickcheck::quickcheck;
use super::*;
quickcheck! {
fn roundtrip(expected: crate::shim::econ::TokenAmount) -> () {
// Default formatting
let actual = parse(&format!("{}", expected.pretty())).unwrap();
assert_eq!(expected, actual);
// Absolute formatting
let actual = parse(&format!("{:#}", expected.pretty())).unwrap();
assert_eq!(expected, actual);
// Don't test rounded formatting...
}
}
quickcheck! {
fn parser_no_panic(s: String) -> () {
let _ = parse(&s);
}
}
}
| etty { | identifier_name |
humantoken.rs | // Copyright 2019-2023 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
//! This module defines a [parser](parse()) and
//! [pretty-printer](TokenAmountPretty::pretty) for
//! `TokenAmount`
//!
//! See the `si` module source for supported prefixes.
pub use parse::parse;
pub use print::TokenAmountPretty;
/// SI prefix definitions
mod si {
use bigdecimal::BigDecimal;
// Use a struct as a table row instead of an enum
// to make our code less macro-heavy
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Prefix {
/// `"micro"`
pub name: &'static str,
/// `[ "μ", "u" ]`
pub units: &'static [&'static str],
/// `-6`
pub exponent: i8,
/// `"0.000001"`
pub multiplier: &'static str,
}
impl Prefix {
// ENHANCE(aatifsyed): could memoize this if it's called in a hot loop
pub fn multiplier(&self) -> BigDecimal {
self.multiplier.parse().unwrap()
}
}
/// Biggest first
macro_rules! define_prefixes {
($($name:ident $symbol:ident$(or $alt_symbol:ident)* $base_10:literal $decimal:literal),* $(,)?) =>
{
// Define constants
$(
#[allow(non_upper_case_globals)]
pub const $name: Prefix = Prefix {
name: stringify!($name),
units: &[stringify!($symbol) $(, stringify!($alt_symbol))* ],
exponent: $base_10,
multiplier: stringify!($decimal),
};
)*
/// Biggest first
// Define top level array
pub const SUPPORTED_PREFIXES: &[Prefix] =
&[
$(
$name
,)*
];
};
}
define_prefixes! {
quetta Q 30 1000000000000000000000000000000,
ronna R 27 1000000000000000000000000000,
yotta Y 24 1000000000000000000000000,
zetta Z 21 1000000000000000000000,
exa E 18 1000000000000000000,
peta P 15 1000000000000000,
tera T 12 1000000000000,
giga G 9 1000000000,
mega M 6 1000000,
kilo k 3 1000,
// Leave this out because
// - it simplifies our printing logic
// - these are not commonly used
// - it's more consistent with lotus
//
// hecto h 2 100,
// deca da 1 10,
// deci d -1 0.1,
// centi c -2 0.01,
milli m -3 0.001,
micro μ or u -6 0.000001,
nano n -9 0.000000001,
pico p -12 0.000000000001,
femto f -15 0.000000000000001,
atto a -18 0.000000000000000001,
zepto z -21 0.000000000000000000001,
yocto y -24 0.000000000000000000000001,
ronto r -27 0.000000000000000000000000001,
quecto q -30 0.000000000000000000000000000001,
}
#[test]
fn sorted() {
let is_sorted_biggest_first = SUPPORTED_PREFIXES
.windows(2)
.all(|pair| pair[0].multiplier() > pair[1].multiplier());
assert!(is_sorted_biggest_first)
}
}
mod parse {
// ENHANCE(aatifsyed): could accept pairs like "1 nano 1 atto"
use crate::shim::econ::TokenAmount;
use anyhow::{anyhow, bail};
use bigdecimal::{BigDecimal, ParseBigDecimalError};
use nom::{
bytes::complete::tag,
character::complete::multispace0,
combinator::{map_res, opt},
error::{FromExternalError, ParseError},
number::complete::recognize_float,
sequence::terminated,
IResult,
};
use super::si;
/// Parse token amounts as floats with SI prefixed-units.
/// ```
/// # use forest_filecoin::doctest_private::{TokenAmount, parse};
/// fn assert_attos(input: &str, attos: u64) {
/// let expected = TokenAmount::from_atto(attos);
/// let actual = parse(input).unwrap();
/// assert_eq!(expected, actual);
/// }
/// assert_attos("1a", 1);
/// assert_attos("1aFIL", 1);
/// assert_attos("1 femtoFIL", 1000);
/// assert_attos("1.1 f", 1100);
/// assert_attos("1.0e3 attofil", 1000);
/// ```
///
/// # Known bugs
/// - `1efil` will not parse as an exa (`10^18`), because we'll try and
/// parse it as a exponent in the float. Instead use `1 efil`.
pub fn parse(input: &str) -> anyhow::Result<TokenAmount> {
let (mut big_decimal, scale) = parse_big_decimal_and_scale(input)?;
if let Some(scale) = scale {
big_decimal *= scale.multiplier();
}
let fil = big_decimal;
let attos = fil * si::atto.multiplier().inverse();
if !attos.is_integer() {
bail!("sub-atto amounts are not allowed");
}
let (attos, scale) = attos.with_scale(0).into_bigint_and_exponent();
assert_eq!(scale, 0, "we've just set the scale!");
Ok(TokenAmount::from_atto(attos))
}
fn nom2anyhow(e: nom::Err<nom::error::VerboseError<&str>>) -> anyhow::Error {
anyhow!("parse error: {e}")
}
fn parse_big_decimal_and_scale(
input: &str,
) -> anyhow::Result<(BigDecimal, Option<si::Prefix>)> {
// Strip `fil` or `FIL` at most once from the end
let input = match (input.strip_suffix("FIL"), input.strip_suffix("fil")) {
// remove whitespace before the units if there was any
(Some(stripped), _) => stripped.trim_end(),
(_, Some(stripped)) => stripped.trim_end(),
_ => input,
};
let (input, big_decimal) = permit_trailing_ws(bigdecimal)(input).map_err(nom2anyhow)?;
let (input, scale) = opt(permit_trailing_ws(si_scale))(input).map_err(nom2anyhow)?;
if !input.is_empty() {
bail!("Unexpected trailing input: {input}")
}
Ok((big_decimal, scale))
}
fn permit_trailing_ws<'a, F, O, E: ParseError<&'a str>>(
inner: F,
) -> impl FnMut(&'a str) -> IResult<&'a str, O, E>
where
F: FnMut(&'a str) -> IResult<&'a str, O, E>,
{
terminated(inner, multispace0)
}
/// Take an [si::Prefix] from the front of `input`
fn si_scale<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, si::Prefix, E> {
// Try the longest matches first, so we don't e.g match `a` instead of `atto`,
// leaving `tto`.
let mut scales = si::SUPPORTED_PREFIXES
.iter()
.flat_map(|scale| {
std::iter::once(&scale.name)
.chain(scale.units)
.map(move |prefix| (*prefix, scale))
})
.collect::<Vec<_>>();
scales.sort_by_key(|(prefix, _)| std::cmp::Reverse(*prefix));
for (prefix, scale) in scales {
if let Ok((rem, _prefix)) = tag::<_, _, E>(prefix)(input) {
return Ok((rem, *scale));
}
}
Err(nom::Err::Error(E::from_error_kind(
input,
nom::error::ErrorKind::Alt,
)))
}
/// Take a float from the front of `input`
fn bigdecimal<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, BigDecimal, E>
where
E: FromExternalError<&'a str, ParseBigDecimalError>,
{
map_res(recognize_float, str::parse)(input)
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::{BigInt, One as _};
use super::*;
#[test]
fn cover_scales() {
for scale in si::SUPPORTED_PREFIXES {
let _did_not_panic = scale.multiplier();
}
}
#[test]
fn parse_bigdecimal() {
fn do_test(input: &str, expected: &str) {
let expected = BigDecimal::from_str(expected).unwrap();
let (rem, actual) = bigdecimal::<nom::error::VerboseError<_>>(input).unwrap();
assert_eq!(expected, actual);
assert!(rem.is_empty());
}
do_test("1", "1");
do_test("0.1", "0.1");
do_test(".1", ".1");
do_test("1e1", "10");
do_test("1.", "1");
}
fn test_dec_scale(
input: &str,
expected_amount: &str,
expected_scale: impl Into<Option<si::Prefix>>,
) {
let expected_amount = BigDecimal::from_str(expected_amount).unwrap();
let expected_scale = expected_scale.into();
let (actual_amount, actual_scale) = parse_big_decimal_and_scale(input).unwrap();
assert_eq!(expected_amount, actual_amount, "{input}");
assert_eq!(expected_scale, actual_scale, "{input}");
}
#[test]
fn basic_bigdecimal_and_scale() {
// plain
test_dec_scale("1", "1", None);
// include unit
test_dec_scale("1 FIL", "1", None);
test_dec_scale("1FIL", "1", None);
test_dec_scale("1 fil", "1", None);
test_dec_scale("1fil", "1", None);
let possible_units = ["", "fil", "FIL", " fil", " FIL"];
let possible_prefixes = ["atto", "a", " atto", " a"];
for unit in possible_units {
for prefix in possible_prefixes {
let input = format!("1{prefix}{unit}");
test_dec_scale(&input, "1", si::atto)
}
}
}
#[test]
fn parse_exa_and_exponent() {
test_dec_scale("1 E", "1", si::exa);
test_dec_scale("1e0E", "1", si::exa);
// ENHANCE(aatifsyed): this should be parsed as 1 exa, but that
// would probably require an entirely custom float parser with
// lookahead - users will have to include a space for now
// do_test("1E", "1", exa);
}
#[test]
fn more_than_96_bits() {
use std::iter::{once, repeat};
// The previous rust_decimal implementation had at most 96 bits of precision
// we should be able to exceed that
let test_str = once('1')
.chain(repeat('0').take(98))
.chain(['1'])
.collect::<String>();
test_dec_scale(&test_str, &test_str, None);
}
#[test]
fn disallow_too_small() {
parse("1 atto").unwrap();
assert_eq!(
parse("0.1 atto").unwrap_err().to_string(),
"sub-atto amounts are not allowed"
)
}
#[test]
fn some_values() {
let one_atto = TokenAmount::from_atto(BigInt::one());
let one_nano = TokenAmount::from_nano(BigInt::one());
assert_eq!(one_atto, parse("1 atto").unwrap());
assert_eq!(one_atto, parse("1000 zepto").unwrap());
assert_eq!(one_nano, parse("1 nano").unwrap());
}
#[test]
fn all_possible_prefixes() {
for scale in si::SUPPORTED_PREFIXES {
for prefix in scale.units.iter().chain([&scale.name]) {
// Need a space here because of the exa ambiguity
test_dec_scale(&format!("1 {prefix}"), "1", *scale);
}
}
}
}
}
mod print {
use std::fmt;
use crate::shim::econ::TokenAmount;
use bigdecimal::BigDecimal;
use num::{BigInt, Zero as _};
use super::si;
fn scale(n: BigDecimal) -> (BigDecimal, Option<si::Prefix>) {
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent > 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
if n.is_integer() {
return (n, None);
}
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent < 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
let smallest_prefix = si::SUPPORTED_PREFIXES.last().unwrap();
(n / smallest_prefix.multiplier(), Some(*smallest_prefix))
}
pub struct Pretty {
attos: BigInt,
}
impl From<&TokenAmount> for Pretty {
fn from(value: &TokenAmount) -> Self {
Self {
attos: value.atto().clone(),
}
}
}
pub trait TokenAmountPretty {
fn pretty(&self) -> Pretty;
}
impl TokenAmountPretty for TokenAmount {
/// Note the following format specifiers:
/// - `{:#}`: print number of FIL, not e.g `milliFIL`
/// - `{:.4}`: round to 4 significant figures
/// - `{:.#4}`: both
///
/// ```
/// # use forest_filecoin::doctest_private::{TokenAmountPretty as _, TokenAmount};
///
/// let amount = TokenAmount::from_nano(1500);
///
/// // Defaults to precise, with SI prefix
/// assert_eq!("1500 nanoFIL", format!("{}", amount.pretty()));
///
/// // Rounded to 1 s.f
/// assert_eq!("~2 microFIL", format!("{:.1}", amount.pretty()));
///
/// // Show absolute FIL
/// assert_eq!("0.0000015 FIL", format!("{:#}", amount.pretty()));
///
/// // Rounded absolute FIL
/// assert_eq!("~0.000002 FIL", format!("{:#.1}", amount.pretty()));
///
/// // We only indicate lost precision when relevant
/// assert_eq!("1500 nanoFIL", format!("{:.2}", amount.pretty()));
/// ```
///
/// # Formatting
/// - We select the most diminutive SI prefix (or not!) that allows us
/// to display an integer amount.
// RUST(aatifsyed): this should be -> impl fmt::Display
//
// Users shouldn't be able to name `Pretty` anyway
fn pretty(&self) -> Pretty {
Pretty::from(self)
}
}
impl fmt::Display for Pretty {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let actual_fil = &self.attos * si::atto.multiplier();
// rounding
let fil_for_printing = match f.precision() {
None => actual_fil.normalized(),
Some(prec) => actual_fil
.with_prec(u64::try_from(prec).expect("requested precision is absurd"))
.normalized(),
};
let precision_was_lost = fil_for_printing != actual_fil;
if precision_was_lost {
f.write_str("~")?;
}
// units or whole
let (print_me, prefix) = match f.alternate() {
true => (fil_for_printing, None),
false => scale(fil_for_printing),
};
// write the string
match print_me.is_zero() {
true => f.write_str("0 FIL"),
false => match prefix {
Some(prefix) => f.write_fmt(format_args!("{print_me} {}FIL", prefix.name)),
None => f.write_fmt(format_args!("{print_me} FIL")),
},
}
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::One as _;
use pretty_assertions::assert_eq;
use super::*;
#[test]
fn prefixes_represent_themselves() {
for prefix in si::SUPPORTED_PREFIXES {
let input = BigDecimal::from_str(prefix.multiplier).unwrap();
assert_eq!((BigDecimal::one(), Some(*prefix)), scale(input));
}
}
#[test]
fn very_large() {
let mut one_thousand_quettas = String::from(si::quetta.multiplier);
one_thousand_quettas.push_str("000");
test_scale(&one_thousand_quettas, "1000", si::quetta);
}
#[test]
fn very_small() {
let mut one_thousanth_of_a_quecto = String::from(si::quecto.multiplier);
one_thousanth_of_a_quecto.pop();
one_thousanth_of_a_quecto.push_str("0001");
test_scale(&one_thousanth_of_a_quecto, "0.001", si::quecto);
}
fn test_scale(
input: &str,
expected_value: &str,
expected_prefix: impl Into<Option<si::Prefix>>,
) {
let input = BigDecimal::from_str(input).unwrap();
let expected_value = BigDecimal::from_str(expected_value).unwrap();
let expected_prefix = expected_prefix.into();
assert_eq!((expected_value, expected_prefix), scale(input))
}
#[test]
fn simple() {
test_scale("1000000", "1", si::mega);
test_scale("100000", "100", si::kilo);
test_scale("10000", "10", si::kilo);
test_scale("1000", "1", si::kilo);
test_scale("100", "100", None);
test_scale("10", "10", None);
test_scale("1", "1", None);
test_scale("0.1", "100", si::milli);
test_scale("0.01", "10", si::milli);
test_scale("0.001", "1", si::milli);
test_scale("0.0001", "100", si::micro);
}
#[test]
fn trailing_one() {
test_scale("10001000", "10001", si::kilo);
test_scale("10001", "10001", None);
test_scale("1000.1", "1000100", si::milli);
}
fn attos(input: &str) -> TokenAmount {
TokenAmount::from_atto(BigInt::from_str(input).unwrap())
}
fn fils(input: &str) -> TokenAmount {
TokenAmount::from_whole(BigInt::from_str(input).unwrap())
}
#[test]
fn test_display() {
assert_eq!("0 FIL", format!("{}", attos("0").pretty()));
// Absolute works
assert_eq!("1 attoFIL", format!("{}", attos("1").pretty()));
assert_eq!(
"0.000000000000000001 FIL",
format!("{:#}", attos("1").pretty())
);
// We select the right suffix
assert_eq!("1 femtoFIL", format!("{}", attos("1000").pretty()));
assert_eq!("1001 attoFIL", format!("{}", attos("1001").pretty()));
// If you ask for 0 precision, you get it
assert_eq!("~0 FIL", format!("{:.0}", attos("1001").pretty()));
// Rounding without a prefix
assert_eq!("~10 FIL", format!("{:.1}", fils("11").pretty()));
// Rounding with absolute
assert_eq!(
"~0.000000000000002 FIL",
format!("{:#.1}", attos("1940").pretty())
);
assert_eq!(
"~0.0000000000000019 FIL",
format!("{:#.2}", attos("1940").pretty())
);
assert_eq!(
"0.00000000000000194 FIL",
format!("{:#.3}", attos("1940").pretty())
);
// Small numbers with a gap then a trailing one are rounded down
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1001").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.2}", attos("1001").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.3}", attos("1001").pretty()));
assert_eq!("1001 attoFIL", format!("{:.4}", attos("1001").pretty()));
assert_eq!("1001 attoFIL", format!("{:.5}", attos("1001").pretty()));
// Small numbers with trailing numbers are rounded down
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1234").pretty()));
assert_eq!("~1200 attoFIL", format!("{:.2}", attos("1234").pretty()));
assert_eq!("~1230 attoFIL", format!("{:.3}", attos("1234").pretty()));
assert_eq!("1234 attoFIL", format!("{:.4}", attos("1234").pretty()));
assert_eq!("1234 attoFIL", format!("{:.5}", attos("1234").pretty()));
// Small numbers are rounded appropriately
assert_eq!("~2 femtoFIL", format!("{:.1}", attos("1900").pretty()));
assert_eq!("~2 femtoFIL", format!("{:.1}", attos("1500").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1400").pretty()));
// Big numbers with a gap then a trailing one are rounded down
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1001").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.2}", fils("1001").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.3}", fils("1001").pretty()));
assert_eq!("1001 FIL", format!("{:.4}", fils("1001").pretty()));
assert_eq!("1001 FIL", format!("{:.5}", fils("1001").pretty()));
// Big numbers with trailing numbers are rounded down
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1234").pretty()));
assert_eq!("~1200 FIL", format!("{:.2}", fils("1234").pretty()));
assert_eq!("~1230 FIL", format!("{:.3}", fils("1234").pretty()));
assert_eq!("1234 FIL", format!("{:.4}", fils("1234").pretty()));
assert_eq!("1234 FIL", format!("{:.5}", fils("1234").pretty()));
// Big numbers are rounded appropriately
assert_eq!("~2 kiloFIL", format!("{:.1}", fils("1900").pretty()));
assert_eq!("~2 kiloFIL", format!("{:.1}", fils("1500").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1400").pretty()));
}
}
}
#[cfg(test)]
mod fuzz {
use quickcheck::quickcheck;
use super::*;
quickcheck! {
fn roundtrip(expected: crate::shim::econ::TokenAmount) -> () {
// Default formatting
let actual = parse(&format!("{}", expected.pretty())).unwrap();
assert_eq!(expected, actual);
// Absolute formatting
let actual = parse(&format!("{:#}", expected.pretty())).unwrap();
assert_eq!(expected, actual);
// Don't test rounded formatting...
}
}
| } | quickcheck! {
fn parser_no_panic(s: String) -> () {
let _ = parse(&s);
}
} | random_line_split |
humantoken.rs | // Copyright 2019-2023 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
//! This module defines a [parser](parse()) and
//! [pretty-printer](TokenAmountPretty::pretty) for
//! `TokenAmount`
//!
//! See the `si` module source for supported prefixes.
pub use parse::parse;
pub use print::TokenAmountPretty;
/// SI prefix definitions
mod si {
use bigdecimal::BigDecimal;
// Use a struct as a table row instead of an enum
// to make our code less macro-heavy
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Prefix {
/// `"micro"`
pub name: &'static str,
/// `[ "μ", "u" ]`
pub units: &'static [&'static str],
/// `-6`
pub exponent: i8,
/// `"0.000001"`
pub multiplier: &'static str,
}
impl Prefix {
// ENHANCE(aatifsyed): could memoize this if it's called in a hot loop
pub fn multiplier(&self) -> BigDecimal {
self.multiplier.parse().unwrap()
}
}
/// Biggest first
macro_rules! define_prefixes {
($($name:ident $symbol:ident$(or $alt_symbol:ident)* $base_10:literal $decimal:literal),* $(,)?) =>
{
// Define constants
$(
#[allow(non_upper_case_globals)]
pub const $name: Prefix = Prefix {
name: stringify!($name),
units: &[stringify!($symbol) $(, stringify!($alt_symbol))* ],
exponent: $base_10,
multiplier: stringify!($decimal),
};
)*
/// Biggest first
// Define top level array
pub const SUPPORTED_PREFIXES: &[Prefix] =
&[
$(
$name
,)*
];
};
}
define_prefixes! {
quetta Q 30 1000000000000000000000000000000,
ronna R 27 1000000000000000000000000000,
yotta Y 24 1000000000000000000000000,
zetta Z 21 1000000000000000000000,
exa E 18 1000000000000000000,
peta P 15 1000000000000000,
tera T 12 1000000000000,
giga G 9 1000000000,
mega M 6 1000000,
kilo k 3 1000,
// Leave this out because
// - it simplifies our printing logic
// - these are not commonly used
// - it's more consistent with lotus
//
// hecto h 2 100,
// deca da 1 10,
// deci d -1 0.1,
// centi c -2 0.01,
milli m -3 0.001,
micro μ or u -6 0.000001,
nano n -9 0.000000001,
pico p -12 0.000000000001,
femto f -15 0.000000000000001,
atto a -18 0.000000000000000001,
zepto z -21 0.000000000000000000001,
yocto y -24 0.000000000000000000000001,
ronto r -27 0.000000000000000000000000001,
quecto q -30 0.000000000000000000000000000001,
}
#[test]
fn sorted() {
let is_sorted_biggest_first = SUPPORTED_PREFIXES
.windows(2)
.all(|pair| pair[0].multiplier() > pair[1].multiplier());
assert!(is_sorted_biggest_first)
}
}
mod parse {
// ENHANCE(aatifsyed): could accept pairs like "1 nano 1 atto"
use crate::shim::econ::TokenAmount;
use anyhow::{anyhow, bail};
use bigdecimal::{BigDecimal, ParseBigDecimalError};
use nom::{
bytes::complete::tag,
character::complete::multispace0,
combinator::{map_res, opt},
error::{FromExternalError, ParseError},
number::complete::recognize_float,
sequence::terminated,
IResult,
};
use super::si;
/// Parse token amounts as floats with SI prefixed-units.
/// ```
/// # use forest_filecoin::doctest_private::{TokenAmount, parse};
/// fn assert_attos(input: &str, attos: u64) {
/// let expected = TokenAmount::from_atto(attos);
/// let actual = parse(input).unwrap();
/// assert_eq!(expected, actual);
/// }
/// assert_attos("1a", 1);
/// assert_attos("1aFIL", 1);
/// assert_attos("1 femtoFIL", 1000);
/// assert_attos("1.1 f", 1100);
/// assert_attos("1.0e3 attofil", 1000);
/// ```
///
/// # Known bugs
/// - `1efil` will not parse as an exa (`10^18`), because we'll try and
/// parse it as a exponent in the float. Instead use `1 efil`.
pub fn parse(input: &str) -> anyhow::Result<TokenAmount> {
let (mut big_decimal, scale) = parse_big_decimal_and_scale(input)?;
if let Some(scale) = scale {
big_decimal *= scale.multiplier();
}
let fil = big_decimal;
let attos = fil * si::atto.multiplier().inverse();
if !attos.is_integer() {
bail!("sub-atto amounts are not allowed");
}
let (attos, scale) = attos.with_scale(0).into_bigint_and_exponent();
assert_eq!(scale, 0, "we've just set the scale!");
Ok(TokenAmount::from_atto(attos))
}
fn nom2anyhow(e: nom::Err<nom::error::VerboseError<&str>>) -> anyhow::Error {
anyhow!("parse error: {e}")
}
fn parse_big_decimal_and_scale(
input: &str,
) -> anyhow::Result<(BigDecimal, Option<si::Prefix>)> {
// Strip `fil` or `FIL` at most once from the end
let input = match (input.strip_suffix("FIL"), input.strip_suffix("fil")) {
// remove whitespace before the units if there was any
(Some(stripped), _) => stripped.trim_end(),
(_, Some(stripped)) => stripped.trim_end(),
_ => input,
};
let (input, big_decimal) = permit_trailing_ws(bigdecimal)(input).map_err(nom2anyhow)?;
let (input, scale) = opt(permit_trailing_ws(si_scale))(input).map_err(nom2anyhow)?;
if !input.is_empty() {
bail!("Unexpected trailing input: {input}")
}
Ok((big_decimal, scale))
}
fn permit_trailing_ws<'a, F, O, E: ParseError<&'a str>>(
inner: F,
) -> impl FnMut(&'a str) -> IResult<&'a str, O, E>
where
F: FnMut(&'a str) -> IResult<&'a str, O, E>,
{
terminated(inner, multispace0)
}
/// Take an [si::Prefix] from the front of `input`
fn si_scale<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, si::Prefix, E> {
| /// Take a float from the front of `input`
fn bigdecimal<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, BigDecimal, E>
where
E: FromExternalError<&'a str, ParseBigDecimalError>,
{
map_res(recognize_float, str::parse)(input)
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::{BigInt, One as _};
use super::*;
#[test]
fn cover_scales() {
for scale in si::SUPPORTED_PREFIXES {
let _did_not_panic = scale.multiplier();
}
}
#[test]
fn parse_bigdecimal() {
fn do_test(input: &str, expected: &str) {
let expected = BigDecimal::from_str(expected).unwrap();
let (rem, actual) = bigdecimal::<nom::error::VerboseError<_>>(input).unwrap();
assert_eq!(expected, actual);
assert!(rem.is_empty());
}
do_test("1", "1");
do_test("0.1", "0.1");
do_test(".1", ".1");
do_test("1e1", "10");
do_test("1.", "1");
}
fn test_dec_scale(
input: &str,
expected_amount: &str,
expected_scale: impl Into<Option<si::Prefix>>,
) {
let expected_amount = BigDecimal::from_str(expected_amount).unwrap();
let expected_scale = expected_scale.into();
let (actual_amount, actual_scale) = parse_big_decimal_and_scale(input).unwrap();
assert_eq!(expected_amount, actual_amount, "{input}");
assert_eq!(expected_scale, actual_scale, "{input}");
}
#[test]
fn basic_bigdecimal_and_scale() {
// plain
test_dec_scale("1", "1", None);
// include unit
test_dec_scale("1 FIL", "1", None);
test_dec_scale("1FIL", "1", None);
test_dec_scale("1 fil", "1", None);
test_dec_scale("1fil", "1", None);
let possible_units = ["", "fil", "FIL", " fil", " FIL"];
let possible_prefixes = ["atto", "a", " atto", " a"];
for unit in possible_units {
for prefix in possible_prefixes {
let input = format!("1{prefix}{unit}");
test_dec_scale(&input, "1", si::atto)
}
}
}
#[test]
fn parse_exa_and_exponent() {
test_dec_scale("1 E", "1", si::exa);
test_dec_scale("1e0E", "1", si::exa);
// ENHANCE(aatifsyed): this should be parsed as 1 exa, but that
// would probably require an entirely custom float parser with
// lookahead - users will have to include a space for now
// do_test("1E", "1", exa);
}
#[test]
fn more_than_96_bits() {
use std::iter::{once, repeat};
// The previous rust_decimal implementation had at most 96 bits of precision
// we should be able to exceed that
let test_str = once('1')
.chain(repeat('0').take(98))
.chain(['1'])
.collect::<String>();
test_dec_scale(&test_str, &test_str, None);
}
#[test]
fn disallow_too_small() {
parse("1 atto").unwrap();
assert_eq!(
parse("0.1 atto").unwrap_err().to_string(),
"sub-atto amounts are not allowed"
)
}
#[test]
fn some_values() {
let one_atto = TokenAmount::from_atto(BigInt::one());
let one_nano = TokenAmount::from_nano(BigInt::one());
assert_eq!(one_atto, parse("1 atto").unwrap());
assert_eq!(one_atto, parse("1000 zepto").unwrap());
assert_eq!(one_nano, parse("1 nano").unwrap());
}
#[test]
fn all_possible_prefixes() {
for scale in si::SUPPORTED_PREFIXES {
for prefix in scale.units.iter().chain([&scale.name]) {
// Need a space here because of the exa ambiguity
test_dec_scale(&format!("1 {prefix}"), "1", *scale);
}
}
}
}
}
mod print {
use std::fmt;
use crate::shim::econ::TokenAmount;
use bigdecimal::BigDecimal;
use num::{BigInt, Zero as _};
use super::si;
fn scale(n: BigDecimal) -> (BigDecimal, Option<si::Prefix>) {
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent > 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
if n.is_integer() {
return (n, None);
}
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent < 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
let smallest_prefix = si::SUPPORTED_PREFIXES.last().unwrap();
(n / smallest_prefix.multiplier(), Some(*smallest_prefix))
}
pub struct Pretty {
attos: BigInt,
}
impl From<&TokenAmount> for Pretty {
fn from(value: &TokenAmount) -> Self {
Self {
attos: value.atto().clone(),
}
}
}
pub trait TokenAmountPretty {
fn pretty(&self) -> Pretty;
}
impl TokenAmountPretty for TokenAmount {
/// Note the following format specifiers:
/// - `{:#}`: print number of FIL, not e.g `milliFIL`
/// - `{:.4}`: round to 4 significant figures
/// - `{:.#4}`: both
///
/// ```
/// # use forest_filecoin::doctest_private::{TokenAmountPretty as _, TokenAmount};
///
/// let amount = TokenAmount::from_nano(1500);
///
/// // Defaults to precise, with SI prefix
/// assert_eq!("1500 nanoFIL", format!("{}", amount.pretty()));
///
/// // Rounded to 1 s.f
/// assert_eq!("~2 microFIL", format!("{:.1}", amount.pretty()));
///
/// // Show absolute FIL
/// assert_eq!("0.0000015 FIL", format!("{:#}", amount.pretty()));
///
/// // Rounded absolute FIL
/// assert_eq!("~0.000002 FIL", format!("{:#.1}", amount.pretty()));
///
/// // We only indicate lost precision when relevant
/// assert_eq!("1500 nanoFIL", format!("{:.2}", amount.pretty()));
/// ```
///
/// # Formatting
/// - We select the most diminutive SI prefix (or not!) that allows us
/// to display an integer amount.
// RUST(aatifsyed): this should be -> impl fmt::Display
//
// Users shouldn't be able to name `Pretty` anyway
fn pretty(&self) -> Pretty {
Pretty::from(self)
}
}
impl fmt::Display for Pretty {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let actual_fil = &self.attos * si::atto.multiplier();
// rounding
let fil_for_printing = match f.precision() {
None => actual_fil.normalized(),
Some(prec) => actual_fil
.with_prec(u64::try_from(prec).expect("requested precision is absurd"))
.normalized(),
};
let precision_was_lost = fil_for_printing != actual_fil;
if precision_was_lost {
f.write_str("~")?;
}
// units or whole
let (print_me, prefix) = match f.alternate() {
true => (fil_for_printing, None),
false => scale(fil_for_printing),
};
// write the string
match print_me.is_zero() {
true => f.write_str("0 FIL"),
false => match prefix {
Some(prefix) => f.write_fmt(format_args!("{print_me} {}FIL", prefix.name)),
None => f.write_fmt(format_args!("{print_me} FIL")),
},
}
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::One as _;
use pretty_assertions::assert_eq;
use super::*;
#[test]
fn prefixes_represent_themselves() {
for prefix in si::SUPPORTED_PREFIXES {
let input = BigDecimal::from_str(prefix.multiplier).unwrap();
assert_eq!((BigDecimal::one(), Some(*prefix)), scale(input));
}
}
#[test]
fn very_large() {
let mut one_thousand_quettas = String::from(si::quetta.multiplier);
one_thousand_quettas.push_str("000");
test_scale(&one_thousand_quettas, "1000", si::quetta);
}
#[test]
fn very_small() {
let mut one_thousanth_of_a_quecto = String::from(si::quecto.multiplier);
one_thousanth_of_a_quecto.pop();
one_thousanth_of_a_quecto.push_str("0001");
test_scale(&one_thousanth_of_a_quecto, "0.001", si::quecto);
}
fn test_scale(
input: &str,
expected_value: &str,
expected_prefix: impl Into<Option<si::Prefix>>,
) {
let input = BigDecimal::from_str(input).unwrap();
let expected_value = BigDecimal::from_str(expected_value).unwrap();
let expected_prefix = expected_prefix.into();
assert_eq!((expected_value, expected_prefix), scale(input))
}
#[test]
fn simple() {
test_scale("1000000", "1", si::mega);
test_scale("100000", "100", si::kilo);
test_scale("10000", "10", si::kilo);
test_scale("1000", "1", si::kilo);
test_scale("100", "100", None);
test_scale("10", "10", None);
test_scale("1", "1", None);
test_scale("0.1", "100", si::milli);
test_scale("0.01", "10", si::milli);
test_scale("0.001", "1", si::milli);
test_scale("0.0001", "100", si::micro);
}
#[test]
fn trailing_one() {
test_scale("10001000", "10001", si::kilo);
test_scale("10001", "10001", None);
test_scale("1000.1", "1000100", si::milli);
}
fn attos(input: &str) -> TokenAmount {
TokenAmount::from_atto(BigInt::from_str(input).unwrap())
}
fn fils(input: &str) -> TokenAmount {
TokenAmount::from_whole(BigInt::from_str(input).unwrap())
}
#[test]
fn test_display() {
assert_eq!("0 FIL", format!("{}", attos("0").pretty()));
// Absolute works
assert_eq!("1 attoFIL", format!("{}", attos("1").pretty()));
assert_eq!(
"0.000000000000000001 FIL",
format!("{:#}", attos("1").pretty())
);
// We select the right suffix
assert_eq!("1 femtoFIL", format!("{}", attos("1000").pretty()));
assert_eq!("1001 attoFIL", format!("{}", attos("1001").pretty()));
// If you ask for 0 precision, you get it
assert_eq!("~0 FIL", format!("{:.0}", attos("1001").pretty()));
// Rounding without a prefix
assert_eq!("~10 FIL", format!("{:.1}", fils("11").pretty()));
// Rounding with absolute
assert_eq!(
"~0.000000000000002 FIL",
format!("{:#.1}", attos("1940").pretty())
);
assert_eq!(
"~0.0000000000000019 FIL",
format!("{:#.2}", attos("1940").pretty())
);
assert_eq!(
"0.00000000000000194 FIL",
format!("{:#.3}", attos("1940").pretty())
);
// Small numbers with a gap then a trailing one are rounded down
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1001").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.2}", attos("1001").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.3}", attos("1001").pretty()));
assert_eq!("1001 attoFIL", format!("{:.4}", attos("1001").pretty()));
assert_eq!("1001 attoFIL", format!("{:.5}", attos("1001").pretty()));
// Small numbers with trailing numbers are rounded down
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1234").pretty()));
assert_eq!("~1200 attoFIL", format!("{:.2}", attos("1234").pretty()));
assert_eq!("~1230 attoFIL", format!("{:.3}", attos("1234").pretty()));
assert_eq!("1234 attoFIL", format!("{:.4}", attos("1234").pretty()));
assert_eq!("1234 attoFIL", format!("{:.5}", attos("1234").pretty()));
// Small numbers are rounded appropriately
assert_eq!("~2 femtoFIL", format!("{:.1}", attos("1900").pretty()));
assert_eq!("~2 femtoFIL", format!("{:.1}", attos("1500").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1400").pretty()));
// Big numbers with a gap then a trailing one are rounded down
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1001").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.2}", fils("1001").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.3}", fils("1001").pretty()));
assert_eq!("1001 FIL", format!("{:.4}", fils("1001").pretty()));
assert_eq!("1001 FIL", format!("{:.5}", fils("1001").pretty()));
// Big numbers with trailing numbers are rounded down
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1234").pretty()));
assert_eq!("~1200 FIL", format!("{:.2}", fils("1234").pretty()));
assert_eq!("~1230 FIL", format!("{:.3}", fils("1234").pretty()));
assert_eq!("1234 FIL", format!("{:.4}", fils("1234").pretty()));
assert_eq!("1234 FIL", format!("{:.5}", fils("1234").pretty()));
// Big numbers are rounded appropriately
assert_eq!("~2 kiloFIL", format!("{:.1}", fils("1900").pretty()));
assert_eq!("~2 kiloFIL", format!("{:.1}", fils("1500").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1400").pretty()));
}
}
}
#[cfg(test)]
mod fuzz {
use quickcheck::quickcheck;
use super::*;
quickcheck! {
fn roundtrip(expected: crate::shim::econ::TokenAmount) -> () {
// Default formatting
let actual = parse(&format!("{}", expected.pretty())).unwrap();
assert_eq!(expected, actual);
// Absolute formatting
let actual = parse(&format!("{:#}", expected.pretty())).unwrap();
assert_eq!(expected, actual);
// Don't test rounded formatting...
}
}
quickcheck! {
fn parser_no_panic(s: String) -> () {
let _ = parse(&s);
}
}
}
| // Try the longest matches first, so we don't e.g match `a` instead of `atto`,
// leaving `tto`.
let mut scales = si::SUPPORTED_PREFIXES
.iter()
.flat_map(|scale| {
std::iter::once(&scale.name)
.chain(scale.units)
.map(move |prefix| (*prefix, scale))
})
.collect::<Vec<_>>();
scales.sort_by_key(|(prefix, _)| std::cmp::Reverse(*prefix));
for (prefix, scale) in scales {
if let Ok((rem, _prefix)) = tag::<_, _, E>(prefix)(input) {
return Ok((rem, *scale));
}
}
Err(nom::Err::Error(E::from_error_kind(
input,
nom::error::ErrorKind::Alt,
)))
}
| identifier_body |
eccrypto.py | import os
import hashlib
import struct
def int_to_bytes(raw, length):
data = []
for _ in range(length):
data.append(raw % 256)
raw //= 256
return bytes(data[::-1])
def bytes_to_int(data):
raw = 0
for byte in data:
raw = raw * 256 + byte
return raw
def legendre(a, p):
res = pow(a, (p - 1) // 2, p)
if res == p - 1:
return -1
else:
return res
def inverse(a, n):
if a == 0:
return 0
lm, hm = 1, 0
low, high = a % n, n
while low > 1:
r = high // low
nm, new = hm - lm * r, high - low * r
lm, low, hm, high = nm, new, lm, low
return lm % n
def square_root_mod_prime(n, p):
if n == 0:
return 0
if p == 2:
return n # We should never get here but it might be useful
if legendre(n, p) != 1:
raise ValueError("No square root")
# Optimizations
if p % 4 == 3:
return pow(n, (p + 1) // 4, p)
# 1. By factoring out powers of 2, find Q and S such that p - 1 =
# Q * 2 ** S with Q odd
q = p - 1
s = 0
while q % 2 == 0:
q //= 2
s += 1
# 2. Search for z in Z/pZ which is a quadratic non-residue
z = 1
while legendre(z, p) != -1:
z += 1
m, c, t, r = s, pow(z, q, p), pow(n, q, p), pow(n, (q + 1) // 2, p)
while True:
if t == 0:
return 0
elif t == 1:
return r
# Use repeated squaring to find the least i, 0 < i < M, such
# that t ** (2 ** i) = 1
t_sq = t
i = 0
for i in range(1, m):
t_sq = t_sq * t_sq % p
if t_sq == 1:
break
else:
raise ValueError("Should never get here")
# Let b = c ** (2 ** (m - i - 1))
b = pow(c, 2**(m - i - 1), p)
m = i
c = b * b % p
t = t * b * b % p
r = r * b % p
return r
class JacobianCurve:
def __init__(self, p, n, a, b, g):
self.p = p
self.n = n
self.a = a
self.b = b
self.g = g
self.n_length = len(bin(self.n).replace("0b", ""))
def isinf(self, p):
return p[0] == 0 and p[1] == 0
def to_jacobian(self, p):
return p[0], p[1], 1
def jacobian_double(self, p):
if not p[1]:
return 0, 0, 0
ysq = (p[1]**2) % self.p
s = (4 * p[0] * ysq) % self.p
m = (3 * p[0]**2 + self.a * p[2]**4) % self.p
nx = (m**2 - 2 * s) % self.p
ny = (m * (s - nx) - 8 * ysq**2) % self.p
nz = (2 * p[1] * p[2]) % self.p
return nx, ny, nz
def jacobian_add(self, p, q):
if not p[1]:
return q
if not q[1]:
return p
u1 = (p[0] * q[2]**2) % self.p
u2 = (q[0] * p[2]**2) % self.p
s1 = (p[1] * q[2]**3) % self.p
s2 = (q[1] * p[2]**3) % self.p
if u1 == u2:
if s1 != s2:
return (0, 0, 1)
return self.jacobian_double(p)
h = u2 - u1
r = s2 - s1
h2 = (h * h) % self.p
h3 = (h * h2) % self.p
u1h2 = (u1 * h2) % self.p
nx = (r**2 - h3 - 2 * u1h2) % self.p
ny = (r * (u1h2 - nx) - s1 * h3) % self.p
nz = (h * p[2] * q[2]) % self.p
return (nx, ny, nz)
def from_jacobian(self, p):
z = inverse(p[2], self.p)
return (p[0] * z**2) % self.p, (p[1] * z**3) % self.p
def jacobian_multiply(self, a, n, secret=False):
if a[1] == 0 or n == 0:
return 0, 0, 1
if n == 1:
return a
if n < 0 or n >= self.n:
return self.jacobian_multiply(a, n % self.n, secret)
half = self.jacobian_multiply(a, n // 2, secret)
half_sq = self.jacobian_double(half)
if secret:
# A constant-time implementation
half_sq_a = self.jacobian_add(half_sq, a)
if n % 2 == 0:
result = half_sq
if n % 2 == 1:
result = half_sq_a
return result
else:
if n % 2 == 0:
return half_sq
return self.jacobian_add(half_sq, a)
def fast_multiply(self, a, n, secret=False):
return self.from_jacobian(
self.jacobian_multiply(self.to_jacobian(a), n, secret))
class EllipticCurveBackend:
def __init__(self, p, n, a, b, g):
self.p, self.n, self.a, self.b, self.g = p, n, a, b, g
self.jacobian = JacobianCurve(p, n, a, b, g)
self.public_key_length = (len(bin(p).replace("0b", "")) + 7) // 8
self.order_bitlength = len(bin(n).replace("0b", ""))
def _int_to_bytes(self, raw, len=None):
return int_to_bytes(raw, len or self.public_key_length)
def decompress_point(self, public_key):
# Parse & load data
x = bytes_to_int(public_key[1:])
# Calculate Y
y_square = (pow(x, 3, self.p) + self.a * x + self.b) % self.p
try:
y = square_root_mod_prime(y_square, self.p)
except Exception:
raise ValueError("Invalid public key") from None
if y % 2 != public_key[0] - 0x02:
y = self.p - y
return self._int_to_bytes(x), self._int_to_bytes(y)
def new_private_key(self):
while True:
private_key = os.urandom(self.public_key_length)
if bytes_to_int(private_key) >= self.n:
continue
return private_key
def private_to_public(self, private_key):
raw = bytes_to_int(private_key)
x, y = self.jacobian.fast_multiply(self.g, raw)
return self._int_to_bytes(x), self._int_to_bytes(y)
def ecdh(self, private_key, public_key):
x, y = public_key
x, y = bytes_to_int(x), bytes_to_int(y)
private_key = bytes_to_int(private_key)
x, _ = self.jacobian.fast_multiply((x, y), private_key, secret=True)
return self._int_to_bytes(x)
def _subject_to_int(self, subject):
return bytes_to_int(subject[:(self.order_bitlength + 7) // 8])
class ECC:
# pylint: disable=line-too-long
# name: (nid, p, n, a, b, (Gx, Gy)),
CURVE = (
714,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141, 0,
7,
(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8))
# pylint: enable=line-too-long
def __init__(self, backend, aes):
self._backend = backend
self._aes = aes
def get_curve(self):
nid, p, n, a, b, g = self.CURVE
params = {"p": p, "n": n, "a": a, "b": b, "g": g}
return EllipticCurve(self._backend, params, self._aes, nid)
class EllipticCurve:
def | (self, backend_factory, params, aes, nid):
self._backend = backend_factory(**params)
self.params = params
self._aes = aes
self.nid = nid
def _encode_public_key(self, x, y, is_compressed=True, raw=True):
if raw:
if is_compressed:
return bytes([0x02 + (y[-1] % 2)]) + x
else:
return bytes([0x04]) + x + y
else:
return struct.pack("!HH", self.nid, len(x)) + x + struct.pack(
"!H", len(y)) + y
def _decode_public_key(self, public_key, partial=False):
if not public_key:
raise ValueError("No public key")
if public_key[0] == 0x04:
# Uncompressed
expected_length = 1 + 2 * self._backend.public_key_length
if partial:
if len(public_key) < expected_length:
raise ValueError("Invalid uncompressed public key length")
else:
if len(public_key) != expected_length:
raise ValueError("Invalid uncompressed public key length")
x = public_key[1:1 + self._backend.public_key_length]
y = public_key[1 + self._backend.public_key_length:expected_length]
if partial:
return (x, y), expected_length
else:
return x, y
elif public_key[0] in (0x02, 0x03):
# Compressed
expected_length = 1 + self._backend.public_key_length
if partial:
if len(public_key) < expected_length:
raise ValueError("Invalid compressed public key length")
else:
if len(public_key) != expected_length:
raise ValueError("Invalid compressed public key length")
x, y = self._backend.decompress_point(public_key[:expected_length])
# Sanity check
if x != public_key[1:expected_length]:
raise ValueError("Incorrect compressed public key")
if partial:
return (x, y), expected_length
else:
return x, y
else:
raise ValueError("Invalid public key prefix")
def decode_public_key(self, public_key):
return self._decode_public_key(public_key)
def new_private_key(self, is_compressed=False):
return self._backend.new_private_key() + (b"\x01"
if is_compressed else b"")
def private_to_public(self, private_key):
if len(private_key) == self._backend.public_key_length:
is_compressed = False
elif len(
private_key
) == self._backend.public_key_length + 1 and private_key[-1] == 1:
is_compressed = True
private_key = private_key[:-1]
else:
raise ValueError("Private key has invalid length")
x, y = self._backend.private_to_public(private_key)
return self._encode_public_key(x, y, is_compressed=is_compressed)
def _digest(self, data, hash):
if hash is None:
return data
elif callable(hash):
return hash(data)
elif hash == "sha1":
return hashlib.sha1(data).digest()
elif hash == "sha256":
return hashlib.sha256(data).digest()
elif hash == "sha512":
return hashlib.sha512(data).digest()
else:
raise ValueError("Unknown hash/derivation method")
ecc = ECC(EllipticCurveBackend, None)
curve = ecc.get_curve()
| __init__ | identifier_name |
eccrypto.py | import os
import hashlib
import struct
def int_to_bytes(raw, length):
data = []
for _ in range(length):
data.append(raw % 256)
raw //= 256
return bytes(data[::-1])
def bytes_to_int(data):
raw = 0
for byte in data:
raw = raw * 256 + byte
return raw
def legendre(a, p):
res = pow(a, (p - 1) // 2, p)
if res == p - 1:
return -1
else:
return res
def inverse(a, n):
if a == 0:
return 0
lm, hm = 1, 0
low, high = a % n, n
while low > 1:
r = high // low
nm, new = hm - lm * r, high - low * r
lm, low, hm, high = nm, new, lm, low
return lm % n
def square_root_mod_prime(n, p):
if n == 0:
return 0
if p == 2:
return n # We should never get here but it might be useful
if legendre(n, p) != 1:
raise ValueError("No square root")
# Optimizations
if p % 4 == 3:
return pow(n, (p + 1) // 4, p)
# 1. By factoring out powers of 2, find Q and S such that p - 1 =
# Q * 2 ** S with Q odd
q = p - 1
s = 0
while q % 2 == 0:
q //= 2
s += 1
# 2. Search for z in Z/pZ which is a quadratic non-residue
z = 1
while legendre(z, p) != -1:
z += 1
m, c, t, r = s, pow(z, q, p), pow(n, q, p), pow(n, (q + 1) // 2, p)
while True:
if t == 0:
return 0
elif t == 1:
return r
# Use repeated squaring to find the least i, 0 < i < M, such
# that t ** (2 ** i) = 1
t_sq = t
i = 0
for i in range(1, m):
t_sq = t_sq * t_sq % p
if t_sq == 1:
break
else:
raise ValueError("Should never get here")
# Let b = c ** (2 ** (m - i - 1))
b = pow(c, 2**(m - i - 1), p)
m = i
c = b * b % p
t = t * b * b % p
r = r * b % p
return r
class JacobianCurve:
def __init__(self, p, n, a, b, g):
self.p = p
self.n = n
self.a = a
self.b = b
self.g = g
self.n_length = len(bin(self.n).replace("0b", ""))
def isinf(self, p):
return p[0] == 0 and p[1] == 0
def to_jacobian(self, p):
return p[0], p[1], 1
def jacobian_double(self, p):
if not p[1]:
return 0, 0, 0
ysq = (p[1]**2) % self.p
s = (4 * p[0] * ysq) % self.p
m = (3 * p[0]**2 + self.a * p[2]**4) % self.p
nx = (m**2 - 2 * s) % self.p
ny = (m * (s - nx) - 8 * ysq**2) % self.p
nz = (2 * p[1] * p[2]) % self.p
return nx, ny, nz
def jacobian_add(self, p, q):
if not p[1]:
return q
if not q[1]:
return p
u1 = (p[0] * q[2]**2) % self.p
u2 = (q[0] * p[2]**2) % self.p
s1 = (p[1] * q[2]**3) % self.p
s2 = (q[1] * p[2]**3) % self.p
if u1 == u2:
if s1 != s2:
return (0, 0, 1)
return self.jacobian_double(p)
h = u2 - u1
r = s2 - s1
h2 = (h * h) % self.p
h3 = (h * h2) % self.p
u1h2 = (u1 * h2) % self.p
nx = (r**2 - h3 - 2 * u1h2) % self.p
ny = (r * (u1h2 - nx) - s1 * h3) % self.p
nz = (h * p[2] * q[2]) % self.p
return (nx, ny, nz)
def from_jacobian(self, p):
z = inverse(p[2], self.p)
return (p[0] * z**2) % self.p, (p[1] * z**3) % self.p
def jacobian_multiply(self, a, n, secret=False):
if a[1] == 0 or n == 0:
return 0, 0, 1
if n == 1:
return a
if n < 0 or n >= self.n:
return self.jacobian_multiply(a, n % self.n, secret)
half = self.jacobian_multiply(a, n // 2, secret)
half_sq = self.jacobian_double(half)
if secret:
# A constant-time implementation
half_sq_a = self.jacobian_add(half_sq, a)
if n % 2 == 0:
result = half_sq
if n % 2 == 1:
result = half_sq_a
return result
else:
if n % 2 == 0:
return half_sq
return self.jacobian_add(half_sq, a)
def fast_multiply(self, a, n, secret=False):
return self.from_jacobian(
self.jacobian_multiply(self.to_jacobian(a), n, secret))
class EllipticCurveBackend:
def __init__(self, p, n, a, b, g):
self.p, self.n, self.a, self.b, self.g = p, n, a, b, g
self.jacobian = JacobianCurve(p, n, a, b, g)
self.public_key_length = (len(bin(p).replace("0b", "")) + 7) // 8
self.order_bitlength = len(bin(n).replace("0b", ""))
def _int_to_bytes(self, raw, len=None):
return int_to_bytes(raw, len or self.public_key_length)
def decompress_point(self, public_key):
# Parse & load data
|
def new_private_key(self):
while True:
private_key = os.urandom(self.public_key_length)
if bytes_to_int(private_key) >= self.n:
continue
return private_key
def private_to_public(self, private_key):
raw = bytes_to_int(private_key)
x, y = self.jacobian.fast_multiply(self.g, raw)
return self._int_to_bytes(x), self._int_to_bytes(y)
def ecdh(self, private_key, public_key):
x, y = public_key
x, y = bytes_to_int(x), bytes_to_int(y)
private_key = bytes_to_int(private_key)
x, _ = self.jacobian.fast_multiply((x, y), private_key, secret=True)
return self._int_to_bytes(x)
def _subject_to_int(self, subject):
return bytes_to_int(subject[:(self.order_bitlength + 7) // 8])
class ECC:
# pylint: disable=line-too-long
# name: (nid, p, n, a, b, (Gx, Gy)),
CURVE = (
714,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141, 0,
7,
(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8))
# pylint: enable=line-too-long
def __init__(self, backend, aes):
self._backend = backend
self._aes = aes
def get_curve(self):
nid, p, n, a, b, g = self.CURVE
params = {"p": p, "n": n, "a": a, "b": b, "g": g}
return EllipticCurve(self._backend, params, self._aes, nid)
class EllipticCurve:
def __init__(self, backend_factory, params, aes, nid):
self._backend = backend_factory(**params)
self.params = params
self._aes = aes
self.nid = nid
def _encode_public_key(self, x, y, is_compressed=True, raw=True):
if raw:
if is_compressed:
return bytes([0x02 + (y[-1] % 2)]) + x
else:
return bytes([0x04]) + x + y
else:
return struct.pack("!HH", self.nid, len(x)) + x + struct.pack(
"!H", len(y)) + y
def _decode_public_key(self, public_key, partial=False):
if not public_key:
raise ValueError("No public key")
if public_key[0] == 0x04:
# Uncompressed
expected_length = 1 + 2 * self._backend.public_key_length
if partial:
if len(public_key) < expected_length:
raise ValueError("Invalid uncompressed public key length")
else:
if len(public_key) != expected_length:
raise ValueError("Invalid uncompressed public key length")
x = public_key[1:1 + self._backend.public_key_length]
y = public_key[1 + self._backend.public_key_length:expected_length]
if partial:
return (x, y), expected_length
else:
return x, y
elif public_key[0] in (0x02, 0x03):
# Compressed
expected_length = 1 + self._backend.public_key_length
if partial:
if len(public_key) < expected_length:
raise ValueError("Invalid compressed public key length")
else:
if len(public_key) != expected_length:
raise ValueError("Invalid compressed public key length")
x, y = self._backend.decompress_point(public_key[:expected_length])
# Sanity check
if x != public_key[1:expected_length]:
raise ValueError("Incorrect compressed public key")
if partial:
return (x, y), expected_length
else:
return x, y
else:
raise ValueError("Invalid public key prefix")
def decode_public_key(self, public_key):
return self._decode_public_key(public_key)
def new_private_key(self, is_compressed=False):
return self._backend.new_private_key() + (b"\x01"
if is_compressed else b"")
def private_to_public(self, private_key):
if len(private_key) == self._backend.public_key_length:
is_compressed = False
elif len(
private_key
) == self._backend.public_key_length + 1 and private_key[-1] == 1:
is_compressed = True
private_key = private_key[:-1]
else:
raise ValueError("Private key has invalid length")
x, y = self._backend.private_to_public(private_key)
return self._encode_public_key(x, y, is_compressed=is_compressed)
def _digest(self, data, hash):
if hash is None:
return data
elif callable(hash):
return hash(data)
elif hash == "sha1":
return hashlib.sha1(data).digest()
elif hash == "sha256":
return hashlib.sha256(data).digest()
elif hash == "sha512":
return hashlib.sha512(data).digest()
else:
raise ValueError("Unknown hash/derivation method")
ecc = ECC(EllipticCurveBackend, None)
curve = ecc.get_curve()
| x = bytes_to_int(public_key[1:])
# Calculate Y
y_square = (pow(x, 3, self.p) + self.a * x + self.b) % self.p
try:
y = square_root_mod_prime(y_square, self.p)
except Exception:
raise ValueError("Invalid public key") from None
if y % 2 != public_key[0] - 0x02:
y = self.p - y
return self._int_to_bytes(x), self._int_to_bytes(y) | identifier_body |
eccrypto.py | import os
import hashlib
import struct
def int_to_bytes(raw, length):
data = []
for _ in range(length):
data.append(raw % 256)
raw //= 256
return bytes(data[::-1])
def bytes_to_int(data):
raw = 0
for byte in data:
raw = raw * 256 + byte
return raw
def legendre(a, p):
res = pow(a, (p - 1) // 2, p)
if res == p - 1:
return -1
else:
return res
def inverse(a, n):
if a == 0:
return 0
lm, hm = 1, 0
low, high = a % n, n
while low > 1:
r = high // low
nm, new = hm - lm * r, high - low * r
lm, low, hm, high = nm, new, lm, low
return lm % n
def square_root_mod_prime(n, p):
if n == 0:
return 0
if p == 2:
return n # We should never get here but it might be useful
if legendre(n, p) != 1:
raise ValueError("No square root")
# Optimizations
if p % 4 == 3:
return pow(n, (p + 1) // 4, p)
# 1. By factoring out powers of 2, find Q and S such that p - 1 =
# Q * 2 ** S with Q odd
q = p - 1
s = 0
while q % 2 == 0:
q //= 2
s += 1
# 2. Search for z in Z/pZ which is a quadratic non-residue
z = 1
while legendre(z, p) != -1:
z += 1
m, c, t, r = s, pow(z, q, p), pow(n, q, p), pow(n, (q + 1) // 2, p)
while True:
if t == 0:
return 0
elif t == 1:
return r
# Use repeated squaring to find the least i, 0 < i < M, such
# that t ** (2 ** i) = 1
t_sq = t
i = 0
for i in range(1, m):
t_sq = t_sq * t_sq % p
if t_sq == 1:
break
else:
raise ValueError("Should never get here")
# Let b = c ** (2 ** (m - i - 1))
b = pow(c, 2**(m - i - 1), p)
m = i
c = b * b % p
t = t * b * b % p
r = r * b % p
return r
class JacobianCurve:
def __init__(self, p, n, a, b, g):
self.p = p
self.n = n
self.a = a
self.b = b
self.g = g
self.n_length = len(bin(self.n).replace("0b", ""))
def isinf(self, p):
return p[0] == 0 and p[1] == 0
def to_jacobian(self, p):
return p[0], p[1], 1
def jacobian_double(self, p):
if not p[1]:
return 0, 0, 0
ysq = (p[1]**2) % self.p
s = (4 * p[0] * ysq) % self.p
m = (3 * p[0]**2 + self.a * p[2]**4) % self.p
nx = (m**2 - 2 * s) % self.p
ny = (m * (s - nx) - 8 * ysq**2) % self.p
nz = (2 * p[1] * p[2]) % self.p
return nx, ny, nz
def jacobian_add(self, p, q):
if not p[1]:
return q
if not q[1]:
return p
u1 = (p[0] * q[2]**2) % self.p
u2 = (q[0] * p[2]**2) % self.p
s1 = (p[1] * q[2]**3) % self.p
s2 = (q[1] * p[2]**3) % self.p
if u1 == u2:
if s1 != s2:
return (0, 0, 1)
return self.jacobian_double(p)
h = u2 - u1
r = s2 - s1
h2 = (h * h) % self.p
h3 = (h * h2) % self.p
u1h2 = (u1 * h2) % self.p
nx = (r**2 - h3 - 2 * u1h2) % self.p
ny = (r * (u1h2 - nx) - s1 * h3) % self.p
nz = (h * p[2] * q[2]) % self.p
return (nx, ny, nz)
def from_jacobian(self, p):
z = inverse(p[2], self.p)
return (p[0] * z**2) % self.p, (p[1] * z**3) % self.p
def jacobian_multiply(self, a, n, secret=False):
if a[1] == 0 or n == 0:
return 0, 0, 1
if n == 1:
return a
if n < 0 or n >= self.n:
return self.jacobian_multiply(a, n % self.n, secret)
half = self.jacobian_multiply(a, n // 2, secret)
half_sq = self.jacobian_double(half)
if secret:
# A constant-time implementation
half_sq_a = self.jacobian_add(half_sq, a)
if n % 2 == 0:
result = half_sq
if n % 2 == 1:
result = half_sq_a
return result
else:
if n % 2 == 0:
return half_sq
return self.jacobian_add(half_sq, a)
def fast_multiply(self, a, n, secret=False):
return self.from_jacobian(
self.jacobian_multiply(self.to_jacobian(a), n, secret))
class EllipticCurveBackend:
def __init__(self, p, n, a, b, g):
self.p, self.n, self.a, self.b, self.g = p, n, a, b, g
self.jacobian = JacobianCurve(p, n, a, b, g)
self.public_key_length = (len(bin(p).replace("0b", "")) + 7) // 8
self.order_bitlength = len(bin(n).replace("0b", ""))
def _int_to_bytes(self, raw, len=None):
return int_to_bytes(raw, len or self.public_key_length)
def decompress_point(self, public_key):
# Parse & load data
x = bytes_to_int(public_key[1:])
# Calculate Y
y_square = (pow(x, 3, self.p) + self.a * x + self.b) % self.p
try:
y = square_root_mod_prime(y_square, self.p)
except Exception:
raise ValueError("Invalid public key") from None
if y % 2 != public_key[0] - 0x02:
y = self.p - y
return self._int_to_bytes(x), self._int_to_bytes(y)
def new_private_key(self):
while True:
private_key = os.urandom(self.public_key_length)
if bytes_to_int(private_key) >= self.n:
continue
return private_key
def private_to_public(self, private_key):
raw = bytes_to_int(private_key)
x, y = self.jacobian.fast_multiply(self.g, raw)
return self._int_to_bytes(x), self._int_to_bytes(y)
def ecdh(self, private_key, public_key):
x, y = public_key
x, y = bytes_to_int(x), bytes_to_int(y)
private_key = bytes_to_int(private_key)
x, _ = self.jacobian.fast_multiply((x, y), private_key, secret=True)
return self._int_to_bytes(x)
def _subject_to_int(self, subject):
return bytes_to_int(subject[:(self.order_bitlength + 7) // 8])
class ECC:
# pylint: disable=line-too-long
# name: (nid, p, n, a, b, (Gx, Gy)),
CURVE = (
714,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141, 0,
7,
(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8))
# pylint: enable=line-too-long
def __init__(self, backend, aes):
self._backend = backend
self._aes = aes
def get_curve(self):
nid, p, n, a, b, g = self.CURVE
params = {"p": p, "n": n, "a": a, "b": b, "g": g}
return EllipticCurve(self._backend, params, self._aes, nid)
class EllipticCurve:
def __init__(self, backend_factory, params, aes, nid):
self._backend = backend_factory(**params)
self.params = params
self._aes = aes
self.nid = nid
def _encode_public_key(self, x, y, is_compressed=True, raw=True):
if raw:
if is_compressed:
return bytes([0x02 + (y[-1] % 2)]) + x
else:
return bytes([0x04]) + x + y
else:
return struct.pack("!HH", self.nid, len(x)) + x + struct.pack(
"!H", len(y)) + y
def _decode_public_key(self, public_key, partial=False):
if not public_key:
raise ValueError("No public key")
if public_key[0] == 0x04:
# Uncompressed
expected_length = 1 + 2 * self._backend.public_key_length
if partial:
if len(public_key) < expected_length:
raise ValueError("Invalid uncompressed public key length")
else:
if len(public_key) != expected_length:
raise ValueError("Invalid uncompressed public key length")
x = public_key[1:1 + self._backend.public_key_length]
y = public_key[1 + self._backend.public_key_length:expected_length]
if partial:
return (x, y), expected_length
else:
return x, y
elif public_key[0] in (0x02, 0x03):
# Compressed
expected_length = 1 + self._backend.public_key_length
if partial:
if len(public_key) < expected_length:
raise ValueError("Invalid compressed public key length")
else:
if len(public_key) != expected_length:
raise ValueError("Invalid compressed public key length")
x, y = self._backend.decompress_point(public_key[:expected_length])
# Sanity check
if x != public_key[1:expected_length]:
raise ValueError("Incorrect compressed public key")
if partial:
return (x, y), expected_length
else:
return x, y
else:
raise ValueError("Invalid public key prefix")
def decode_public_key(self, public_key):
return self._decode_public_key(public_key) |
def private_to_public(self, private_key):
if len(private_key) == self._backend.public_key_length:
is_compressed = False
elif len(
private_key
) == self._backend.public_key_length + 1 and private_key[-1] == 1:
is_compressed = True
private_key = private_key[:-1]
else:
raise ValueError("Private key has invalid length")
x, y = self._backend.private_to_public(private_key)
return self._encode_public_key(x, y, is_compressed=is_compressed)
def _digest(self, data, hash):
if hash is None:
return data
elif callable(hash):
return hash(data)
elif hash == "sha1":
return hashlib.sha1(data).digest()
elif hash == "sha256":
return hashlib.sha256(data).digest()
elif hash == "sha512":
return hashlib.sha512(data).digest()
else:
raise ValueError("Unknown hash/derivation method")
ecc = ECC(EllipticCurveBackend, None)
curve = ecc.get_curve() |
def new_private_key(self, is_compressed=False):
return self._backend.new_private_key() + (b"\x01"
if is_compressed else b"") | random_line_split |
eccrypto.py | import os
import hashlib
import struct
def int_to_bytes(raw, length):
data = []
for _ in range(length):
data.append(raw % 256)
raw //= 256
return bytes(data[::-1])
def bytes_to_int(data):
raw = 0
for byte in data:
raw = raw * 256 + byte
return raw
def legendre(a, p):
res = pow(a, (p - 1) // 2, p)
if res == p - 1:
return -1
else:
return res
def inverse(a, n):
if a == 0:
return 0
lm, hm = 1, 0
low, high = a % n, n
while low > 1:
r = high // low
nm, new = hm - lm * r, high - low * r
lm, low, hm, high = nm, new, lm, low
return lm % n
def square_root_mod_prime(n, p):
if n == 0:
return 0
if p == 2:
return n # We should never get here but it might be useful
if legendre(n, p) != 1:
raise ValueError("No square root")
# Optimizations
if p % 4 == 3:
return pow(n, (p + 1) // 4, p)
# 1. By factoring out powers of 2, find Q and S such that p - 1 =
# Q * 2 ** S with Q odd
q = p - 1
s = 0
while q % 2 == 0:
|
# 2. Search for z in Z/pZ which is a quadratic non-residue
z = 1
while legendre(z, p) != -1:
z += 1
m, c, t, r = s, pow(z, q, p), pow(n, q, p), pow(n, (q + 1) // 2, p)
while True:
if t == 0:
return 0
elif t == 1:
return r
# Use repeated squaring to find the least i, 0 < i < M, such
# that t ** (2 ** i) = 1
t_sq = t
i = 0
for i in range(1, m):
t_sq = t_sq * t_sq % p
if t_sq == 1:
break
else:
raise ValueError("Should never get here")
# Let b = c ** (2 ** (m - i - 1))
b = pow(c, 2**(m - i - 1), p)
m = i
c = b * b % p
t = t * b * b % p
r = r * b % p
return r
class JacobianCurve:
def __init__(self, p, n, a, b, g):
self.p = p
self.n = n
self.a = a
self.b = b
self.g = g
self.n_length = len(bin(self.n).replace("0b", ""))
def isinf(self, p):
return p[0] == 0 and p[1] == 0
def to_jacobian(self, p):
return p[0], p[1], 1
def jacobian_double(self, p):
if not p[1]:
return 0, 0, 0
ysq = (p[1]**2) % self.p
s = (4 * p[0] * ysq) % self.p
m = (3 * p[0]**2 + self.a * p[2]**4) % self.p
nx = (m**2 - 2 * s) % self.p
ny = (m * (s - nx) - 8 * ysq**2) % self.p
nz = (2 * p[1] * p[2]) % self.p
return nx, ny, nz
def jacobian_add(self, p, q):
if not p[1]:
return q
if not q[1]:
return p
u1 = (p[0] * q[2]**2) % self.p
u2 = (q[0] * p[2]**2) % self.p
s1 = (p[1] * q[2]**3) % self.p
s2 = (q[1] * p[2]**3) % self.p
if u1 == u2:
if s1 != s2:
return (0, 0, 1)
return self.jacobian_double(p)
h = u2 - u1
r = s2 - s1
h2 = (h * h) % self.p
h3 = (h * h2) % self.p
u1h2 = (u1 * h2) % self.p
nx = (r**2 - h3 - 2 * u1h2) % self.p
ny = (r * (u1h2 - nx) - s1 * h3) % self.p
nz = (h * p[2] * q[2]) % self.p
return (nx, ny, nz)
def from_jacobian(self, p):
z = inverse(p[2], self.p)
return (p[0] * z**2) % self.p, (p[1] * z**3) % self.p
def jacobian_multiply(self, a, n, secret=False):
if a[1] == 0 or n == 0:
return 0, 0, 1
if n == 1:
return a
if n < 0 or n >= self.n:
return self.jacobian_multiply(a, n % self.n, secret)
half = self.jacobian_multiply(a, n // 2, secret)
half_sq = self.jacobian_double(half)
if secret:
# A constant-time implementation
half_sq_a = self.jacobian_add(half_sq, a)
if n % 2 == 0:
result = half_sq
if n % 2 == 1:
result = half_sq_a
return result
else:
if n % 2 == 0:
return half_sq
return self.jacobian_add(half_sq, a)
def fast_multiply(self, a, n, secret=False):
return self.from_jacobian(
self.jacobian_multiply(self.to_jacobian(a), n, secret))
class EllipticCurveBackend:
def __init__(self, p, n, a, b, g):
self.p, self.n, self.a, self.b, self.g = p, n, a, b, g
self.jacobian = JacobianCurve(p, n, a, b, g)
self.public_key_length = (len(bin(p).replace("0b", "")) + 7) // 8
self.order_bitlength = len(bin(n).replace("0b", ""))
def _int_to_bytes(self, raw, len=None):
return int_to_bytes(raw, len or self.public_key_length)
def decompress_point(self, public_key):
# Parse & load data
x = bytes_to_int(public_key[1:])
# Calculate Y
y_square = (pow(x, 3, self.p) + self.a * x + self.b) % self.p
try:
y = square_root_mod_prime(y_square, self.p)
except Exception:
raise ValueError("Invalid public key") from None
if y % 2 != public_key[0] - 0x02:
y = self.p - y
return self._int_to_bytes(x), self._int_to_bytes(y)
def new_private_key(self):
while True:
private_key = os.urandom(self.public_key_length)
if bytes_to_int(private_key) >= self.n:
continue
return private_key
def private_to_public(self, private_key):
raw = bytes_to_int(private_key)
x, y = self.jacobian.fast_multiply(self.g, raw)
return self._int_to_bytes(x), self._int_to_bytes(y)
def ecdh(self, private_key, public_key):
x, y = public_key
x, y = bytes_to_int(x), bytes_to_int(y)
private_key = bytes_to_int(private_key)
x, _ = self.jacobian.fast_multiply((x, y), private_key, secret=True)
return self._int_to_bytes(x)
def _subject_to_int(self, subject):
return bytes_to_int(subject[:(self.order_bitlength + 7) // 8])
class ECC:
# pylint: disable=line-too-long
# name: (nid, p, n, a, b, (Gx, Gy)),
CURVE = (
714,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141, 0,
7,
(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8))
# pylint: enable=line-too-long
def __init__(self, backend, aes):
self._backend = backend
self._aes = aes
def get_curve(self):
nid, p, n, a, b, g = self.CURVE
params = {"p": p, "n": n, "a": a, "b": b, "g": g}
return EllipticCurve(self._backend, params, self._aes, nid)
class EllipticCurve:
def __init__(self, backend_factory, params, aes, nid):
self._backend = backend_factory(**params)
self.params = params
self._aes = aes
self.nid = nid
def _encode_public_key(self, x, y, is_compressed=True, raw=True):
if raw:
if is_compressed:
return bytes([0x02 + (y[-1] % 2)]) + x
else:
return bytes([0x04]) + x + y
else:
return struct.pack("!HH", self.nid, len(x)) + x + struct.pack(
"!H", len(y)) + y
def _decode_public_key(self, public_key, partial=False):
if not public_key:
raise ValueError("No public key")
if public_key[0] == 0x04:
# Uncompressed
expected_length = 1 + 2 * self._backend.public_key_length
if partial:
if len(public_key) < expected_length:
raise ValueError("Invalid uncompressed public key length")
else:
if len(public_key) != expected_length:
raise ValueError("Invalid uncompressed public key length")
x = public_key[1:1 + self._backend.public_key_length]
y = public_key[1 + self._backend.public_key_length:expected_length]
if partial:
return (x, y), expected_length
else:
return x, y
elif public_key[0] in (0x02, 0x03):
# Compressed
expected_length = 1 + self._backend.public_key_length
if partial:
if len(public_key) < expected_length:
raise ValueError("Invalid compressed public key length")
else:
if len(public_key) != expected_length:
raise ValueError("Invalid compressed public key length")
x, y = self._backend.decompress_point(public_key[:expected_length])
# Sanity check
if x != public_key[1:expected_length]:
raise ValueError("Incorrect compressed public key")
if partial:
return (x, y), expected_length
else:
return x, y
else:
raise ValueError("Invalid public key prefix")
def decode_public_key(self, public_key):
return self._decode_public_key(public_key)
def new_private_key(self, is_compressed=False):
return self._backend.new_private_key() + (b"\x01"
if is_compressed else b"")
def private_to_public(self, private_key):
if len(private_key) == self._backend.public_key_length:
is_compressed = False
elif len(
private_key
) == self._backend.public_key_length + 1 and private_key[-1] == 1:
is_compressed = True
private_key = private_key[:-1]
else:
raise ValueError("Private key has invalid length")
x, y = self._backend.private_to_public(private_key)
return self._encode_public_key(x, y, is_compressed=is_compressed)
def _digest(self, data, hash):
if hash is None:
return data
elif callable(hash):
return hash(data)
elif hash == "sha1":
return hashlib.sha1(data).digest()
elif hash == "sha256":
return hashlib.sha256(data).digest()
elif hash == "sha512":
return hashlib.sha512(data).digest()
else:
raise ValueError("Unknown hash/derivation method")
ecc = ECC(EllipticCurveBackend, None)
curve = ecc.get_curve()
| q //= 2
s += 1 | conditional_block |
github.go | package github
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/oauth2"
githubO2 "golang.org/x/oauth2/github"
"github.com/google/go-github/github"
"github.com/watchly/ngbuild/core"
)
var oauth2State = fmt.Sprintf("%d%d%d", os.Getuid(), os.Getpid(), time.Now().Unix())
type pullRequestStatus struct {
pull *github.PullRequest
currentBuild string // build token
mergeOnPass bool
}
type githubConfig struct {
ClientID string `mapstructure:"clientID"`
ClientSecret string `mapstructure:"clientSecret"`
Owner string `mapstructure:"owner"`
Repo string `mapstructure:"repo"`
IgnoredBranches []string `mapstructure:"ignoredBranches"`
PublicKey string `mapstructure:"publicKey"`
BuildBranches []string `mapstructure:"buildBranches"`
CancelOnNewCommit bool `mapstructure:"cancelOnNewCommit"`
MergeOnPass bool `mapstructure:"mergeOnPass"`
MergeOnPassAuthWords []string `mapstructure:"mergeOnPassAuthWords"`
}
type githubApp struct {
app core.App
config githubConfig
}
// Github ...
type Github struct {
m sync.RWMutex
globalConfig githubConfig
apps map[string]*githubApp
client *github.Client
clientID, clientSecret string
clientHasSet *sync.Cond
trackedPullRequests map[string]pullRequestStatus
trackedBuilds []core.Build
}
// New ...
func New() *Github {
g := &Github{
clientHasSet: sync.NewCond(&sync.Mutex{}),
apps: make(map[string]*githubApp),
trackedPullRequests: make(map[string]pullRequestStatus),
}
http.HandleFunc("/cb/auth/github", g.handleGithubAuth)
http.HandleFunc("/cb/github/hook/", g.handleGithubEvent)
return g
}
// Identifier ...
func (g *Github) Identifier() string { return "github" }
// IsProvider ...
func (g *Github) IsProvider(source string) bool {
loginfof("Asked to provide for %s", source)
return strings.HasPrefix(source, "git@github.com:") || source == ""
}
// ProvideFor ...
func (g *Github) ProvideFor(config *core.BuildConfig, directory string) error {
// FIXME, need to git checkout the given config
return g.cloneAndMerge(directory, config)
}
func (g *Github) handleGithubAuth(resp http.ResponseWriter, req *http.Request) {
q := req.URL.Query()
state := q.Get("state")
if state != oauth2State {
resp.Write([]byte("OAuth2 state was incorrect, something bad happened between Github and us"))
return
}
code := q.Get("code")
cfg := g.getOauthConfig()
token, err := cfg.Exchange(context.Background(), code)
if err != nil {
resp.Write([]byte("Error exchanging OAuth code, something bad happened between Github and us: " + err.Error()))
return
}
core.StoreCache("github:token", token.AccessToken)
g.setClient(token)
resp.Write([]byte("Thanks! you can close this tab now."))
}
func (g *Github) getOauthConfig() *oauth2.Config {
return &oauth2.Config{
ClientID: g.globalConfig.ClientID,
ClientSecret: g.globalConfig.ClientSecret,
Endpoint: githubO2.Endpoint,
Scopes: []string{"repo"},
}
}
func (g *Github) setClient(token *oauth2.Token) {
ts := g.getOauthConfig().TokenSource(oauth2.NoContext, token)
tc := oauth2.NewClient(oauth2.NoContext, ts)
g.client = github.NewClient(tc)
g.clientHasSet.Broadcast()
}
func (g *Github) acquireOauthToken() {
token := core.GetCache("github:token")
if token != "" {
oauth2Token := oauth2.Token{AccessToken: token}
g.setClient(&oauth2Token)
return
}
fmt.Println("")
fmt.Println("This app must be authenticated with github, please visit the following URL to authenticate this app")
fmt.Println(g.getOauthConfig().AuthCodeURL(oauth2State, oauth2.AccessTypeOffline))
fmt.Println("")
}
func (g *Github) init(app core.App) {
if g.client == nil {
app.Config("github", &g.globalConfig)
if g.globalConfig.ClientID == "" || g.globalConfig.ClientSecret == "" {
fmt.Println("Invalid github configuration, missing ClientID/ClientSecret")
} else {
g.clientHasSet.L.Lock()
g.acquireOauthToken()
for g.client == nil {
fmt.Println("Waiting for github authentication response...")
g.clientHasSet.Wait()
}
fmt.Println("Got authentication response")
if repos, _, err := g.client.Repositories.List("", nil); err != nil {
logcritf("Couldn't get repos list after authenticating, something has gone wrong, clear cache and retry")
} else {
fmt.Println("Found repositories:")
for _, repo := range repos {
repostr := fmt.Sprintf("%s/%s ", *repo.Owner.Login, *repo.Name)
if *repo.Private == true {
repostr += "🔒"
}
if *repo.Fork == true {
repostr += "🍴"
}
fmt.Println(repostr)
}
}
g.clientHasSet.L.Unlock()
}
}
}
// AttachToApp ...
func (g *Github) AttachToApp(app core.App) error {
g.m.Lock()
defer g.m.Unlock()
g.init(app)
appConfig := &githubApp{
app: app,
}
app.Config("github", &appConfig.config)
g.apps[app.Name()] = appConfig
g.setupDeployKey(appConfig)
g.setupHooks(appConfig)
app.Listen(core.SignalBuildProvisioning, g.onBuildStarted)
app.Listen(core.SignalBuildComplete, g.onBuildFinished)
return nil
}
func (g *Github) setupDeployKey(appConfig *githubApp) error {
cfg := appConfig.config
// TODO - would be nicer to generate ssh key automatically
if cfg.PublicKey == "" {
logcritf("(%s) No public key available, create one and add it to the configuration", appConfig.app.Name())
return errors.New("No pub key available")
}
keyName := fmt.Sprintf("NGBuild ssh deploy key - %s", appConfig.app.Name())
_, _, err := g.client.Repositories.CreateKey(cfg.Owner, cfg.Repo, &github.Key{
Title: &keyName,
Key: &cfg.PublicKey,
ReadOnly: &[]bool{true}[0],
})
if err != nil && strings.Contains(err.Error(), "key is already in use") == false {
logcritf("Couldn't create deploy key for %s: %s", appConfig.app.Name(), err)
return err
}
return nil
}
func (g *Github) setupHooks(appConfig *githubApp) {
cfg := appConfig.config
_, _, err := g.client.Repositories.Get(cfg.Owner, cfg.Repo)
if err != nil {
logwarnf("(%s) Repository does not exist, owner=%s, repo=%s", appConfig.app.Name(), cfg.Owner, cfg.Repo)
return
}
hookURL := fmt.Sprintf("%s/cb/github/hook/%s", core.GetHTTPServerURL(), appConfig.app.Name())
_, _, err = g.client.Repositories.CreateHook(cfg.Owner, cfg.Repo, &github.Hook{
Name: &[]string{"web"}[0],
Active: &[]bool{true}[0],
Config: map[string]interface{}{
"url": hookURL,
"content_type": "json",
},
Events: []string{"pull_request",
"delete",
"issue_comment",
"pull_request_review",
"pull_request_review_event",
"push",
"status",
},
})
if err != nil && strings.Contains(err.Error(), "Hook already exists") == false {
logwarnf("Could not create webhook, owner=%s, repo=%s: %s", cfg.Owner, cfg.Repo, err)
return
}
}
// Shutdown ...
func (g *Github) Shutdown() {}
// hold the g.m lock when you call this
func (g *Github) trackBuild(build core.Build) {
for _, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
return
}
}
build.Ref()
g.trackedBuilds = append(g.trackedBuilds, build)
}
// hold the g.m.lock when you call this
func (g *Github) untrackBuild(build core.Build) {
buildIndex := -1
for i, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
buildIndex = i
break
}
}
if buildIndex < 0 {
return
}
g.trackedBuilds[buildIndex].Unref()
g.trackedBuilds = append(g.trackedBuilds[:buildIndex], g.trackedBuilds[buildIndex+1:]...)
}
func (g *Github) trackPullRequest(app *githubApp, event *github.PullRequestEvent) {
if event.PullRequest == nil {
logcritf("pull request is nil")
return
}
pull := event.PullRequest
pullID := strconv.Itoa(*pull.ID)
// first thing we need to do is check to see if this pull request comes from a collaborator
// otherwise we are letting randos run arbutary code on our system. this will be essentially until
// we have some filesystem container system
owner := *pull.Base.Repo.Owner.Login
repo := *pull.Base.Repo.Name
user := *pull.User.Login
isCollaborator, _, err := g.client.Repositories.IsCollaborator(owner, repo, user)
if err != nil {
logcritf("Couldn't check collaborator status on %s: %s", pullID, err)
return
} else if isCollaborator == false {
logwarnf("Ignoring pull request %s, non collaborator: %s", pullID, user)
return
}
g.m.Lock()
defer g.m.Unlock()
// check for ignored branches
for _, branchIgnore := range app.config.IgnoredBranches {
if branchIgnore == *pull.Base.Ref {
logwarnf("Ignoring pull request %s, is an ignored branch", pullID)
return
}
}
g.trackedPullRequests[pullID] = pullRequestStatus{
pull: pull,
}
g.buildPullRequest(app, pull)
}
func (g *Github) buildPullRequest(app *githubApp, pull *github.PullRequest) {
// for reference, head is the proposed branch, base is the branch to merge into
pullID := strconv.Itoa(*pull.ID)
loginfof("Building pull request: %s", pullID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
status = pullRequestStatus{pull, "", false}
g.trackedPullRequests[pullID] = status
}
// we want to check to see if we are already building or already built this commit
// and we want to cancel the previous build
if build, _ := app.app.GetBuild(status.currentBuild); build != nil {
if build.Config().GetMetadata("github:HeadHash") == *pull.Head.SHA {
logwarnf("Already building/built this commit")
return
}
if app.config.CancelOnNewCommit {
build.Stop()
}
}
headBranch := *pull.Head.Ref
headCloneURL := *pull.Head.Repo.SSHURL
headCommit := *pull.Head.SHA
headOwner := *pull.Head.Repo.Owner.Login
headRepo := *pull.Head.Repo.Name
baseBranch := *pull.Base.Ref
baseCloneURL := *pull.Base.Repo.SSHURL
baseOwner := *pull.Base.Repo.Owner.Login
baseRepo := *pull.Base.Repo.Name
baseCommit := *pull.Base.SHA
buildConfig := core.NewBuildConfig()
buildConfig.Title = *pull.Title
buildConfig.URL = *pull.HTMLURL
buildConfig.HeadRepo = headCloneURL
buildConfig.HeadBranch = headBranch
buildConfig.HeadHash = headCommit
buildConfig.BaseRepo = baseCloneURL
buildConfig.BaseBranch = baseBranch
buildConfig.BaseHash = ""
buildConfig.Group = pullID
buildConfig.SetMetadata("github:BuildType", "pullrequest")
buildConfig.SetMetadata("github:PullRequestID", pullID)
buildConfig.SetMetadata("github:PullNumber", fmt.Sprintf("%d", *pull.Number))
buildConfig.SetMetadata("github:HeadHash", headCommit)
buildConfig.SetMetadata("github:HeadOwner", headOwner)
buildConfig.SetMetadata("github:HeadRepo", headRepo)
buildConfig.SetMetadata("github:BaseHash", baseCommit)
buildConfig.SetMetadata("github:BaseOwner", baseOwner)
buildConfig.SetMetadata("github:BaseRepo", baseRepo)
buildToken, err := app.app.NewBuild(buildConfig.Group, buildConfig)
if err != nil {
logcritf("Couldn't start build for %d", *pull.ID)
return
}
build, err := app.app.GetBuild(buildToken)
if err != nil || build == nil {
logcritf("Couldn't get build for %d", *pull.ID)
return
}
status.currentBuild = buildToken
g.trackedPullRequests[pullID] = status
loginfof("started build: %s", buildToken)
}
func (g *Github) updatePullRequest(app *githubApp, event *github.PullRequestEvent) {
// this is called when there is a new commit on the pull request or something like that
pullID := strconv.Itoa(*event.PullRequest.ID)
g.m.RLock()
_, ok := g.trackedPullRequests[pullID]
g.m.RUnlock()
if ok == false {
logwarnf("event on unknown/ignored pull request: %s", pullID)
g.trackPullRequest(app, event)
return
}
g.buildPullRequest(app, event.PullRequest)
}
func (g *Github) closedPullRequest(app *githubApp, event *github.PullRequestEvent) {
g.m.RLock()
defer g.m.RUnlock()
pullID := strconv.Itoa(*event.PullRequest.ID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
return
}
if build, _ := app.app.GetBuild(status.currentBuild); build != nil {
if app.config.CancelOnNewCommit {
build.Stop()
}
}
delete(g.trackedPullRequests, pullID)
}
func loginfof(str string, args ...interface{}) (ret string) { | ret = fmt.Sprintf("github-info: "+str+"\n", args...)
fmt.Printf(ret)
return ret
}
func logwarnf(str string, args ...interface{}) (ret string) {
ret = fmt.Sprintf("github-warn: "+str+"\n", args...)
fmt.Printf(ret)
return ret
}
func logcritf(str string, args ...interface{}) (ret string) {
ret = fmt.Sprintf("github-crit: "+str+"\n", args...)
fmt.Printf(ret)
return ret
} | random_line_split | |
github.go | package github
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/oauth2"
githubO2 "golang.org/x/oauth2/github"
"github.com/google/go-github/github"
"github.com/watchly/ngbuild/core"
)
var oauth2State = fmt.Sprintf("%d%d%d", os.Getuid(), os.Getpid(), time.Now().Unix())
type pullRequestStatus struct {
pull *github.PullRequest
currentBuild string // build token
mergeOnPass bool
}
type githubConfig struct {
ClientID string `mapstructure:"clientID"`
ClientSecret string `mapstructure:"clientSecret"`
Owner string `mapstructure:"owner"`
Repo string `mapstructure:"repo"`
IgnoredBranches []string `mapstructure:"ignoredBranches"`
PublicKey string `mapstructure:"publicKey"`
BuildBranches []string `mapstructure:"buildBranches"`
CancelOnNewCommit bool `mapstructure:"cancelOnNewCommit"`
MergeOnPass bool `mapstructure:"mergeOnPass"`
MergeOnPassAuthWords []string `mapstructure:"mergeOnPassAuthWords"`
}
type githubApp struct {
app core.App
config githubConfig
}
// Github ...
type Github struct {
m sync.RWMutex
globalConfig githubConfig
apps map[string]*githubApp
client *github.Client
clientID, clientSecret string
clientHasSet *sync.Cond
trackedPullRequests map[string]pullRequestStatus
trackedBuilds []core.Build
}
// New ...
func New() *Github {
g := &Github{
clientHasSet: sync.NewCond(&sync.Mutex{}),
apps: make(map[string]*githubApp),
trackedPullRequests: make(map[string]pullRequestStatus),
}
http.HandleFunc("/cb/auth/github", g.handleGithubAuth)
http.HandleFunc("/cb/github/hook/", g.handleGithubEvent)
return g
}
// Identifier ...
func (g *Github) Identifier() string { return "github" }
// IsProvider ...
func (g *Github) IsProvider(source string) bool {
loginfof("Asked to provide for %s", source)
return strings.HasPrefix(source, "git@github.com:") || source == ""
}
// ProvideFor ...
func (g *Github) ProvideFor(config *core.BuildConfig, directory string) error {
// FIXME, need to git checkout the given config
return g.cloneAndMerge(directory, config)
}
func (g *Github) handleGithubAuth(resp http.ResponseWriter, req *http.Request) {
q := req.URL.Query()
state := q.Get("state")
if state != oauth2State {
resp.Write([]byte("OAuth2 state was incorrect, something bad happened between Github and us"))
return
}
code := q.Get("code")
cfg := g.getOauthConfig()
token, err := cfg.Exchange(context.Background(), code)
if err != nil {
resp.Write([]byte("Error exchanging OAuth code, something bad happened between Github and us: " + err.Error()))
return
}
core.StoreCache("github:token", token.AccessToken)
g.setClient(token)
resp.Write([]byte("Thanks! you can close this tab now."))
}
func (g *Github) getOauthConfig() *oauth2.Config {
return &oauth2.Config{
ClientID: g.globalConfig.ClientID,
ClientSecret: g.globalConfig.ClientSecret,
Endpoint: githubO2.Endpoint,
Scopes: []string{"repo"},
}
}
func (g *Github) setClient(token *oauth2.Token) {
ts := g.getOauthConfig().TokenSource(oauth2.NoContext, token)
tc := oauth2.NewClient(oauth2.NoContext, ts)
g.client = github.NewClient(tc)
g.clientHasSet.Broadcast()
}
func (g *Github) acquireOauthToken() {
token := core.GetCache("github:token")
if token != "" {
oauth2Token := oauth2.Token{AccessToken: token}
g.setClient(&oauth2Token)
return
}
fmt.Println("")
fmt.Println("This app must be authenticated with github, please visit the following URL to authenticate this app")
fmt.Println(g.getOauthConfig().AuthCodeURL(oauth2State, oauth2.AccessTypeOffline))
fmt.Println("")
}
func (g *Github) init(app core.App) {
if g.client == nil {
app.Config("github", &g.globalConfig)
if g.globalConfig.ClientID == "" || g.globalConfig.ClientSecret == "" {
fmt.Println("Invalid github configuration, missing ClientID/ClientSecret")
} else {
g.clientHasSet.L.Lock()
g.acquireOauthToken()
for g.client == nil {
fmt.Println("Waiting for github authentication response...")
g.clientHasSet.Wait()
}
fmt.Println("Got authentication response")
if repos, _, err := g.client.Repositories.List("", nil); err != nil {
logcritf("Couldn't get repos list after authenticating, something has gone wrong, clear cache and retry")
} else {
fmt.Println("Found repositories:")
for _, repo := range repos {
repostr := fmt.Sprintf("%s/%s ", *repo.Owner.Login, *repo.Name)
if *repo.Private == true {
repostr += "🔒"
}
if *repo.Fork == true {
repostr += "🍴"
}
fmt.Println(repostr)
}
}
g.clientHasSet.L.Unlock()
}
}
}
// AttachToApp ...
func (g *Github) AttachToApp(app core.App) error {
g.m.Lock()
defer g.m.Unlock()
g.init(app)
appConfig := &githubApp{
app: app,
}
app.Config("github", &appConfig.config)
g.apps[app.Name()] = appConfig
g.setupDeployKey(appConfig)
g.setupHooks(appConfig)
app.Listen(core.SignalBuildProvisioning, g.onBuildStarted)
app.Listen(core.SignalBuildComplete, g.onBuildFinished)
return nil
}
func (g *Github) setupDeployKey(appConfig *githubApp) error {
cfg := appConfig.config
// TODO - would be nicer to generate ssh key automatically
if cfg.PublicKey == "" {
logcritf("(%s) No public key available, create one and add it to the configuration", appConfig.app.Name())
return errors.New("No pub key available")
}
keyName := fmt.Sprintf("NGBuild ssh deploy key - %s", appConfig.app.Name())
_, _, err := g.client.Repositories.CreateKey(cfg.Owner, cfg.Repo, &github.Key{
Title: &keyName,
Key: &cfg.PublicKey,
ReadOnly: &[]bool{true}[0],
})
if err != nil && strings.Contains(err.Error(), "key is already in use") == false {
lo | urn nil
}
func (g *Github) setupHooks(appConfig *githubApp) {
cfg := appConfig.config
_, _, err := g.client.Repositories.Get(cfg.Owner, cfg.Repo)
if err != nil {
logwarnf("(%s) Repository does not exist, owner=%s, repo=%s", appConfig.app.Name(), cfg.Owner, cfg.Repo)
return
}
hookURL := fmt.Sprintf("%s/cb/github/hook/%s", core.GetHTTPServerURL(), appConfig.app.Name())
_, _, err = g.client.Repositories.CreateHook(cfg.Owner, cfg.Repo, &github.Hook{
Name: &[]string{"web"}[0],
Active: &[]bool{true}[0],
Config: map[string]interface{}{
"url": hookURL,
"content_type": "json",
},
Events: []string{"pull_request",
"delete",
"issue_comment",
"pull_request_review",
"pull_request_review_event",
"push",
"status",
},
})
if err != nil && strings.Contains(err.Error(), "Hook already exists") == false {
logwarnf("Could not create webhook, owner=%s, repo=%s: %s", cfg.Owner, cfg.Repo, err)
return
}
}
// Shutdown ...
func (g *Github) Shutdown() {}
// hold the g.m lock when you call this
func (g *Github) trackBuild(build core.Build) {
for _, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
return
}
}
build.Ref()
g.trackedBuilds = append(g.trackedBuilds, build)
}
// hold the g.m.lock when you call this
func (g *Github) untrackBuild(build core.Build) {
buildIndex := -1
for i, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
buildIndex = i
break
}
}
if buildIndex < 0 {
return
}
g.trackedBuilds[buildIndex].Unref()
g.trackedBuilds = append(g.trackedBuilds[:buildIndex], g.trackedBuilds[buildIndex+1:]...)
}
func (g *Github) trackPullRequest(app *githubApp, event *github.PullRequestEvent) {
if event.PullRequest == nil {
logcritf("pull request is nil")
return
}
pull := event.PullRequest
pullID := strconv.Itoa(*pull.ID)
// first thing we need to do is check to see if this pull request comes from a collaborator
// otherwise we are letting randos run arbutary code on our system. this will be essentially until
// we have some filesystem container system
owner := *pull.Base.Repo.Owner.Login
repo := *pull.Base.Repo.Name
user := *pull.User.Login
isCollaborator, _, err := g.client.Repositories.IsCollaborator(owner, repo, user)
if err != nil {
logcritf("Couldn't check collaborator status on %s: %s", pullID, err)
return
} else if isCollaborator == false {
logwarnf("Ignoring pull request %s, non collaborator: %s", pullID, user)
return
}
g.m.Lock()
defer g.m.Unlock()
// check for ignored branches
for _, branchIgnore := range app.config.IgnoredBranches {
if branchIgnore == *pull.Base.Ref {
logwarnf("Ignoring pull request %s, is an ignored branch", pullID)
return
}
}
g.trackedPullRequests[pullID] = pullRequestStatus{
pull: pull,
}
g.buildPullRequest(app, pull)
}
func (g *Github) buildPullRequest(app *githubApp, pull *github.PullRequest) {
// for reference, head is the proposed branch, base is the branch to merge into
pullID := strconv.Itoa(*pull.ID)
loginfof("Building pull request: %s", pullID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
status = pullRequestStatus{pull, "", false}
g.trackedPullRequests[pullID] = status
}
// we want to check to see if we are already building or already built this commit
// and we want to cancel the previous build
if build, _ := app.app.GetBuild(status.currentBuild); build != nil {
if build.Config().GetMetadata("github:HeadHash") == *pull.Head.SHA {
logwarnf("Already building/built this commit")
return
}
if app.config.CancelOnNewCommit {
build.Stop()
}
}
headBranch := *pull.Head.Ref
headCloneURL := *pull.Head.Repo.SSHURL
headCommit := *pull.Head.SHA
headOwner := *pull.Head.Repo.Owner.Login
headRepo := *pull.Head.Repo.Name
baseBranch := *pull.Base.Ref
baseCloneURL := *pull.Base.Repo.SSHURL
baseOwner := *pull.Base.Repo.Owner.Login
baseRepo := *pull.Base.Repo.Name
baseCommit := *pull.Base.SHA
buildConfig := core.NewBuildConfig()
buildConfig.Title = *pull.Title
buildConfig.URL = *pull.HTMLURL
buildConfig.HeadRepo = headCloneURL
buildConfig.HeadBranch = headBranch
buildConfig.HeadHash = headCommit
buildConfig.BaseRepo = baseCloneURL
buildConfig.BaseBranch = baseBranch
buildConfig.BaseHash = ""
buildConfig.Group = pullID
buildConfig.SetMetadata("github:BuildType", "pullrequest")
buildConfig.SetMetadata("github:PullRequestID", pullID)
buildConfig.SetMetadata("github:PullNumber", fmt.Sprintf("%d", *pull.Number))
buildConfig.SetMetadata("github:HeadHash", headCommit)
buildConfig.SetMetadata("github:HeadOwner", headOwner)
buildConfig.SetMetadata("github:HeadRepo", headRepo)
buildConfig.SetMetadata("github:BaseHash", baseCommit)
buildConfig.SetMetadata("github:BaseOwner", baseOwner)
buildConfig.SetMetadata("github:BaseRepo", baseRepo)
buildToken, err := app.app.NewBuild(buildConfig.Group, buildConfig)
if err != nil {
logcritf("Couldn't start build for %d", *pull.ID)
return
}
build, err := app.app.GetBuild(buildToken)
if err != nil || build == nil {
logcritf("Couldn't get build for %d", *pull.ID)
return
}
status.currentBuild = buildToken
g.trackedPullRequests[pullID] = status
loginfof("started build: %s", buildToken)
}
func (g *Github) updatePullRequest(app *githubApp, event *github.PullRequestEvent) {
// this is called when there is a new commit on the pull request or something like that
pullID := strconv.Itoa(*event.PullRequest.ID)
g.m.RLock()
_, ok := g.trackedPullRequests[pullID]
g.m.RUnlock()
if ok == false {
logwarnf("event on unknown/ignored pull request: %s", pullID)
g.trackPullRequest(app, event)
return
}
g.buildPullRequest(app, event.PullRequest)
}
func (g *Github) closedPullRequest(app *githubApp, event *github.PullRequestEvent) {
g.m.RLock()
defer g.m.RUnlock()
pullID := strconv.Itoa(*event.PullRequest.ID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
return
}
if build, _ := app.app.GetBuild(status.currentBuild); build != nil {
if app.config.CancelOnNewCommit {
build.Stop()
}
}
delete(g.trackedPullRequests, pullID)
}
func loginfof(str string, args ...interface{}) (ret string) {
ret = fmt.Sprintf("github-info: "+str+"\n", args...)
fmt.Printf(ret)
return ret
}
func logwarnf(str string, args ...interface{}) (ret string) {
ret = fmt.Sprintf("github-warn: "+str+"\n", args...)
fmt.Printf(ret)
return ret
}
func logcritf(str string, args ...interface{}) (ret string) {
ret = fmt.Sprintf("github-crit: "+str+"\n", args...)
fmt.Printf(ret)
return ret
}
| gcritf("Couldn't create deploy key for %s: %s", appConfig.app.Name(), err)
return err
}
ret | conditional_block |
github.go | package github
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/oauth2"
githubO2 "golang.org/x/oauth2/github"
"github.com/google/go-github/github"
"github.com/watchly/ngbuild/core"
)
var oauth2State = fmt.Sprintf("%d%d%d", os.Getuid(), os.Getpid(), time.Now().Unix())
type pullRequestStatus struct {
pull *github.PullRequest
currentBuild string // build token
mergeOnPass bool
}
type githubConfig struct {
ClientID string `mapstructure:"clientID"`
ClientSecret string `mapstructure:"clientSecret"`
Owner string `mapstructure:"owner"`
Repo string `mapstructure:"repo"`
IgnoredBranches []string `mapstructure:"ignoredBranches"`
PublicKey string `mapstructure:"publicKey"`
BuildBranches []string `mapstructure:"buildBranches"`
CancelOnNewCommit bool `mapstructure:"cancelOnNewCommit"`
MergeOnPass bool `mapstructure:"mergeOnPass"`
MergeOnPassAuthWords []string `mapstructure:"mergeOnPassAuthWords"`
}
type githubApp struct {
app core.App
config githubConfig
}
// Github ...
type Github struct {
m sync.RWMutex
globalConfig githubConfig
apps map[string]*githubApp
client *github.Client
clientID, clientSecret string
clientHasSet *sync.Cond
trackedPullRequests map[string]pullRequestStatus
trackedBuilds []core.Build
}
// New ...
func New() *Github {
g := &Github{
clientHasSet: sync.NewCond(&sync.Mutex{}),
apps: make(map[string]*githubApp),
trackedPullRequests: make(map[string]pullRequestStatus),
}
http.HandleFunc("/cb/auth/github", g.handleGithubAuth)
http.HandleFunc("/cb/github/hook/", g.handleGithubEvent)
return g
}
// Identifier ...
func (g *Github) Identifier() string { return "github" }
// IsProvider ...
func (g *Github) IsProvider(source string) bool {
loginfof("Asked to provide for %s", source)
return strings.HasPrefix(source, "git@github.com:") || source == ""
}
// ProvideFor ...
func (g *Github) ProvideFor(config *core.BuildConfig, directory string) error {
// FIXME, need to git checkout the given config
return g.cloneAndMerge(directory, config)
}
func (g *Github) handleGithubAuth(resp http.ResponseWriter, req *http.Request) {
q := req.URL.Query()
state := q.Get("state")
if state != oauth2State {
resp.Write([]byte("OAuth2 state was incorrect, something bad happened between Github and us"))
return
}
code := q.Get("code")
cfg := g.getOauthConfig()
token, err := cfg.Exchange(context.Background(), code)
if err != nil {
resp.Write([]byte("Error exchanging OAuth code, something bad happened between Github and us: " + err.Error()))
return
}
core.StoreCache("github:token", token.AccessToken)
g.setClient(token)
resp.Write([]byte("Thanks! you can close this tab now."))
}
func (g *Github) getOauthConfig() *oauth2.Config {
return &oauth2.Config{
ClientID: g.globalConfig.ClientID,
ClientSecret: g.globalConfig.ClientSecret,
Endpoint: githubO2.Endpoint,
Scopes: []string{"repo"},
}
}
func (g *Github) setClient(token *oauth2.Token) {
ts := g.getOauthConfig().TokenSource(oauth2.NoContext, token)
tc := oauth2.NewClient(oauth2.NoContext, ts)
g.client = github.NewClient(tc)
g.clientHasSet.Broadcast()
}
func (g *Github) | () {
token := core.GetCache("github:token")
if token != "" {
oauth2Token := oauth2.Token{AccessToken: token}
g.setClient(&oauth2Token)
return
}
fmt.Println("")
fmt.Println("This app must be authenticated with github, please visit the following URL to authenticate this app")
fmt.Println(g.getOauthConfig().AuthCodeURL(oauth2State, oauth2.AccessTypeOffline))
fmt.Println("")
}
func (g *Github) init(app core.App) {
if g.client == nil {
app.Config("github", &g.globalConfig)
if g.globalConfig.ClientID == "" || g.globalConfig.ClientSecret == "" {
fmt.Println("Invalid github configuration, missing ClientID/ClientSecret")
} else {
g.clientHasSet.L.Lock()
g.acquireOauthToken()
for g.client == nil {
fmt.Println("Waiting for github authentication response...")
g.clientHasSet.Wait()
}
fmt.Println("Got authentication response")
if repos, _, err := g.client.Repositories.List("", nil); err != nil {
logcritf("Couldn't get repos list after authenticating, something has gone wrong, clear cache and retry")
} else {
fmt.Println("Found repositories:")
for _, repo := range repos {
repostr := fmt.Sprintf("%s/%s ", *repo.Owner.Login, *repo.Name)
if *repo.Private == true {
repostr += "🔒"
}
if *repo.Fork == true {
repostr += "🍴"
}
fmt.Println(repostr)
}
}
g.clientHasSet.L.Unlock()
}
}
}
// AttachToApp ...
func (g *Github) AttachToApp(app core.App) error {
g.m.Lock()
defer g.m.Unlock()
g.init(app)
appConfig := &githubApp{
app: app,
}
app.Config("github", &appConfig.config)
g.apps[app.Name()] = appConfig
g.setupDeployKey(appConfig)
g.setupHooks(appConfig)
app.Listen(core.SignalBuildProvisioning, g.onBuildStarted)
app.Listen(core.SignalBuildComplete, g.onBuildFinished)
return nil
}
func (g *Github) setupDeployKey(appConfig *githubApp) error {
cfg := appConfig.config
// TODO - would be nicer to generate ssh key automatically
if cfg.PublicKey == "" {
logcritf("(%s) No public key available, create one and add it to the configuration", appConfig.app.Name())
return errors.New("No pub key available")
}
keyName := fmt.Sprintf("NGBuild ssh deploy key - %s", appConfig.app.Name())
_, _, err := g.client.Repositories.CreateKey(cfg.Owner, cfg.Repo, &github.Key{
Title: &keyName,
Key: &cfg.PublicKey,
ReadOnly: &[]bool{true}[0],
})
if err != nil && strings.Contains(err.Error(), "key is already in use") == false {
logcritf("Couldn't create deploy key for %s: %s", appConfig.app.Name(), err)
return err
}
return nil
}
func (g *Github) setupHooks(appConfig *githubApp) {
cfg := appConfig.config
_, _, err := g.client.Repositories.Get(cfg.Owner, cfg.Repo)
if err != nil {
logwarnf("(%s) Repository does not exist, owner=%s, repo=%s", appConfig.app.Name(), cfg.Owner, cfg.Repo)
return
}
hookURL := fmt.Sprintf("%s/cb/github/hook/%s", core.GetHTTPServerURL(), appConfig.app.Name())
_, _, err = g.client.Repositories.CreateHook(cfg.Owner, cfg.Repo, &github.Hook{
Name: &[]string{"web"}[0],
Active: &[]bool{true}[0],
Config: map[string]interface{}{
"url": hookURL,
"content_type": "json",
},
Events: []string{"pull_request",
"delete",
"issue_comment",
"pull_request_review",
"pull_request_review_event",
"push",
"status",
},
})
if err != nil && strings.Contains(err.Error(), "Hook already exists") == false {
logwarnf("Could not create webhook, owner=%s, repo=%s: %s", cfg.Owner, cfg.Repo, err)
return
}
}
// Shutdown ...
func (g *Github) Shutdown() {}
// hold the g.m lock when you call this
func (g *Github) trackBuild(build core.Build) {
for _, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
return
}
}
build.Ref()
g.trackedBuilds = append(g.trackedBuilds, build)
}
// hold the g.m.lock when you call this
func (g *Github) untrackBuild(build core.Build) {
buildIndex := -1
for i, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
buildIndex = i
break
}
}
if buildIndex < 0 {
return
}
g.trackedBuilds[buildIndex].Unref()
g.trackedBuilds = append(g.trackedBuilds[:buildIndex], g.trackedBuilds[buildIndex+1:]...)
}
func (g *Github) trackPullRequest(app *githubApp, event *github.PullRequestEvent) {
if event.PullRequest == nil {
logcritf("pull request is nil")
return
}
pull := event.PullRequest
pullID := strconv.Itoa(*pull.ID)
// first thing we need to do is check to see if this pull request comes from a collaborator
// otherwise we are letting randos run arbutary code on our system. this will be essentially until
// we have some filesystem container system
owner := *pull.Base.Repo.Owner.Login
repo := *pull.Base.Repo.Name
user := *pull.User.Login
isCollaborator, _, err := g.client.Repositories.IsCollaborator(owner, repo, user)
if err != nil {
logcritf("Couldn't check collaborator status on %s: %s", pullID, err)
return
} else if isCollaborator == false {
logwarnf("Ignoring pull request %s, non collaborator: %s", pullID, user)
return
}
g.m.Lock()
defer g.m.Unlock()
// check for ignored branches
for _, branchIgnore := range app.config.IgnoredBranches {
if branchIgnore == *pull.Base.Ref {
logwarnf("Ignoring pull request %s, is an ignored branch", pullID)
return
}
}
g.trackedPullRequests[pullID] = pullRequestStatus{
pull: pull,
}
g.buildPullRequest(app, pull)
}
func (g *Github) buildPullRequest(app *githubApp, pull *github.PullRequest) {
// for reference, head is the proposed branch, base is the branch to merge into
pullID := strconv.Itoa(*pull.ID)
loginfof("Building pull request: %s", pullID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
status = pullRequestStatus{pull, "", false}
g.trackedPullRequests[pullID] = status
}
// we want to check to see if we are already building or already built this commit
// and we want to cancel the previous build
if build, _ := app.app.GetBuild(status.currentBuild); build != nil {
if build.Config().GetMetadata("github:HeadHash") == *pull.Head.SHA {
logwarnf("Already building/built this commit")
return
}
if app.config.CancelOnNewCommit {
build.Stop()
}
}
headBranch := *pull.Head.Ref
headCloneURL := *pull.Head.Repo.SSHURL
headCommit := *pull.Head.SHA
headOwner := *pull.Head.Repo.Owner.Login
headRepo := *pull.Head.Repo.Name
baseBranch := *pull.Base.Ref
baseCloneURL := *pull.Base.Repo.SSHURL
baseOwner := *pull.Base.Repo.Owner.Login
baseRepo := *pull.Base.Repo.Name
baseCommit := *pull.Base.SHA
buildConfig := core.NewBuildConfig()
buildConfig.Title = *pull.Title
buildConfig.URL = *pull.HTMLURL
buildConfig.HeadRepo = headCloneURL
buildConfig.HeadBranch = headBranch
buildConfig.HeadHash = headCommit
buildConfig.BaseRepo = baseCloneURL
buildConfig.BaseBranch = baseBranch
buildConfig.BaseHash = ""
buildConfig.Group = pullID
buildConfig.SetMetadata("github:BuildType", "pullrequest")
buildConfig.SetMetadata("github:PullRequestID", pullID)
buildConfig.SetMetadata("github:PullNumber", fmt.Sprintf("%d", *pull.Number))
buildConfig.SetMetadata("github:HeadHash", headCommit)
buildConfig.SetMetadata("github:HeadOwner", headOwner)
buildConfig.SetMetadata("github:HeadRepo", headRepo)
buildConfig.SetMetadata("github:BaseHash", baseCommit)
buildConfig.SetMetadata("github:BaseOwner", baseOwner)
buildConfig.SetMetadata("github:BaseRepo", baseRepo)
buildToken, err := app.app.NewBuild(buildConfig.Group, buildConfig)
if err != nil {
logcritf("Couldn't start build for %d", *pull.ID)
return
}
build, err := app.app.GetBuild(buildToken)
if err != nil || build == nil {
logcritf("Couldn't get build for %d", *pull.ID)
return
}
status.currentBuild = buildToken
g.trackedPullRequests[pullID] = status
loginfof("started build: %s", buildToken)
}
func (g *Github) updatePullRequest(app *githubApp, event *github.PullRequestEvent) {
// this is called when there is a new commit on the pull request or something like that
pullID := strconv.Itoa(*event.PullRequest.ID)
g.m.RLock()
_, ok := g.trackedPullRequests[pullID]
g.m.RUnlock()
if ok == false {
logwarnf("event on unknown/ignored pull request: %s", pullID)
g.trackPullRequest(app, event)
return
}
g.buildPullRequest(app, event.PullRequest)
}
func (g *Github) closedPullRequest(app *githubApp, event *github.PullRequestEvent) {
g.m.RLock()
defer g.m.RUnlock()
pullID := strconv.Itoa(*event.PullRequest.ID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
return
}
if build, _ := app.app.GetBuild(status.currentBuild); build != nil {
if app.config.CancelOnNewCommit {
build.Stop()
}
}
delete(g.trackedPullRequests, pullID)
}
func loginfof(str string, args ...interface{}) (ret string) {
ret = fmt.Sprintf("github-info: "+str+"\n", args...)
fmt.Printf(ret)
return ret
}
func logwarnf(str string, args ...interface{}) (ret string) {
ret = fmt.Sprintf("github-warn: "+str+"\n", args...)
fmt.Printf(ret)
return ret
}
func logcritf(str string, args ...interface{}) (ret string) {
ret = fmt.Sprintf("github-crit: "+str+"\n", args...)
fmt.Printf(ret)
return ret
}
| acquireOauthToken | identifier_name |
github.go | package github
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/oauth2"
githubO2 "golang.org/x/oauth2/github"
"github.com/google/go-github/github"
"github.com/watchly/ngbuild/core"
)
var oauth2State = fmt.Sprintf("%d%d%d", os.Getuid(), os.Getpid(), time.Now().Unix())
type pullRequestStatus struct {
pull *github.PullRequest
currentBuild string // build token
mergeOnPass bool
}
type githubConfig struct {
ClientID string `mapstructure:"clientID"`
ClientSecret string `mapstructure:"clientSecret"`
Owner string `mapstructure:"owner"`
Repo string `mapstructure:"repo"`
IgnoredBranches []string `mapstructure:"ignoredBranches"`
PublicKey string `mapstructure:"publicKey"`
BuildBranches []string `mapstructure:"buildBranches"`
CancelOnNewCommit bool `mapstructure:"cancelOnNewCommit"`
MergeOnPass bool `mapstructure:"mergeOnPass"`
MergeOnPassAuthWords []string `mapstructure:"mergeOnPassAuthWords"`
}
type githubApp struct {
app core.App
config githubConfig
}
// Github ...
type Github struct {
m sync.RWMutex
globalConfig githubConfig
apps map[string]*githubApp
client *github.Client
clientID, clientSecret string
clientHasSet *sync.Cond
trackedPullRequests map[string]pullRequestStatus
trackedBuilds []core.Build
}
// New ...
func New() *Github {
g := &Github{
clientHasSet: sync.NewCond(&sync.Mutex{}),
apps: make(map[string]*githubApp),
trackedPullRequests: make(map[string]pullRequestStatus),
}
http.HandleFunc("/cb/auth/github", g.handleGithubAuth)
http.HandleFunc("/cb/github/hook/", g.handleGithubEvent)
return g
}
// Identifier ...
func (g *Github) Identifier() string { return "github" }
// IsProvider ...
func (g *Github) IsProvider(source string) bool {
loginfof("Asked to provide for %s", source)
return strings.HasPrefix(source, "git@github.com:") || source == ""
}
// ProvideFor ...
func (g *Github) ProvideFor(config *core.BuildConfig, directory string) error {
// FIXME, need to git checkout the given config
return g.cloneAndMerge(directory, config)
}
func (g *Github) handleGithubAuth(resp http.ResponseWriter, req *http.Request) {
q := req.URL.Query()
state := q.Get("state")
if state != oauth2State {
resp.Write([]byte("OAuth2 state was incorrect, something bad happened between Github and us"))
return
}
code := q.Get("code")
cfg := g.getOauthConfig()
token, err := cfg.Exchange(context.Background(), code)
if err != nil {
resp.Write([]byte("Error exchanging OAuth code, something bad happened between Github and us: " + err.Error()))
return
}
core.StoreCache("github:token", token.AccessToken)
g.setClient(token)
resp.Write([]byte("Thanks! you can close this tab now."))
}
func (g *Github) getOauthConfig() *oauth2.Config {
return &oauth2.Config{
ClientID: g.globalConfig.ClientID,
ClientSecret: g.globalConfig.ClientSecret,
Endpoint: githubO2.Endpoint,
Scopes: []string{"repo"},
}
}
func (g *Github) setClient(token *oauth2.Token) {
ts := g.getOauthConfig().TokenSource(oauth2.NoContext, token)
tc := oauth2.NewClient(oauth2.NoContext, ts)
g.client = github.NewClient(tc)
g.clientHasSet.Broadcast()
}
func (g *Github) acquireOauthToken() {
token := core.GetCache("github:token")
if token != "" {
oauth2Token := oauth2.Token{AccessToken: token}
g.setClient(&oauth2Token)
return
}
fmt.Println("")
fmt.Println("This app must be authenticated with github, please visit the following URL to authenticate this app")
fmt.Println(g.getOauthConfig().AuthCodeURL(oauth2State, oauth2.AccessTypeOffline))
fmt.Println("")
}
func (g *Github) init(app core.App) {
if g.client == nil {
app.Config("github", &g.globalConfig)
if g.globalConfig.ClientID == "" || g.globalConfig.ClientSecret == "" {
fmt.Println("Invalid github configuration, missing ClientID/ClientSecret")
} else {
g.clientHasSet.L.Lock()
g.acquireOauthToken()
for g.client == nil {
fmt.Println("Waiting for github authentication response...")
g.clientHasSet.Wait()
}
fmt.Println("Got authentication response")
if repos, _, err := g.client.Repositories.List("", nil); err != nil {
logcritf("Couldn't get repos list after authenticating, something has gone wrong, clear cache and retry")
} else {
fmt.Println("Found repositories:")
for _, repo := range repos {
repostr := fmt.Sprintf("%s/%s ", *repo.Owner.Login, *repo.Name)
if *repo.Private == true {
repostr += "🔒"
}
if *repo.Fork == true {
repostr += "🍴"
}
fmt.Println(repostr)
}
}
g.clientHasSet.L.Unlock()
}
}
}
// AttachToApp ...
func (g *Github) AttachToApp(app core.App) error {
g.m.Lock()
defer g.m.Unlock()
g.init(app)
appConfig := &githubApp{
app: app,
}
app.Config("github", &appConfig.config)
g.apps[app.Name()] = appConfig
g.setupDeployKey(appConfig)
g.setupHooks(appConfig)
app.Listen(core.SignalBuildProvisioning, g.onBuildStarted)
app.Listen(core.SignalBuildComplete, g.onBuildFinished)
return nil
}
func (g *Github) setupDeployKey(appConfig *githubApp) error {
cfg := appConfig.config
// TODO - would be nicer to generate ssh key automatically
if cfg.PublicKey == "" {
logcritf("(%s) No public key available, create one and add it to the configuration", appConfig.app.Name())
return errors.New("No pub key available")
}
keyName := fmt.Sprintf("NGBuild ssh deploy key - %s", appConfig.app.Name())
_, _, err := g.client.Repositories.CreateKey(cfg.Owner, cfg.Repo, &github.Key{
Title: &keyName,
Key: &cfg.PublicKey,
ReadOnly: &[]bool{true}[0],
})
if err != nil && strings.Contains(err.Error(), "key is already in use") == false {
logcritf("Couldn't create deploy key for %s: %s", appConfig.app.Name(), err)
return err
}
return nil
}
func (g *Github) setupHooks(appConfig *githubApp) {
cfg := appConfig.config
_, _, err := g.client.Repositories.Get(cfg.Owner, cfg.Repo)
if err != nil {
logwarnf("(%s) Repository does not exist, owner=%s, repo=%s", appConfig.app.Name(), cfg.Owner, cfg.Repo)
return
}
hookURL := fmt.Sprintf("%s/cb/github/hook/%s", core.GetHTTPServerURL(), appConfig.app.Name())
_, _, err = g.client.Repositories.CreateHook(cfg.Owner, cfg.Repo, &github.Hook{
Name: &[]string{"web"}[0],
Active: &[]bool{true}[0],
Config: map[string]interface{}{
"url": hookURL,
"content_type": "json",
},
Events: []string{"pull_request",
"delete",
"issue_comment",
"pull_request_review",
"pull_request_review_event",
"push",
"status",
},
})
if err != nil && strings.Contains(err.Error(), "Hook already exists") == false {
logwarnf("Could not create webhook, owner=%s, repo=%s: %s", cfg.Owner, cfg.Repo, err)
return
}
}
// Shutdown ...
func (g *Github) Shutdown() {}
// hold the g.m lock when you call this
func (g *Github) trackBuild(build core.Build) {
for | old the g.m.lock when you call this
func (g *Github) untrackBuild(build core.Build) {
buildIndex := -1
for i, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
buildIndex = i
break
}
}
if buildIndex < 0 {
return
}
g.trackedBuilds[buildIndex].Unref()
g.trackedBuilds = append(g.trackedBuilds[:buildIndex], g.trackedBuilds[buildIndex+1:]...)
}
func (g *Github) trackPullRequest(app *githubApp, event *github.PullRequestEvent) {
if event.PullRequest == nil {
logcritf("pull request is nil")
return
}
pull := event.PullRequest
pullID := strconv.Itoa(*pull.ID)
// first thing we need to do is check to see if this pull request comes from a collaborator
// otherwise we are letting randos run arbutary code on our system. this will be essentially until
// we have some filesystem container system
owner := *pull.Base.Repo.Owner.Login
repo := *pull.Base.Repo.Name
user := *pull.User.Login
isCollaborator, _, err := g.client.Repositories.IsCollaborator(owner, repo, user)
if err != nil {
logcritf("Couldn't check collaborator status on %s: %s", pullID, err)
return
} else if isCollaborator == false {
logwarnf("Ignoring pull request %s, non collaborator: %s", pullID, user)
return
}
g.m.Lock()
defer g.m.Unlock()
// check for ignored branches
for _, branchIgnore := range app.config.IgnoredBranches {
if branchIgnore == *pull.Base.Ref {
logwarnf("Ignoring pull request %s, is an ignored branch", pullID)
return
}
}
g.trackedPullRequests[pullID] = pullRequestStatus{
pull: pull,
}
g.buildPullRequest(app, pull)
}
func (g *Github) buildPullRequest(app *githubApp, pull *github.PullRequest) {
// for reference, head is the proposed branch, base is the branch to merge into
pullID := strconv.Itoa(*pull.ID)
loginfof("Building pull request: %s", pullID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
status = pullRequestStatus{pull, "", false}
g.trackedPullRequests[pullID] = status
}
// we want to check to see if we are already building or already built this commit
// and we want to cancel the previous build
if build, _ := app.app.GetBuild(status.currentBuild); build != nil {
if build.Config().GetMetadata("github:HeadHash") == *pull.Head.SHA {
logwarnf("Already building/built this commit")
return
}
if app.config.CancelOnNewCommit {
build.Stop()
}
}
headBranch := *pull.Head.Ref
headCloneURL := *pull.Head.Repo.SSHURL
headCommit := *pull.Head.SHA
headOwner := *pull.Head.Repo.Owner.Login
headRepo := *pull.Head.Repo.Name
baseBranch := *pull.Base.Ref
baseCloneURL := *pull.Base.Repo.SSHURL
baseOwner := *pull.Base.Repo.Owner.Login
baseRepo := *pull.Base.Repo.Name
baseCommit := *pull.Base.SHA
buildConfig := core.NewBuildConfig()
buildConfig.Title = *pull.Title
buildConfig.URL = *pull.HTMLURL
buildConfig.HeadRepo = headCloneURL
buildConfig.HeadBranch = headBranch
buildConfig.HeadHash = headCommit
buildConfig.BaseRepo = baseCloneURL
buildConfig.BaseBranch = baseBranch
buildConfig.BaseHash = ""
buildConfig.Group = pullID
buildConfig.SetMetadata("github:BuildType", "pullrequest")
buildConfig.SetMetadata("github:PullRequestID", pullID)
buildConfig.SetMetadata("github:PullNumber", fmt.Sprintf("%d", *pull.Number))
buildConfig.SetMetadata("github:HeadHash", headCommit)
buildConfig.SetMetadata("github:HeadOwner", headOwner)
buildConfig.SetMetadata("github:HeadRepo", headRepo)
buildConfig.SetMetadata("github:BaseHash", baseCommit)
buildConfig.SetMetadata("github:BaseOwner", baseOwner)
buildConfig.SetMetadata("github:BaseRepo", baseRepo)
buildToken, err := app.app.NewBuild(buildConfig.Group, buildConfig)
if err != nil {
logcritf("Couldn't start build for %d", *pull.ID)
return
}
build, err := app.app.GetBuild(buildToken)
if err != nil || build == nil {
logcritf("Couldn't get build for %d", *pull.ID)
return
}
status.currentBuild = buildToken
g.trackedPullRequests[pullID] = status
loginfof("started build: %s", buildToken)
}
func (g *Github) updatePullRequest(app *githubApp, event *github.PullRequestEvent) {
// this is called when there is a new commit on the pull request or something like that
pullID := strconv.Itoa(*event.PullRequest.ID)
g.m.RLock()
_, ok := g.trackedPullRequests[pullID]
g.m.RUnlock()
if ok == false {
logwarnf("event on unknown/ignored pull request: %s", pullID)
g.trackPullRequest(app, event)
return
}
g.buildPullRequest(app, event.PullRequest)
}
func (g *Github) closedPullRequest(app *githubApp, event *github.PullRequestEvent) {
g.m.RLock()
defer g.m.RUnlock()
pullID := strconv.Itoa(*event.PullRequest.ID)
status, ok := g.trackedPullRequests[pullID]
if ok == false {
return
}
if build, _ := app.app.GetBuild(status.currentBuild); build != nil {
if app.config.CancelOnNewCommit {
build.Stop()
}
}
delete(g.trackedPullRequests, pullID)
}
func loginfof(str string, args ...interface{}) (ret string) {
ret = fmt.Sprintf("github-info: "+str+"\n", args...)
fmt.Printf(ret)
return ret
}
func logwarnf(str string, args ...interface{}) (ret string) {
ret = fmt.Sprintf("github-warn: "+str+"\n", args...)
fmt.Printf(ret)
return ret
}
func logcritf(str string, args ...interface{}) (ret string) {
ret = fmt.Sprintf("github-crit: "+str+"\n", args...)
fmt.Printf(ret)
return ret
}
| _, trackedBuild := range g.trackedBuilds {
if trackedBuild.Token() == build.Token() {
return
}
}
build.Ref()
g.trackedBuilds = append(g.trackedBuilds, build)
}
// h | identifier_body |
image_utils.py | #%% Image class
import logging
import imutils
import numpy as np
import sklearn as sk
# import sklearn.cluster
import cv2
import pandas as pd
import base64
class SimpleImage:
def __init__(self, image_id):
pass
class Image():
def __init__(self, image_id):
"""An object representing a full satellite image with ships and it's metadata.
An image record is identified by the init parameter `image_id`.
Images are stored in a zip file, and loaded on demand using the `.load(zip_path, df)`. method.
The zip file path is needed, as well as a Dataframe object holding the metadata. The
metadata for an image is the list of ship location records.
:param image_id:
Attributes:
image_id The ID string
img The image as an ndarray
records DataFrame of records from the original CSV file
encoding A string representing the OpenCV encoding of the underlying img ndarray
ships A list of Ship dictionary entries
ship_id - Hash of the RLE string
EncodedPixels - RLE string
center -
"""
self.image_id = image_id
self.encoding = None
self.records = None
self.img = None
self.contours = None
logging.info("Image id: {}".format(self.image_id))
def __str__(self):
return "Image ID {} {} encoded, with {} ships".format(self.image_id, self.encoding, self.num_ships)
@property
def num_ships(self):
if len(self.records) == 1:
rec = self.records.head(1)
if isinstance(rec['EncodedPixels'].values[0], str):
return 1
else:
return 0
else:
return len(self.records)
@property
def shape(self):
return self.img.shape
@property
def shape2D(self):
return self.img.shape[0:2]
def get_img_bgr(self):
return cv2.cvtColor(self.img, cv2.COLOR_RGB2BGR)
def load(self, image_zip, df):
"""load an image into ndarray as RGB, and load ship records
:param image_zip:
:param df:
:return:
"""
self.img = imutils.load_rgb_from_zip(image_zip, self.image_id)
# TODO: FOR TESTING ONLY!!!!
# self.img = 200 * np.ones(shape=self.img.shape, dtype=np.uint8)
# self.img[:300,:300,:] = 100 * np.ones(3, dtype=np.uint8)
# logging.info("DEBUG ON: ".format())
# print(self.img)
# print(self.img.shape)
self.encoding = 'RGB'
logging.info("Loaded {}, size {} ".format(self.image_id, self.img.shape))
# TODO: (Actually just a note: the .copy() will suppress the SettingWithCopyWarning!
self.records = df.loc[df.index == self.image_id, :].copy()
assert isinstance(self.records, pd.DataFrame)
self.records['ship_id'] = self.records.apply(lambda row: hash(row['EncodedPixels']), axis=1)
self.records.set_index('ship_id', inplace=True)
self.records.drop(['HasShip', 'Duplicated', 'Unique'], axis=1, inplace=True)
logging.info("{} records selected for {}".format(len(self.records), self.image_id))
def moments(self):
""" Just a docstring for now
// spatial moments
double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
// central moments
double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
// central normalized moments
double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
:return:
"""
def load_ships(self):
"""Augment the basic df with mask, contour, data
mask - ndarray of 0 or 1
contour - opencv2 contour object
moments -
:return:
"""
assert isinstance(self.img, np.ndarray), "No image loaded"
assert self.num_ships, "No ships in this image"
# TODO: check warnings
self.records['mask'] = self.records.apply(lambda row: self.convert_rle_to_mask(row['EncodedPixels'], self.shape2D), axis=1)
self.records['contour'] = self.records.apply(lambda row: self.get_contour(row['mask']), axis=1)
self.records['moments'] = self.records.apply(lambda row: cv2.moments(row['contour']), axis=1)
# def get_x(row): return round(row['moments']['m10'] / row['moments']['m00'])
def get_x(row): return row['moments']['m10'] / row['moments']['m00']
# def get_y(row): return round(row['moments']['m01'] / row['moments']['m00'])
def get_y(row): return row['moments']['m01'] / row['moments']['m00']
self.records['x'] = self.records.apply(lambda row: get_x(row), axis=1)
self.records['y'] = self.records.apply(lambda row: get_y(row), axis=1)
# ( Same as m00!)
self.records['area'] = self.records.apply(lambda row: cv2.contourArea(row['contour']), axis=1)
self.records['rotated_rect'] = self.records.apply(lambda row: cv2.minAreaRect(row['contour']), axis=1)
self.records['angle'] = self.records.apply(lambda row: row['rotated_rect'][2], axis=1)
def ship_summary_table(self):
if self.num_ships:
df_summary = self.records.copy()
df_summary.drop(['mask', 'contour', 'moments', 'rotated_rect', 'EncodedPixels'], axis=1, inplace=True)
df_summary.reset_index(drop=True, inplace=True)
df_summary.insert(0, 'ship', range(0, len(df_summary)))
logging.info("Generating summary table".format())
return df_summary.round(1)
else:
return None
def convert_rle_to_mask(self, rle, shape):
"""convert RLE mask into 2d pixel array"""
# Initialize a zero canvas (one-dimensional here)
mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
# Split each run-length string
s = rle.split()
for i in range(len(s) // 2):
|
# Reshape to 2D
img2 = mask.reshape(shape).T
return img2
def get_contour(self, mask):
"""Return a cv2 contour object from a binary 0/1 mask"""
assert mask.ndim == 2
assert mask.min() == 0
assert mask.max() == 1
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
assert len(contours) == 1, "Too many contours in this mask!"
contour = contours[0]
# logging.debug("Returning {} fit contours over mask pixels".format(len(contours)))
return contour
def draw_ellipses_img(self):
logging.info("Fitting and drawing ellipses on a new ndarray canvas.".format())
canvas = self.img
for idx, rec in self.records.iterrows():
# logging.debug("Processing record {} of {}".format(cnt, image_id))
# contour = imutils.get_contour(rec['mask'])
# img = imutils.draw_ellipse_and_axis(img, contour, thickness=2)
# print(rec)
# print(rec['contour'])
canvas = imutils.fit_draw_ellipse(canvas, rec['contour'], thickness=2)
return canvas
def k_means(self, num_clusters=2):
logging.info("Processing {} image of shape {}".format(self.encoding, self.img.shape))
data = self.img / 255
logging.info("Scaled values to 0-1 range".format())
data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
logging.info("Reshape to pixel list {}".format(data.shape))
kmeans = sk.cluster.MiniBatchKMeans(num_clusters)
kmeans.fit(data)
logging.info("Fit {} pixels into {} clusters".format(data.shape[0], num_clusters))
unique, counts = np.unique(kmeans.labels_, return_counts=True)
for c_name, c_count, c_position in zip(unique, counts, kmeans.cluster_centers_):
logging.info("\tCluster {} at {} with {:0.1%} of the pixels".format(c_name, np.around(c_position, 3), c_count/data.shape[0])),
if len(unique) == 2:
dist = np.linalg.norm(kmeans.cluster_centers_[0] - kmeans.cluster_centers_[1])
logging.info("Distance between c1 and c2: {}".format(dist))
return kmeans
# all_new_colors = kmeans.cluster_centers_[kmeans.predict(data)]
def fit_kmeans_pixels(img, kmeans):
"""
:param img: An RGB image
:param kmeans: The fit KMeans sci-kit object over this image
:return: A new image, fit to the clusters of the image
"""
original_pixels = img / 255
# original_pixels = image.img.copy() / 255
new_shape = original_pixels.shape[0] * original_pixels.shape[1], original_pixels.shape[2]
original_pixels = original_pixels.reshape(new_shape)
# logging.info("Reshape to pixel list {}".format(original_pixels.shape))
# logging.info("Changed values to 0-1 range".format(img.shape))
logging.info("Scaled image to [0-1] and reshaped to {}".format(new_shape))
predicted_cluster = kmeans.predict(original_pixels)
# TODO: Document this Numpy behaviour - indexing one array with an integer array of indices
# Creates a new array, of length equal to indexing array
# test_a = predicted_cluster[0:10]
# test_asdf = kmeans.cluster_centers_
# test_asdf[test_a]
img_clustered_pixels = kmeans.cluster_centers_[predicted_cluster]
logging.info("Assigned each pixel to a cluster (color vector).".format())
img_clustered_pixels = img_clustered_pixels.reshape(img.shape)
logging.info("Reshape pixels back to original shape".format())
logging.info("Returning KMeans fit image canvas".format())
# img_clustered_pixels
return img_clustered_pixels
def convert_rgb_img_to_b64string_straight(img):
# Encode the in-memory image to .jpg format
retval, buffer = cv2.imencode('.jpg', img)
# Convert to base64 raw bytes
jpg_as_text = base64.b64encode(buffer)
# Decode the bytes to utf
jpg_as_text = jpg_as_text.decode(encoding="utf-8")
logging.info("Image encoded to jpg base64 string".format())
return jpg_as_text
def convert_rgb_img_to_b64string(img):
# Convert image to BGR from RGB
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
# Encode the in-memory image to .jpg format
retval, buffer = cv2.imencode('.jpg', img)
# Convert to base64 raw bytes
jpg_as_text = base64.b64encode(buffer)
# Decode the bytes to utf
jpg_as_text = jpg_as_text.decode(encoding="utf-8")
logging.info("Image encoded to jpg base64 string".format())
return jpg_as_text
| start = int(s[2 * i]) - 1
length = int(s[2 * i + 1])
mask[start:start + length] = 1 # Assign this run to ones | conditional_block |
image_utils.py | #%% Image class
import logging
import imutils
import numpy as np
import sklearn as sk
# import sklearn.cluster
import cv2
import pandas as pd
import base64
class SimpleImage:
def __init__(self, image_id):
pass
class Image():
def __init__(self, image_id):
"""An object representing a full satellite image with ships and it's metadata.
An image record is identified by the init parameter `image_id`.
Images are stored in a zip file, and loaded on demand using the `.load(zip_path, df)`. method.
The zip file path is needed, as well as a Dataframe object holding the metadata. The
metadata for an image is the list of ship location records.
:param image_id:
Attributes:
image_id The ID string
img The image as an ndarray
records DataFrame of records from the original CSV file
encoding A string representing the OpenCV encoding of the underlying img ndarray
ships A list of Ship dictionary entries
ship_id - Hash of the RLE string
EncodedPixels - RLE string
center -
"""
self.image_id = image_id
self.encoding = None
self.records = None
self.img = None
self.contours = None
logging.info("Image id: {}".format(self.image_id))
def __str__(self):
return "Image ID {} {} encoded, with {} ships".format(self.image_id, self.encoding, self.num_ships)
@property
def num_ships(self):
if len(self.records) == 1:
rec = self.records.head(1)
if isinstance(rec['EncodedPixels'].values[0], str):
return 1
else:
return 0
else:
return len(self.records)
@property
def shape(self):
return self.img.shape
@property
def shape2D(self):
return self.img.shape[0:2]
def get_img_bgr(self):
return cv2.cvtColor(self.img, cv2.COLOR_RGB2BGR)
def load(self, image_zip, df):
"""load an image into ndarray as RGB, and load ship records
:param image_zip:
:param df:
:return:
"""
self.img = imutils.load_rgb_from_zip(image_zip, self.image_id)
# TODO: FOR TESTING ONLY!!!!
# self.img = 200 * np.ones(shape=self.img.shape, dtype=np.uint8)
# self.img[:300,:300,:] = 100 * np.ones(3, dtype=np.uint8)
# logging.info("DEBUG ON: ".format())
# print(self.img)
# print(self.img.shape)
self.encoding = 'RGB'
logging.info("Loaded {}, size {} ".format(self.image_id, self.img.shape))
# TODO: (Actually just a note: the .copy() will suppress the SettingWithCopyWarning!
self.records = df.loc[df.index == self.image_id, :].copy()
assert isinstance(self.records, pd.DataFrame)
self.records['ship_id'] = self.records.apply(lambda row: hash(row['EncodedPixels']), axis=1)
self.records.set_index('ship_id', inplace=True)
self.records.drop(['HasShip', 'Duplicated', 'Unique'], axis=1, inplace=True)
logging.info("{} records selected for {}".format(len(self.records), self.image_id))
def moments(self):
""" Just a docstring for now
// spatial moments
double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
// central moments
double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
// central normalized moments
double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
:return:
"""
def load_ships(self):
"""Augment the basic df with mask, contour, data
mask - ndarray of 0 or 1
contour - opencv2 contour object
moments -
:return:
"""
assert isinstance(self.img, np.ndarray), "No image loaded"
assert self.num_ships, "No ships in this image"
# TODO: check warnings
self.records['mask'] = self.records.apply(lambda row: self.convert_rle_to_mask(row['EncodedPixels'], self.shape2D), axis=1)
self.records['contour'] = self.records.apply(lambda row: self.get_contour(row['mask']), axis=1)
self.records['moments'] = self.records.apply(lambda row: cv2.moments(row['contour']), axis=1)
# def get_x(row): return round(row['moments']['m10'] / row['moments']['m00'])
def get_x(row): return row['moments']['m10'] / row['moments']['m00']
# def get_y(row): return round(row['moments']['m01'] / row['moments']['m00'])
def get_y(row): return row['moments']['m01'] / row['moments']['m00']
self.records['x'] = self.records.apply(lambda row: get_x(row), axis=1)
self.records['y'] = self.records.apply(lambda row: get_y(row), axis=1)
# ( Same as m00!)
self.records['area'] = self.records.apply(lambda row: cv2.contourArea(row['contour']), axis=1)
self.records['rotated_rect'] = self.records.apply(lambda row: cv2.minAreaRect(row['contour']), axis=1)
self.records['angle'] = self.records.apply(lambda row: row['rotated_rect'][2], axis=1)
def ship_summary_table(self):
if self.num_ships:
df_summary = self.records.copy()
df_summary.drop(['mask', 'contour', 'moments', 'rotated_rect', 'EncodedPixels'], axis=1, inplace=True)
df_summary.reset_index(drop=True, inplace=True)
df_summary.insert(0, 'ship', range(0, len(df_summary)))
logging.info("Generating summary table".format())
return df_summary.round(1)
else:
return None
def convert_rle_to_mask(self, rle, shape):
"""convert RLE mask into 2d pixel array"""
# Initialize a zero canvas (one-dimensional here)
mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
# Split each run-length string
s = rle.split()
for i in range(len(s) // 2):
start = int(s[2 * i]) - 1
length = int(s[2 * i + 1])
mask[start:start + length] = 1 # Assign this run to ones
# Reshape to 2D
img2 = mask.reshape(shape).T
return img2
def get_contour(self, mask):
"""Return a cv2 contour object from a binary 0/1 mask"""
assert mask.ndim == 2
assert mask.min() == 0
assert mask.max() == 1
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
assert len(contours) == 1, "Too many contours in this mask!"
contour = contours[0]
# logging.debug("Returning {} fit contours over mask pixels".format(len(contours)))
return contour
def draw_ellipses_img(self):
logging.info("Fitting and drawing ellipses on a new ndarray canvas.".format())
canvas = self.img
for idx, rec in self.records.iterrows():
# logging.debug("Processing record {} of {}".format(cnt, image_id))
# contour = imutils.get_contour(rec['mask'])
# img = imutils.draw_ellipse_and_axis(img, contour, thickness=2)
# print(rec)
# print(rec['contour'])
canvas = imutils.fit_draw_ellipse(canvas, rec['contour'], thickness=2)
return canvas
def k_means(self, num_clusters=2):
logging.info("Processing {} image of shape {}".format(self.encoding, self.img.shape))
data = self.img / 255
logging.info("Scaled values to 0-1 range".format())
data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
logging.info("Reshape to pixel list {}".format(data.shape))
kmeans = sk.cluster.MiniBatchKMeans(num_clusters)
kmeans.fit(data)
logging.info("Fit {} pixels into {} clusters".format(data.shape[0], num_clusters))
unique, counts = np.unique(kmeans.labels_, return_counts=True)
for c_name, c_count, c_position in zip(unique, counts, kmeans.cluster_centers_):
logging.info("\tCluster {} at {} with {:0.1%} of the pixels".format(c_name, np.around(c_position, 3), c_count/data.shape[0])),
if len(unique) == 2:
dist = np.linalg.norm(kmeans.cluster_centers_[0] - kmeans.cluster_centers_[1])
logging.info("Distance between c1 and c2: {}".format(dist))
return kmeans
# all_new_colors = kmeans.cluster_centers_[kmeans.predict(data)]
def fit_kmeans_pixels(img, kmeans):
"""
:param img: An RGB image
:param kmeans: The fit KMeans sci-kit object over this image
:return: A new image, fit to the clusters of the image
"""
original_pixels = img / 255
# original_pixels = image.img.copy() / 255
new_shape = original_pixels.shape[0] * original_pixels.shape[1], original_pixels.shape[2]
original_pixels = original_pixels.reshape(new_shape)
# logging.info("Reshape to pixel list {}".format(original_pixels.shape))
# logging.info("Changed values to 0-1 range".format(img.shape))
logging.info("Scaled image to [0-1] and reshaped to {}".format(new_shape))
predicted_cluster = kmeans.predict(original_pixels)
# TODO: Document this Numpy behaviour - indexing one array with an integer array of indices
# Creates a new array, of length equal to indexing array
# test_a = predicted_cluster[0:10]
# test_asdf = kmeans.cluster_centers_
# test_asdf[test_a]
img_clustered_pixels = kmeans.cluster_centers_[predicted_cluster]
logging.info("Assigned each pixel to a cluster (color vector).".format())
img_clustered_pixels = img_clustered_pixels.reshape(img.shape)
logging.info("Reshape pixels back to original shape".format())
logging.info("Returning KMeans fit image canvas".format())
# img_clustered_pixels
return img_clustered_pixels
def convert_rgb_img_to_b64string_straight(img):
# Encode the in-memory image to .jpg format
retval, buffer = cv2.imencode('.jpg', img)
# Convert to base64 raw bytes
jpg_as_text = base64.b64encode(buffer)
# Decode the bytes to utf
jpg_as_text = jpg_as_text.decode(encoding="utf-8")
logging.info("Image encoded to jpg base64 string".format())
return jpg_as_text
def convert_rgb_img_to_b64string(img):
# Convert image to BGR from RGB
| img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
# Encode the in-memory image to .jpg format
retval, buffer = cv2.imencode('.jpg', img)
# Convert to base64 raw bytes
jpg_as_text = base64.b64encode(buffer)
# Decode the bytes to utf
jpg_as_text = jpg_as_text.decode(encoding="utf-8")
logging.info("Image encoded to jpg base64 string".format())
return jpg_as_text | identifier_body | |
image_utils.py | #%% Image class
import logging
import imutils
import numpy as np
import sklearn as sk
# import sklearn.cluster
import cv2
import pandas as pd
import base64
class SimpleImage:
def __init__(self, image_id):
pass
class Image():
def __init__(self, image_id):
"""An object representing a full satellite image with ships and it's metadata.
An image record is identified by the init parameter `image_id`.
Images are stored in a zip file, and loaded on demand using the `.load(zip_path, df)`. method.
The zip file path is needed, as well as a Dataframe object holding the metadata. The
metadata for an image is the list of ship location records.
:param image_id:
Attributes:
image_id The ID string | EncodedPixels - RLE string
center -
"""
self.image_id = image_id
self.encoding = None
self.records = None
self.img = None
self.contours = None
logging.info("Image id: {}".format(self.image_id))
def __str__(self):
return "Image ID {} {} encoded, with {} ships".format(self.image_id, self.encoding, self.num_ships)
@property
def num_ships(self):
if len(self.records) == 1:
rec = self.records.head(1)
if isinstance(rec['EncodedPixels'].values[0], str):
return 1
else:
return 0
else:
return len(self.records)
@property
def shape(self):
return self.img.shape
@property
def shape2D(self):
return self.img.shape[0:2]
def get_img_bgr(self):
return cv2.cvtColor(self.img, cv2.COLOR_RGB2BGR)
def load(self, image_zip, df):
"""load an image into ndarray as RGB, and load ship records
:param image_zip:
:param df:
:return:
"""
self.img = imutils.load_rgb_from_zip(image_zip, self.image_id)
# TODO: FOR TESTING ONLY!!!!
# self.img = 200 * np.ones(shape=self.img.shape, dtype=np.uint8)
# self.img[:300,:300,:] = 100 * np.ones(3, dtype=np.uint8)
# logging.info("DEBUG ON: ".format())
# print(self.img)
# print(self.img.shape)
self.encoding = 'RGB'
logging.info("Loaded {}, size {} ".format(self.image_id, self.img.shape))
# TODO: (Actually just a note: the .copy() will suppress the SettingWithCopyWarning!
self.records = df.loc[df.index == self.image_id, :].copy()
assert isinstance(self.records, pd.DataFrame)
self.records['ship_id'] = self.records.apply(lambda row: hash(row['EncodedPixels']), axis=1)
self.records.set_index('ship_id', inplace=True)
self.records.drop(['HasShip', 'Duplicated', 'Unique'], axis=1, inplace=True)
logging.info("{} records selected for {}".format(len(self.records), self.image_id))
def moments(self):
""" Just a docstring for now
// spatial moments
double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
// central moments
double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
// central normalized moments
double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
:return:
"""
def load_ships(self):
"""Augment the basic df with mask, contour, data
mask - ndarray of 0 or 1
contour - opencv2 contour object
moments -
:return:
"""
assert isinstance(self.img, np.ndarray), "No image loaded"
assert self.num_ships, "No ships in this image"
# TODO: check warnings
self.records['mask'] = self.records.apply(lambda row: self.convert_rle_to_mask(row['EncodedPixels'], self.shape2D), axis=1)
self.records['contour'] = self.records.apply(lambda row: self.get_contour(row['mask']), axis=1)
self.records['moments'] = self.records.apply(lambda row: cv2.moments(row['contour']), axis=1)
# def get_x(row): return round(row['moments']['m10'] / row['moments']['m00'])
def get_x(row): return row['moments']['m10'] / row['moments']['m00']
# def get_y(row): return round(row['moments']['m01'] / row['moments']['m00'])
def get_y(row): return row['moments']['m01'] / row['moments']['m00']
self.records['x'] = self.records.apply(lambda row: get_x(row), axis=1)
self.records['y'] = self.records.apply(lambda row: get_y(row), axis=1)
# ( Same as m00!)
self.records['area'] = self.records.apply(lambda row: cv2.contourArea(row['contour']), axis=1)
self.records['rotated_rect'] = self.records.apply(lambda row: cv2.minAreaRect(row['contour']), axis=1)
self.records['angle'] = self.records.apply(lambda row: row['rotated_rect'][2], axis=1)
def ship_summary_table(self):
if self.num_ships:
df_summary = self.records.copy()
df_summary.drop(['mask', 'contour', 'moments', 'rotated_rect', 'EncodedPixels'], axis=1, inplace=True)
df_summary.reset_index(drop=True, inplace=True)
df_summary.insert(0, 'ship', range(0, len(df_summary)))
logging.info("Generating summary table".format())
return df_summary.round(1)
else:
return None
def convert_rle_to_mask(self, rle, shape):
"""convert RLE mask into 2d pixel array"""
# Initialize a zero canvas (one-dimensional here)
mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
# Split each run-length string
s = rle.split()
for i in range(len(s) // 2):
start = int(s[2 * i]) - 1
length = int(s[2 * i + 1])
mask[start:start + length] = 1 # Assign this run to ones
# Reshape to 2D
img2 = mask.reshape(shape).T
return img2
def get_contour(self, mask):
"""Return a cv2 contour object from a binary 0/1 mask"""
assert mask.ndim == 2
assert mask.min() == 0
assert mask.max() == 1
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
assert len(contours) == 1, "Too many contours in this mask!"
contour = contours[0]
# logging.debug("Returning {} fit contours over mask pixels".format(len(contours)))
return contour
def draw_ellipses_img(self):
logging.info("Fitting and drawing ellipses on a new ndarray canvas.".format())
canvas = self.img
for idx, rec in self.records.iterrows():
# logging.debug("Processing record {} of {}".format(cnt, image_id))
# contour = imutils.get_contour(rec['mask'])
# img = imutils.draw_ellipse_and_axis(img, contour, thickness=2)
# print(rec)
# print(rec['contour'])
canvas = imutils.fit_draw_ellipse(canvas, rec['contour'], thickness=2)
return canvas
def k_means(self, num_clusters=2):
logging.info("Processing {} image of shape {}".format(self.encoding, self.img.shape))
data = self.img / 255
logging.info("Scaled values to 0-1 range".format())
data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
logging.info("Reshape to pixel list {}".format(data.shape))
kmeans = sk.cluster.MiniBatchKMeans(num_clusters)
kmeans.fit(data)
logging.info("Fit {} pixels into {} clusters".format(data.shape[0], num_clusters))
unique, counts = np.unique(kmeans.labels_, return_counts=True)
for c_name, c_count, c_position in zip(unique, counts, kmeans.cluster_centers_):
logging.info("\tCluster {} at {} with {:0.1%} of the pixels".format(c_name, np.around(c_position, 3), c_count/data.shape[0])),
if len(unique) == 2:
dist = np.linalg.norm(kmeans.cluster_centers_[0] - kmeans.cluster_centers_[1])
logging.info("Distance between c1 and c2: {}".format(dist))
return kmeans
# all_new_colors = kmeans.cluster_centers_[kmeans.predict(data)]
def fit_kmeans_pixels(img, kmeans):
"""
:param img: An RGB image
:param kmeans: The fit KMeans sci-kit object over this image
:return: A new image, fit to the clusters of the image
"""
original_pixels = img / 255
# original_pixels = image.img.copy() / 255
new_shape = original_pixels.shape[0] * original_pixels.shape[1], original_pixels.shape[2]
original_pixels = original_pixels.reshape(new_shape)
# logging.info("Reshape to pixel list {}".format(original_pixels.shape))
# logging.info("Changed values to 0-1 range".format(img.shape))
logging.info("Scaled image to [0-1] and reshaped to {}".format(new_shape))
predicted_cluster = kmeans.predict(original_pixels)
# TODO: Document this Numpy behaviour - indexing one array with an integer array of indices
# Creates a new array, of length equal to indexing array
# test_a = predicted_cluster[0:10]
# test_asdf = kmeans.cluster_centers_
# test_asdf[test_a]
img_clustered_pixels = kmeans.cluster_centers_[predicted_cluster]
logging.info("Assigned each pixel to a cluster (color vector).".format())
img_clustered_pixels = img_clustered_pixels.reshape(img.shape)
logging.info("Reshape pixels back to original shape".format())
logging.info("Returning KMeans fit image canvas".format())
# img_clustered_pixels
return img_clustered_pixels
def convert_rgb_img_to_b64string_straight(img):
# Encode the in-memory image to .jpg format
retval, buffer = cv2.imencode('.jpg', img)
# Convert to base64 raw bytes
jpg_as_text = base64.b64encode(buffer)
# Decode the bytes to utf
jpg_as_text = jpg_as_text.decode(encoding="utf-8")
logging.info("Image encoded to jpg base64 string".format())
return jpg_as_text
def convert_rgb_img_to_b64string(img):
# Convert image to BGR from RGB
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
# Encode the in-memory image to .jpg format
retval, buffer = cv2.imencode('.jpg', img)
# Convert to base64 raw bytes
jpg_as_text = base64.b64encode(buffer)
# Decode the bytes to utf
jpg_as_text = jpg_as_text.decode(encoding="utf-8")
logging.info("Image encoded to jpg base64 string".format())
return jpg_as_text | img The image as an ndarray
records DataFrame of records from the original CSV file
encoding A string representing the OpenCV encoding of the underlying img ndarray
ships A list of Ship dictionary entries
ship_id - Hash of the RLE string | random_line_split |
image_utils.py | #%% Image class
import logging
import imutils
import numpy as np
import sklearn as sk
# import sklearn.cluster
import cv2
import pandas as pd
import base64
class SimpleImage:
def __init__(self, image_id):
pass
class Image():
def __init__(self, image_id):
"""An object representing a full satellite image with ships and it's metadata.
An image record is identified by the init parameter `image_id`.
Images are stored in a zip file, and loaded on demand using the `.load(zip_path, df)`. method.
The zip file path is needed, as well as a Dataframe object holding the metadata. The
metadata for an image is the list of ship location records.
:param image_id:
Attributes:
image_id The ID string
img The image as an ndarray
records DataFrame of records from the original CSV file
encoding A string representing the OpenCV encoding of the underlying img ndarray
ships A list of Ship dictionary entries
ship_id - Hash of the RLE string
EncodedPixels - RLE string
center -
"""
self.image_id = image_id
self.encoding = None
self.records = None
self.img = None
self.contours = None
logging.info("Image id: {}".format(self.image_id))
def __str__(self):
return "Image ID {} {} encoded, with {} ships".format(self.image_id, self.encoding, self.num_ships)
@property
def num_ships(self):
if len(self.records) == 1:
rec = self.records.head(1)
if isinstance(rec['EncodedPixels'].values[0], str):
return 1
else:
return 0
else:
return len(self.records)
@property
def shape(self):
return self.img.shape
@property
def shape2D(self):
return self.img.shape[0:2]
def get_img_bgr(self):
return cv2.cvtColor(self.img, cv2.COLOR_RGB2BGR)
def load(self, image_zip, df):
"""load an image into ndarray as RGB, and load ship records
:param image_zip:
:param df:
:return:
"""
self.img = imutils.load_rgb_from_zip(image_zip, self.image_id)
# TODO: FOR TESTING ONLY!!!!
# self.img = 200 * np.ones(shape=self.img.shape, dtype=np.uint8)
# self.img[:300,:300,:] = 100 * np.ones(3, dtype=np.uint8)
# logging.info("DEBUG ON: ".format())
# print(self.img)
# print(self.img.shape)
self.encoding = 'RGB'
logging.info("Loaded {}, size {} ".format(self.image_id, self.img.shape))
# TODO: (Actually just a note: the .copy() will suppress the SettingWithCopyWarning!
self.records = df.loc[df.index == self.image_id, :].copy()
assert isinstance(self.records, pd.DataFrame)
self.records['ship_id'] = self.records.apply(lambda row: hash(row['EncodedPixels']), axis=1)
self.records.set_index('ship_id', inplace=True)
self.records.drop(['HasShip', 'Duplicated', 'Unique'], axis=1, inplace=True)
logging.info("{} records selected for {}".format(len(self.records), self.image_id))
def moments(self):
""" Just a docstring for now
// spatial moments
double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
// central moments
double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
// central normalized moments
double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
:return:
"""
def load_ships(self):
"""Augment the basic df with mask, contour, data
mask - ndarray of 0 or 1
contour - opencv2 contour object
moments -
:return:
"""
assert isinstance(self.img, np.ndarray), "No image loaded"
assert self.num_ships, "No ships in this image"
# TODO: check warnings
self.records['mask'] = self.records.apply(lambda row: self.convert_rle_to_mask(row['EncodedPixels'], self.shape2D), axis=1)
self.records['contour'] = self.records.apply(lambda row: self.get_contour(row['mask']), axis=1)
self.records['moments'] = self.records.apply(lambda row: cv2.moments(row['contour']), axis=1)
# def get_x(row): return round(row['moments']['m10'] / row['moments']['m00'])
def get_x(row): return row['moments']['m10'] / row['moments']['m00']
# def get_y(row): return round(row['moments']['m01'] / row['moments']['m00'])
def get_y(row): return row['moments']['m01'] / row['moments']['m00']
self.records['x'] = self.records.apply(lambda row: get_x(row), axis=1)
self.records['y'] = self.records.apply(lambda row: get_y(row), axis=1)
# ( Same as m00!)
self.records['area'] = self.records.apply(lambda row: cv2.contourArea(row['contour']), axis=1)
self.records['rotated_rect'] = self.records.apply(lambda row: cv2.minAreaRect(row['contour']), axis=1)
self.records['angle'] = self.records.apply(lambda row: row['rotated_rect'][2], axis=1)
def ship_summary_table(self):
if self.num_ships:
df_summary = self.records.copy()
df_summary.drop(['mask', 'contour', 'moments', 'rotated_rect', 'EncodedPixels'], axis=1, inplace=True)
df_summary.reset_index(drop=True, inplace=True)
df_summary.insert(0, 'ship', range(0, len(df_summary)))
logging.info("Generating summary table".format())
return df_summary.round(1)
else:
return None
def convert_rle_to_mask(self, rle, shape):
"""convert RLE mask into 2d pixel array"""
# Initialize a zero canvas (one-dimensional here)
mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
# Split each run-length string
s = rle.split()
for i in range(len(s) // 2):
start = int(s[2 * i]) - 1
length = int(s[2 * i + 1])
mask[start:start + length] = 1 # Assign this run to ones
# Reshape to 2D
img2 = mask.reshape(shape).T
return img2
def get_contour(self, mask):
"""Return a cv2 contour object from a binary 0/1 mask"""
assert mask.ndim == 2
assert mask.min() == 0
assert mask.max() == 1
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
assert len(contours) == 1, "Too many contours in this mask!"
contour = contours[0]
# logging.debug("Returning {} fit contours over mask pixels".format(len(contours)))
return contour
def | (self):
logging.info("Fitting and drawing ellipses on a new ndarray canvas.".format())
canvas = self.img
for idx, rec in self.records.iterrows():
# logging.debug("Processing record {} of {}".format(cnt, image_id))
# contour = imutils.get_contour(rec['mask'])
# img = imutils.draw_ellipse_and_axis(img, contour, thickness=2)
# print(rec)
# print(rec['contour'])
canvas = imutils.fit_draw_ellipse(canvas, rec['contour'], thickness=2)
return canvas
def k_means(self, num_clusters=2):
logging.info("Processing {} image of shape {}".format(self.encoding, self.img.shape))
data = self.img / 255
logging.info("Scaled values to 0-1 range".format())
data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
logging.info("Reshape to pixel list {}".format(data.shape))
kmeans = sk.cluster.MiniBatchKMeans(num_clusters)
kmeans.fit(data)
logging.info("Fit {} pixels into {} clusters".format(data.shape[0], num_clusters))
unique, counts = np.unique(kmeans.labels_, return_counts=True)
for c_name, c_count, c_position in zip(unique, counts, kmeans.cluster_centers_):
logging.info("\tCluster {} at {} with {:0.1%} of the pixels".format(c_name, np.around(c_position, 3), c_count/data.shape[0])),
if len(unique) == 2:
dist = np.linalg.norm(kmeans.cluster_centers_[0] - kmeans.cluster_centers_[1])
logging.info("Distance between c1 and c2: {}".format(dist))
return kmeans
# all_new_colors = kmeans.cluster_centers_[kmeans.predict(data)]
def fit_kmeans_pixels(img, kmeans):
"""
:param img: An RGB image
:param kmeans: The fit KMeans sci-kit object over this image
:return: A new image, fit to the clusters of the image
"""
original_pixels = img / 255
# original_pixels = image.img.copy() / 255
new_shape = original_pixels.shape[0] * original_pixels.shape[1], original_pixels.shape[2]
original_pixels = original_pixels.reshape(new_shape)
# logging.info("Reshape to pixel list {}".format(original_pixels.shape))
# logging.info("Changed values to 0-1 range".format(img.shape))
logging.info("Scaled image to [0-1] and reshaped to {}".format(new_shape))
predicted_cluster = kmeans.predict(original_pixels)
# TODO: Document this Numpy behaviour - indexing one array with an integer array of indices
# Creates a new array, of length equal to indexing array
# test_a = predicted_cluster[0:10]
# test_asdf = kmeans.cluster_centers_
# test_asdf[test_a]
img_clustered_pixels = kmeans.cluster_centers_[predicted_cluster]
logging.info("Assigned each pixel to a cluster (color vector).".format())
img_clustered_pixels = img_clustered_pixels.reshape(img.shape)
logging.info("Reshape pixels back to original shape".format())
logging.info("Returning KMeans fit image canvas".format())
# img_clustered_pixels
return img_clustered_pixels
def convert_rgb_img_to_b64string_straight(img):
# Encode the in-memory image to .jpg format
retval, buffer = cv2.imencode('.jpg', img)
# Convert to base64 raw bytes
jpg_as_text = base64.b64encode(buffer)
# Decode the bytes to utf
jpg_as_text = jpg_as_text.decode(encoding="utf-8")
logging.info("Image encoded to jpg base64 string".format())
return jpg_as_text
def convert_rgb_img_to_b64string(img):
# Convert image to BGR from RGB
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
# Encode the in-memory image to .jpg format
retval, buffer = cv2.imencode('.jpg', img)
# Convert to base64 raw bytes
jpg_as_text = base64.b64encode(buffer)
# Decode the bytes to utf
jpg_as_text = jpg_as_text.decode(encoding="utf-8")
logging.info("Image encoded to jpg base64 string".format())
return jpg_as_text
| draw_ellipses_img | identifier_name |
reconciler.go | package system
import (
"bytes"
"context"
"fmt"
"strings"
"text/template"
"time"
"github.com/noobaa/noobaa-operator/build/_output/bundle"
nbv1 "github.com/noobaa/noobaa-operator/pkg/apis/noobaa/v1alpha1"
"github.com/noobaa/noobaa-operator/pkg/nb"
"github.com/noobaa/noobaa-operator/pkg/options"
"github.com/noobaa/noobaa-operator/pkg/util"
dockerref "github.com/docker/distribution/reference"
semver "github.com/hashicorp/go-version"
"github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
var (
// ContainerImageConstraint is the instantiated semver contraints used for image verification
ContainerImageConstraint, _ = semver.NewConstraint(options.ContainerImageConstraintSemver)
// NooBaaType is and empty noobaa struct used for passing the object type
NooBaaType = &nbv1.NooBaa{}
)
// Reconciler is the context for loading or reconciling a noobaa system
type Reconciler struct {
Request types.NamespacedName
Client client.Client
Scheme *runtime.Scheme
Ctx context.Context
Logger *logrus.Entry
Recorder record.EventRecorder
NBClient nb.Client
NooBaa *nbv1.NooBaa
CoreApp *appsv1.StatefulSet
ServiceMgmt *corev1.Service
ServiceS3 *corev1.Service
SecretServer *corev1.Secret
SecretOp *corev1.Secret
SecretAdmin *corev1.Secret
}
// NewReconciler initializes a reconciler to be used for loading or reconciling a noobaa system
func NewReconciler(
req types.NamespacedName,
client client.Client,
scheme *runtime.Scheme,
recorder record.EventRecorder,
) *Reconciler {
r := &Reconciler{
Request: req,
Client: client,
Scheme: scheme,
Recorder: recorder,
Ctx: context.TODO(),
Logger: logrus.WithFields(logrus.Fields{"ns": req.Namespace}),
NooBaa: util.KubeObject(bundle.File_deploy_crds_noobaa_v1alpha1_noobaa_cr_yaml).(*nbv1.NooBaa),
CoreApp: util.KubeObject(bundle.File_deploy_internal_statefulset_core_yaml).(*appsv1.StatefulSet),
ServiceMgmt: util.KubeObject(bundle.File_deploy_internal_service_mgmt_yaml).(*corev1.Service),
ServiceS3: util.KubeObject(bundle.File_deploy_internal_service_s3_yaml).(*corev1.Service),
SecretServer: util.KubeObject(bundle.File_deploy_internal_secret_empty_yaml).(*corev1.Secret),
SecretOp: util.KubeObject(bundle.File_deploy_internal_secret_empty_yaml).(*corev1.Secret),
SecretAdmin: util.KubeObject(bundle.File_deploy_internal_secret_empty_yaml).(*corev1.Secret),
}
util.SecretResetStringDataFromData(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretAdmin)
// Set Namespace
r.NooBaa.Namespace = r.Request.Namespace
r.CoreApp.Namespace = r.Request.Namespace
r.ServiceMgmt.Namespace = r.Request.Namespace
r.ServiceS3.Namespace = r.Request.Namespace
r.SecretServer.Namespace = r.Request.Namespace
r.SecretOp.Namespace = r.Request.Namespace
r.SecretAdmin.Namespace = r.Request.Namespace
// Set Names
r.NooBaa.Name = r.Request.Name
r.CoreApp.Name = r.Request.Name + "-core"
r.ServiceMgmt.Name = r.Request.Name + "-mgmt"
r.ServiceS3.Name = "s3"
r.SecretServer.Name = r.Request.Name + "-server"
r.SecretOp.Name = r.Request.Name + "-operator"
r.SecretAdmin.Name = r.Request.Name + "-admin"
return r
}
// Load reads the state of the kubernetes objects of the system
func (r *Reconciler) Load() {
util.KubeCheck(r.NooBaa) | util.KubeCheck(r.SecretAdmin)
util.SecretResetStringDataFromData(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretAdmin)
}
// Reconcile reads that state of the cluster for a System object,
// and makes changes based on the state read and what is in the System.Spec.
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *Reconciler) Reconcile() (reconcile.Result, error) {
log := r.Logger.WithField("func", "Reconcile")
log.Infof("Start ...")
util.KubeCheck(r.NooBaa)
if r.NooBaa.UID == "" {
log.Infof("NooBaa not found or already deleted. Skip reconcile.")
return reconcile.Result{}, nil
}
err := r.RunReconcile()
if util.IsPersistentError(err) {
log.Errorf("❌ Persistent Error: %s", err)
util.SetErrorCondition(&r.NooBaa.Status.Conditions, err)
r.UpdateStatus()
return reconcile.Result{}, nil
}
if err != nil {
log.Warnf("⏳ Temporary Error: %s", err)
util.SetErrorCondition(&r.NooBaa.Status.Conditions, err)
r.UpdateStatus()
return reconcile.Result{RequeueAfter: 2 * time.Second}, nil
}
r.UpdateStatus()
log.Infof("✅ Done")
return reconcile.Result{}, nil
}
// UpdateStatus updates the system status in kubernetes from the memory
func (r *Reconciler) UpdateStatus() error {
log := r.Logger.WithField("func", "UpdateStatus")
log.Infof("Updating noobaa status")
r.NooBaa.Status.ObservedGeneration = r.NooBaa.Generation
return r.Client.Status().Update(r.Ctx, r.NooBaa)
}
// RunReconcile runs the reconcile flow and populates System.Status.
func (r *Reconciler) RunReconcile() error {
r.SetPhase(nbv1.SystemPhaseVerifying)
if err := r.CheckSystemCR(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseCreating)
if err := r.ReconcileSecretServer(); err != nil {
return err
}
if err := r.ReconcileObject(r.CoreApp, r.SetDesiredCoreApp); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceMgmt, r.SetDesiredServiceMgmt); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceS3, r.SetDesiredServiceS3); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseConnecting)
if err := r.Connect(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseConfiguring)
if err := r.ReconcileSecretOp(); err != nil {
return err
}
if err := r.ReconcileSecretAdmin(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseReady)
return r.Complete()
}
// ReconcileSecretServer creates a secret needed for the server pod
func (r *Reconciler) ReconcileSecretServer() error {
util.KubeCheck(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretServer)
if r.SecretServer.StringData["jwt"] == "" {
r.SecretServer.StringData["jwt"] = util.RandomBase64(16)
}
if r.SecretServer.StringData["server_secret"] == "" {
r.SecretServer.StringData["server_secret"] = util.RandomHex(4)
}
r.Own(r.SecretServer)
util.KubeCreateSkipExisting(r.SecretServer)
return nil
}
// SetDesiredCoreApp updates the CoreApp as desired for reconciling
func (r *Reconciler) SetDesiredCoreApp() {
r.CoreApp.Spec.Template.Labels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-mgmt"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-s3"] = r.Request.Name
r.CoreApp.Spec.Selector.MatchLabels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.ServiceName = r.ServiceMgmt.Name
podSpec := &r.CoreApp.Spec.Template.Spec
podSpec.ServiceAccountName = "noobaa-operator" // TODO do we use the same SA?
for i := range podSpec.InitContainers {
c := &podSpec.InitContainers[i]
if c.Name == "init-mongo" {
c.Image = r.NooBaa.Status.ActualImage
}
}
for i := range podSpec.Containers {
c := &podSpec.Containers[i]
if c.Name == "noobaa-server" {
c.Image = r.NooBaa.Status.ActualImage
for j := range c.Env {
if c.Env[j].Name == "AGENT_PROFILE" {
c.Env[j].Value = fmt.Sprintf(`{ "image": "%s" }`, r.NooBaa.Status.ActualImage)
}
}
if r.NooBaa.Spec.CoreResources != nil {
c.Resources = *r.NooBaa.Spec.CoreResources
}
} else if c.Name == "mongodb" {
if r.NooBaa.Spec.MongoImage == nil {
c.Image = options.MongoImage
} else {
c.Image = *r.NooBaa.Spec.MongoImage
}
if r.NooBaa.Spec.MongoResources != nil {
c.Resources = *r.NooBaa.Spec.MongoResources
}
}
}
if r.NooBaa.Spec.ImagePullSecret == nil {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{}
} else {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{*r.NooBaa.Spec.ImagePullSecret}
}
for i := range r.CoreApp.Spec.VolumeClaimTemplates {
pvc := &r.CoreApp.Spec.VolumeClaimTemplates[i]
pvc.Spec.StorageClassName = r.NooBaa.Spec.StorageClassName
// TODO we want to own the PVC's by NooBaa system but get errors on openshift:
// Warning FailedCreate 56s statefulset-controller
// create Pod noobaa-core-0 in StatefulSet noobaa-core failed error:
// Failed to create PVC mongo-datadir-noobaa-core-0:
// persistentvolumeclaims "mongo-datadir-noobaa-core-0" is forbidden:
// cannot set blockOwnerDeletion if an ownerReference refers to a resource
// you can't set finalizers on: , <nil>, ...
// r.Own(pvc)
}
}
// SetDesiredServiceMgmt updates the ServiceMgmt as desired for reconciling
func (r *Reconciler) SetDesiredServiceMgmt() {
r.ServiceMgmt.Spec.Selector["noobaa-mgmt"] = r.Request.Name
}
// SetDesiredServiceS3 updates the ServiceS3 as desired for reconciling
func (r *Reconciler) SetDesiredServiceS3() {
r.ServiceS3.Spec.Selector["noobaa-s3"] = r.Request.Name
}
// CheckSystemCR checks the validity of the system CR
// (i.e system.metadata.name and system.spec.image)
// and updates the status accordingly
func (r *Reconciler) CheckSystemCR() error {
log := r.Logger.WithField("func", "CheckSystemCR")
// we assume a single system per ns here
if r.NooBaa.Name != options.SystemName {
err := fmt.Errorf("Invalid system name %q expected %q", r.NooBaa.Name, options.SystemName)
log.Errorf("%s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "BadName", "%s", err)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
specImage := options.ContainerImage
if r.NooBaa.Spec.Image != nil {
specImage = *r.NooBaa.Spec.Image
}
// Parse the image spec as a docker image url
imageRef, err := dockerref.Parse(specImage)
// If the image cannot be parsed log the incident and mark as persistent error
// since we don't need to retry until the spec is updated.
if err != nil {
log.Errorf("Invalid image %s: %s", specImage, err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning,
"BadImage", `Invalid image requested %q`, specImage)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
// Get the image name and tag
imageName := ""
imageTag := ""
switch image := imageRef.(type) {
case dockerref.NamedTagged:
log.Infof("Parsed image (NamedTagged) %v", image)
imageName = image.Name()
imageTag = image.Tag()
case dockerref.Tagged:
log.Infof("Parsed image (Tagged) %v", image)
imageTag = image.Tag()
case dockerref.Named:
log.Infof("Parsed image (Named) %v", image)
imageName = image.Name()
default:
log.Infof("Parsed image (unstructured) %v", image)
}
if imageName == options.ContainerImageName {
version, err := semver.NewVersion(imageTag)
if err == nil {
log.Infof("Parsed version %q from image tag %q", version.String(), imageTag)
if !ContainerImageConstraint.Check(version) {
err := fmt.Errorf(`Unsupported image version %q not matching contraints %q`,
imageRef, ContainerImageConstraint)
log.Errorf("%s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "BadImage", "%s", err)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
} else {
log.Infof("Using custom image %q contraints %q", imageRef.String(), ContainerImageConstraint.String())
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeNormal,
"CustomImage", `Custom image version requested %q, I hope you know what you're doing ...`, imageRef)
}
}
} else {
log.Infof("Using custom image name %q the default is %q", imageRef.String(), options.ContainerImageName)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeNormal,
"CustomImage", `Custom image requested %q, I hope you know what you're doing ...`, imageRef)
}
}
// Set ActualImage to be updated in the noobaa status
r.NooBaa.Status.ActualImage = specImage
return nil
}
// CheckServiceStatus populates the status of a service by detecting all of its addresses
func (r *Reconciler) CheckServiceStatus(srv *corev1.Service, status *nbv1.ServiceStatus, portName string) {
log := r.Logger.WithField("func", "CheckServiceStatus").WithField("service", srv.Name)
*status = nbv1.ServiceStatus{}
servicePort := nb.FindPortByName(srv, portName)
proto := "http"
if strings.HasSuffix(portName, "https") {
proto = "https"
}
// Node IP:Port
// Pod IP:Port
pods := corev1.PodList{}
podsListOptions := &client.ListOptions{
Namespace: r.Request.Namespace,
LabelSelector: labels.SelectorFromSet(srv.Spec.Selector),
}
err := r.Client.List(r.Ctx, podsListOptions, &pods)
if err == nil {
for _, pod := range pods.Items {
if pod.Status.Phase == corev1.PodRunning {
if pod.Status.HostIP != "" {
status.NodePorts = append(
status.NodePorts,
fmt.Sprintf("%s://%s:%d", proto, pod.Status.HostIP, servicePort.NodePort),
)
}
if pod.Status.PodIP != "" {
status.PodPorts = append(
status.PodPorts,
fmt.Sprintf("%s://%s:%s", proto, pod.Status.PodIP, servicePort.TargetPort.String()),
)
}
}
}
}
// Cluster IP:Port (of the service)
if srv.Spec.ClusterIP != "" {
status.InternalIP = append(
status.InternalIP,
fmt.Sprintf("%s://%s:%d", proto, srv.Spec.ClusterIP, servicePort.Port),
)
status.InternalDNS = append(
status.InternalDNS,
fmt.Sprintf("%s://%s.%s:%d", proto, srv.Name, srv.Namespace, servicePort.Port),
)
}
// LoadBalancer IP:Port (of the service)
if srv.Status.LoadBalancer.Ingress != nil {
for _, lb := range srv.Status.LoadBalancer.Ingress {
if lb.IP != "" {
status.ExternalIP = append(
status.ExternalIP,
fmt.Sprintf("%s://%s:%d", proto, lb.IP, servicePort.Port),
)
}
if lb.Hostname != "" {
status.ExternalDNS = append(
status.ExternalDNS,
fmt.Sprintf("%s://%s:%d", proto, lb.Hostname, servicePort.Port),
)
}
}
}
// External IP:Port (of the service)
if srv.Spec.ExternalIPs != nil {
for _, ip := range srv.Spec.ExternalIPs {
status.ExternalIP = append(
status.ExternalIP,
fmt.Sprintf("%s://%s:%d", proto, ip, servicePort.Port),
)
}
}
log.Infof("Collected addresses: %+v", status)
}
// Connect initializes the noobaa client for making calls to the server.
func (r *Reconciler) Connect() error {
r.CheckServiceStatus(r.ServiceMgmt, &r.NooBaa.Status.Services.ServiceMgmt, "mgmt-https")
r.CheckServiceStatus(r.ServiceS3, &r.NooBaa.Status.Services.ServiceS3, "s3-https")
if len(r.NooBaa.Status.Services.ServiceMgmt.NodePorts) == 0 {
return fmt.Errorf("core pod port not ready yet")
}
nodePort := r.NooBaa.Status.Services.ServiceMgmt.NodePorts[0]
nodeIP := nodePort[strings.Index(nodePort, "://")+3 : strings.LastIndex(nodePort, ":")]
r.NBClient = nb.NewClient(&nb.APIRouterNodePort{
ServiceMgmt: r.ServiceMgmt,
NodeIP: nodeIP,
})
r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
// Check that the server is indeed serving the API already
// we use the read_auth call here because it's an API that always answers
// even when auth_token is empty.
_, err := r.NBClient.ReadAuthAPI()
return err
// if len(r.NooBaa.Status.Services.ServiceMgmt.PodPorts) != 0 {
// podPort := r.NooBaa.Status.Services.ServiceMgmt.PodPorts[0]
// podIP := podPort[strings.Index(podPort, "://")+3 : strings.LastIndex(podPort, ":")]
// r.NBClient = nb.NewClient(&nb.APIRouterPodPort{
// ServiceMgmt: r.ServiceMgmt,
// PodIP: podIP,
// })
// r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
// return nil
// }
}
// ReconcileSecretOp creates a new system in the noobaa server if not created yet.
func (r *Reconciler) ReconcileSecretOp() error {
// log := r.Logger.WithName("ReconcileSecretOp")
util.KubeCheck(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretOp)
if r.SecretOp.StringData["auth_token"] != "" {
return nil
}
if r.SecretOp.StringData["email"] == "" {
r.SecretOp.StringData["email"] = options.AdminAccountEmail
}
if r.SecretOp.StringData["password"] == "" {
r.SecretOp.StringData["password"] = util.RandomBase64(16)
r.Own(r.SecretOp)
err := r.Client.Create(r.Ctx, r.SecretOp)
if err != nil {
return err
}
}
res, err := r.NBClient.CreateAuthAPI(nb.CreateAuthParams{
System: r.Request.Name,
Role: "admin",
Email: r.SecretOp.StringData["email"],
Password: r.SecretOp.StringData["password"],
})
if err == nil {
// TODO this recovery flow does not allow us to get OperatorToken like CreateSystem
r.SecretOp.StringData["auth_token"] = res.Token
} else {
res, err := r.NBClient.CreateSystemAPI(nb.CreateSystemParams{
Name: r.Request.Name,
Email: r.SecretOp.StringData["email"],
Password: r.SecretOp.StringData["password"],
})
if err != nil {
return err
}
// TODO use res.OperatorToken after https://github.com/noobaa/noobaa-core/issues/5635
r.SecretOp.StringData["auth_token"] = res.Token
}
r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
return r.Client.Update(r.Ctx, r.SecretOp)
}
// ReconcileSecretAdmin creates the admin secret
func (r *Reconciler) ReconcileSecretAdmin() error {
log := r.Logger.WithField("func", "ReconcileSecretAdmin")
util.KubeCheck(r.SecretAdmin)
util.SecretResetStringDataFromData(r.SecretAdmin)
ns := r.Request.Namespace
name := r.Request.Name
secretAdminName := name + "-admin"
r.SecretAdmin = &corev1.Secret{}
err := r.GetObject(secretAdminName, r.SecretAdmin)
if err == nil {
return nil
}
if !errors.IsNotFound(err) {
log.Errorf("Failed getting admin secret: %v", err)
return err
}
r.SecretAdmin = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: secretAdminName,
Labels: map[string]string{"app": "noobaa"},
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
"system": name,
"email": options.AdminAccountEmail,
"password": string(r.SecretOp.Data["password"]),
},
}
log.Infof("listing accounts")
res, err := r.NBClient.ListAccountsAPI()
if err != nil {
return err
}
for _, account := range res.Accounts {
if account.Email == options.AdminAccountEmail {
if len(account.AccessKeys) > 0 {
r.SecretAdmin.StringData["AWS_ACCESS_KEY_ID"] = account.AccessKeys[0].AccessKey
r.SecretAdmin.StringData["AWS_SECRET_ACCESS_KEY"] = account.AccessKeys[0].SecretKey
}
}
}
r.Own(r.SecretAdmin)
return r.Client.Create(r.Ctx, r.SecretAdmin)
}
var readmeTemplate = template.Must(template.New("NooBaaSystem.Status.Readme").Parse(`
Welcome to NooBaa!
-----------------
Lets get started:
1. Connect to Management console:
Read your mgmt console login information (email & password) from secret: "{{.SecretAdmin.Name}}".
kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq '.data|map_values(@base64d)'
Open the management console service - take External IP/DNS or Node Port or use port forwarding:
kubectl port-forward -n {{.ServiceMgmt.Namespace}} service/{{.ServiceMgmt.Name}} 11443:8443 &
open https://localhost:11443
2. Test S3 client:
kubectl port-forward -n {{.ServiceS3.Namespace}} service/{{.ServiceS3.Name}} 10443:443 &
NOOBAA_ACCESS_KEY=$(kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq -r '.data.AWS_ACCESS_KEY_ID|@base64d')
NOOBAA_SECRET_KEY=$(kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq -r '.data.AWS_SECRET_ACCESS_KEY|@base64d')
alias s3='AWS_ACCESS_KEY_ID=$NOOBAA_ACCESS_KEY AWS_SECRET_ACCESS_KEY=$NOOBAA_SECRET_KEY aws --endpoint https://localhost:10443 --no-verify-ssl s3'
s3 ls
`))
// Complete populates the noobaa status at the end of reconcile.
func (r *Reconciler) Complete() error {
var readmeBuffer bytes.Buffer
err := readmeTemplate.Execute(&readmeBuffer, r)
if err != nil {
return err
}
r.NooBaa.Status.Readme = readmeBuffer.String()
r.NooBaa.Status.Accounts.Admin.SecretRef.Name = r.SecretAdmin.Name
r.NooBaa.Status.Accounts.Admin.SecretRef.Namespace = r.SecretAdmin.Namespace
return nil
}
// Own sets the object owner references to the noobaa system
func (r *Reconciler) Own(obj metav1.Object) {
util.Panic(controllerutil.SetControllerReference(r.NooBaa, obj, r.Scheme))
}
// GetObject gets an object by name from the request namespace.
func (r *Reconciler) GetObject(name string, obj runtime.Object) error {
return r.Client.Get(r.Ctx, client.ObjectKey{Namespace: r.Request.Namespace, Name: name}, obj)
}
// ReconcileObject is a generic call to reconcile a kubernetes object
// desiredFunc can be passed to modify the object before create/update.
// Currently we ignore enforcing a desired state, but it might be needed on upgrades.
func (r *Reconciler) ReconcileObject(obj runtime.Object, desiredFunc func()) error {
kind := obj.GetObjectKind().GroupVersionKind().Kind
objMeta, _ := meta.Accessor(obj)
log := r.Logger.
WithField("func", "ReconcileObject").
WithField("kind", kind).
WithField("name", objMeta.GetName())
r.Own(objMeta)
op, err := controllerutil.CreateOrUpdate(
r.Ctx, r.Client, obj.(runtime.Object),
func(obj runtime.Object) error {
if desiredFunc != nil {
desiredFunc()
}
return nil
},
)
if err != nil {
log.Errorf("ReconcileObject Failed: %v", err)
return err
}
log.Infof("Done. op=%s", op)
return nil
}
// SetPhase updates the status phase and conditions
func (r *Reconciler) SetPhase(phase nbv1.SystemPhase) {
r.Logger.Infof("SetPhase: %s", phase)
r.NooBaa.Status.Phase = phase
conditions := &r.NooBaa.Status.Conditions
reason := fmt.Sprintf("NooBaaSystemPhase%s", phase)
message := fmt.Sprintf("NooBaa operator system reconcile phase %s", phase)
switch phase {
case nbv1.SystemPhaseReady:
util.SetAvailableCondition(conditions, reason, message)
case nbv1.SystemPhaseRejected:
// handle rejected here too?
default:
util.SetProgressingCondition(conditions, reason, message)
}
} | util.KubeCheck(r.CoreApp)
util.KubeCheck(r.ServiceMgmt)
util.KubeCheck(r.ServiceS3)
util.KubeCheck(r.SecretServer)
util.KubeCheck(r.SecretOp) | random_line_split |
reconciler.go | package system
import (
"bytes"
"context"
"fmt"
"strings"
"text/template"
"time"
"github.com/noobaa/noobaa-operator/build/_output/bundle"
nbv1 "github.com/noobaa/noobaa-operator/pkg/apis/noobaa/v1alpha1"
"github.com/noobaa/noobaa-operator/pkg/nb"
"github.com/noobaa/noobaa-operator/pkg/options"
"github.com/noobaa/noobaa-operator/pkg/util"
dockerref "github.com/docker/distribution/reference"
semver "github.com/hashicorp/go-version"
"github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
var (
// ContainerImageConstraint is the instantiated semver contraints used for image verification
ContainerImageConstraint, _ = semver.NewConstraint(options.ContainerImageConstraintSemver)
// NooBaaType is and empty noobaa struct used for passing the object type
NooBaaType = &nbv1.NooBaa{}
)
// Reconciler is the context for loading or reconciling a noobaa system
type Reconciler struct {
Request types.NamespacedName
Client client.Client
Scheme *runtime.Scheme
Ctx context.Context
Logger *logrus.Entry
Recorder record.EventRecorder
NBClient nb.Client
NooBaa *nbv1.NooBaa
CoreApp *appsv1.StatefulSet
ServiceMgmt *corev1.Service
ServiceS3 *corev1.Service
SecretServer *corev1.Secret
SecretOp *corev1.Secret
SecretAdmin *corev1.Secret
}
// NewReconciler initializes a reconciler to be used for loading or reconciling a noobaa system
func NewReconciler(
req types.NamespacedName,
client client.Client,
scheme *runtime.Scheme,
recorder record.EventRecorder,
) *Reconciler {
r := &Reconciler{
Request: req,
Client: client,
Scheme: scheme,
Recorder: recorder,
Ctx: context.TODO(),
Logger: logrus.WithFields(logrus.Fields{"ns": req.Namespace}),
NooBaa: util.KubeObject(bundle.File_deploy_crds_noobaa_v1alpha1_noobaa_cr_yaml).(*nbv1.NooBaa),
CoreApp: util.KubeObject(bundle.File_deploy_internal_statefulset_core_yaml).(*appsv1.StatefulSet),
ServiceMgmt: util.KubeObject(bundle.File_deploy_internal_service_mgmt_yaml).(*corev1.Service),
ServiceS3: util.KubeObject(bundle.File_deploy_internal_service_s3_yaml).(*corev1.Service),
SecretServer: util.KubeObject(bundle.File_deploy_internal_secret_empty_yaml).(*corev1.Secret),
SecretOp: util.KubeObject(bundle.File_deploy_internal_secret_empty_yaml).(*corev1.Secret),
SecretAdmin: util.KubeObject(bundle.File_deploy_internal_secret_empty_yaml).(*corev1.Secret),
}
util.SecretResetStringDataFromData(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretAdmin)
// Set Namespace
r.NooBaa.Namespace = r.Request.Namespace
r.CoreApp.Namespace = r.Request.Namespace
r.ServiceMgmt.Namespace = r.Request.Namespace
r.ServiceS3.Namespace = r.Request.Namespace
r.SecretServer.Namespace = r.Request.Namespace
r.SecretOp.Namespace = r.Request.Namespace
r.SecretAdmin.Namespace = r.Request.Namespace
// Set Names
r.NooBaa.Name = r.Request.Name
r.CoreApp.Name = r.Request.Name + "-core"
r.ServiceMgmt.Name = r.Request.Name + "-mgmt"
r.ServiceS3.Name = "s3"
r.SecretServer.Name = r.Request.Name + "-server"
r.SecretOp.Name = r.Request.Name + "-operator"
r.SecretAdmin.Name = r.Request.Name + "-admin"
return r
}
// Load reads the state of the kubernetes objects of the system
func (r *Reconciler) Load() {
util.KubeCheck(r.NooBaa)
util.KubeCheck(r.CoreApp)
util.KubeCheck(r.ServiceMgmt)
util.KubeCheck(r.ServiceS3)
util.KubeCheck(r.SecretServer)
util.KubeCheck(r.SecretOp)
util.KubeCheck(r.SecretAdmin)
util.SecretResetStringDataFromData(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretAdmin)
}
// Reconcile reads that state of the cluster for a System object,
// and makes changes based on the state read and what is in the System.Spec.
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *Reconciler) Reconcile() (reconcile.Result, error) {
log := r.Logger.WithField("func", "Reconcile")
log.Infof("Start ...")
util.KubeCheck(r.NooBaa)
if r.NooBaa.UID == "" {
log.Infof("NooBaa not found or already deleted. Skip reconcile.")
return reconcile.Result{}, nil
}
err := r.RunReconcile()
if util.IsPersistentError(err) {
log.Errorf("❌ Persistent Error: %s", err)
util.SetErrorCondition(&r.NooBaa.Status.Conditions, err)
r.UpdateStatus()
return reconcile.Result{}, nil
}
if err != nil {
log.Warnf("⏳ Temporary Error: %s", err)
util.SetErrorCondition(&r.NooBaa.Status.Conditions, err)
r.UpdateStatus()
return reconcile.Result{RequeueAfter: 2 * time.Second}, nil
}
r.UpdateStatus()
log.Infof("✅ Done")
return reconcile.Result{}, nil
}
// UpdateStatus updates the system status in kubernetes from the memory
func (r *Reconciler) UpdateStatus() error {
log := r.Logger.WithField("func", "UpdateStatus")
log.Infof("Updating noobaa status")
r.NooBaa.Status.ObservedGeneration = r.NooBaa.Generation
return r.Client.Status().Update(r.Ctx, r.NooBaa)
}
// RunReconcile runs the reconcile flow and populates System.Status.
func (r *Reconciler) RunReconcile() error {
r.SetPhase(nbv1.SystemPhaseVerifying)
if err := r.CheckSystemCR(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseCreating)
if err := r.ReconcileSecretServer(); err != nil {
return err
}
if err := r.ReconcileObject(r.CoreApp, r.SetDesiredCoreApp); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceMgmt, r.SetDesiredServiceMgmt); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceS3, r.SetDesiredServiceS3); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseConnecting)
if err := r.Connect(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseConfiguring)
if err := r.ReconcileSecretOp(); err != nil {
return err
}
if err := r.ReconcileSecretAdmin(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseReady)
return r.Complete()
}
// ReconcileSecretServer creates a secret needed for the server pod
func (r *Reconciler) ReconcileSecretServer() error {
util.KubeCheck(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretServer)
if r.SecretServer.StringData["jwt"] == "" {
r.SecretServer.StringData["jwt"] = util.RandomBase64(16)
}
if r.SecretServer.StringData["server_secret"] == "" {
r.SecretServer.StringData["server_secret"] = util.RandomHex(4)
}
r.Own(r.SecretServer)
util.KubeCreateSkipExisting(r.SecretServer)
return nil
}
// SetDesiredCoreApp updates the CoreApp as desired for reconciling
func (r *Reconciler) SetDesiredCoreApp() {
r.CoreApp.Spec.Template.Labels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-mgmt"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-s3"] = r.Request.Name
r.CoreApp.Spec.Selector.MatchLabels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.ServiceName = r.ServiceMgmt.Name
podSpec := &r.CoreApp.Spec.Template.Spec
podSpec.ServiceAccountName = "noobaa-operator" // TODO do we use the same SA?
for i := range podSpec.InitContainers {
c := &podSpec.InitContainers[i]
if c.Name == "init-mongo" {
c.Image = r.NooBaa.Status.ActualImage
}
}
for i := range podSpec.Containers {
c := &podSpec.Containers[i]
if c.Name == "noobaa-server" {
c.Image = r.NooBaa.Status.ActualImage
for j := range c.Env {
| r.NooBaa.Spec.CoreResources != nil {
c.Resources = *r.NooBaa.Spec.CoreResources
}
} else if c.Name == "mongodb" {
if r.NooBaa.Spec.MongoImage == nil {
c.Image = options.MongoImage
} else {
c.Image = *r.NooBaa.Spec.MongoImage
}
if r.NooBaa.Spec.MongoResources != nil {
c.Resources = *r.NooBaa.Spec.MongoResources
}
}
}
if r.NooBaa.Spec.ImagePullSecret == nil {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{}
} else {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{*r.NooBaa.Spec.ImagePullSecret}
}
for i := range r.CoreApp.Spec.VolumeClaimTemplates {
pvc := &r.CoreApp.Spec.VolumeClaimTemplates[i]
pvc.Spec.StorageClassName = r.NooBaa.Spec.StorageClassName
// TODO we want to own the PVC's by NooBaa system but get errors on openshift:
// Warning FailedCreate 56s statefulset-controller
// create Pod noobaa-core-0 in StatefulSet noobaa-core failed error:
// Failed to create PVC mongo-datadir-noobaa-core-0:
// persistentvolumeclaims "mongo-datadir-noobaa-core-0" is forbidden:
// cannot set blockOwnerDeletion if an ownerReference refers to a resource
// you can't set finalizers on: , <nil>, ...
// r.Own(pvc)
}
}
// SetDesiredServiceMgmt updates the ServiceMgmt as desired for reconciling
func (r *Reconciler) SetDesiredServiceMgmt() {
r.ServiceMgmt.Spec.Selector["noobaa-mgmt"] = r.Request.Name
}
// SetDesiredServiceS3 updates the ServiceS3 as desired for reconciling
func (r *Reconciler) SetDesiredServiceS3() {
r.ServiceS3.Spec.Selector["noobaa-s3"] = r.Request.Name
}
// CheckSystemCR checks the validity of the system CR
// (i.e system.metadata.name and system.spec.image)
// and updates the status accordingly
func (r *Reconciler) CheckSystemCR() error {
log := r.Logger.WithField("func", "CheckSystemCR")
// we assume a single system per ns here
if r.NooBaa.Name != options.SystemName {
err := fmt.Errorf("Invalid system name %q expected %q", r.NooBaa.Name, options.SystemName)
log.Errorf("%s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "BadName", "%s", err)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
specImage := options.ContainerImage
if r.NooBaa.Spec.Image != nil {
specImage = *r.NooBaa.Spec.Image
}
// Parse the image spec as a docker image url
imageRef, err := dockerref.Parse(specImage)
// If the image cannot be parsed log the incident and mark as persistent error
// since we don't need to retry until the spec is updated.
if err != nil {
log.Errorf("Invalid image %s: %s", specImage, err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning,
"BadImage", `Invalid image requested %q`, specImage)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
// Get the image name and tag
imageName := ""
imageTag := ""
switch image := imageRef.(type) {
case dockerref.NamedTagged:
log.Infof("Parsed image (NamedTagged) %v", image)
imageName = image.Name()
imageTag = image.Tag()
case dockerref.Tagged:
log.Infof("Parsed image (Tagged) %v", image)
imageTag = image.Tag()
case dockerref.Named:
log.Infof("Parsed image (Named) %v", image)
imageName = image.Name()
default:
log.Infof("Parsed image (unstructured) %v", image)
}
if imageName == options.ContainerImageName {
version, err := semver.NewVersion(imageTag)
if err == nil {
log.Infof("Parsed version %q from image tag %q", version.String(), imageTag)
if !ContainerImageConstraint.Check(version) {
err := fmt.Errorf(`Unsupported image version %q not matching contraints %q`,
imageRef, ContainerImageConstraint)
log.Errorf("%s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "BadImage", "%s", err)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
} else {
log.Infof("Using custom image %q contraints %q", imageRef.String(), ContainerImageConstraint.String())
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeNormal,
"CustomImage", `Custom image version requested %q, I hope you know what you're doing ...`, imageRef)
}
}
} else {
log.Infof("Using custom image name %q the default is %q", imageRef.String(), options.ContainerImageName)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeNormal,
"CustomImage", `Custom image requested %q, I hope you know what you're doing ...`, imageRef)
}
}
// Set ActualImage to be updated in the noobaa status
r.NooBaa.Status.ActualImage = specImage
return nil
}
// CheckServiceStatus populates the status of a service by detecting all of its addresses
func (r *Reconciler) CheckServiceStatus(srv *corev1.Service, status *nbv1.ServiceStatus, portName string) {
log := r.Logger.WithField("func", "CheckServiceStatus").WithField("service", srv.Name)
*status = nbv1.ServiceStatus{}
servicePort := nb.FindPortByName(srv, portName)
proto := "http"
if strings.HasSuffix(portName, "https") {
proto = "https"
}
// Node IP:Port
// Pod IP:Port
pods := corev1.PodList{}
podsListOptions := &client.ListOptions{
Namespace: r.Request.Namespace,
LabelSelector: labels.SelectorFromSet(srv.Spec.Selector),
}
err := r.Client.List(r.Ctx, podsListOptions, &pods)
if err == nil {
for _, pod := range pods.Items {
if pod.Status.Phase == corev1.PodRunning {
if pod.Status.HostIP != "" {
status.NodePorts = append(
status.NodePorts,
fmt.Sprintf("%s://%s:%d", proto, pod.Status.HostIP, servicePort.NodePort),
)
}
if pod.Status.PodIP != "" {
status.PodPorts = append(
status.PodPorts,
fmt.Sprintf("%s://%s:%s", proto, pod.Status.PodIP, servicePort.TargetPort.String()),
)
}
}
}
}
// Cluster IP:Port (of the service)
if srv.Spec.ClusterIP != "" {
status.InternalIP = append(
status.InternalIP,
fmt.Sprintf("%s://%s:%d", proto, srv.Spec.ClusterIP, servicePort.Port),
)
status.InternalDNS = append(
status.InternalDNS,
fmt.Sprintf("%s://%s.%s:%d", proto, srv.Name, srv.Namespace, servicePort.Port),
)
}
// LoadBalancer IP:Port (of the service)
if srv.Status.LoadBalancer.Ingress != nil {
for _, lb := range srv.Status.LoadBalancer.Ingress {
if lb.IP != "" {
status.ExternalIP = append(
status.ExternalIP,
fmt.Sprintf("%s://%s:%d", proto, lb.IP, servicePort.Port),
)
}
if lb.Hostname != "" {
status.ExternalDNS = append(
status.ExternalDNS,
fmt.Sprintf("%s://%s:%d", proto, lb.Hostname, servicePort.Port),
)
}
}
}
// External IP:Port (of the service)
if srv.Spec.ExternalIPs != nil {
for _, ip := range srv.Spec.ExternalIPs {
status.ExternalIP = append(
status.ExternalIP,
fmt.Sprintf("%s://%s:%d", proto, ip, servicePort.Port),
)
}
}
log.Infof("Collected addresses: %+v", status)
}
// Connect initializes the noobaa client for making calls to the server.
func (r *Reconciler) Connect() error {
r.CheckServiceStatus(r.ServiceMgmt, &r.NooBaa.Status.Services.ServiceMgmt, "mgmt-https")
r.CheckServiceStatus(r.ServiceS3, &r.NooBaa.Status.Services.ServiceS3, "s3-https")
if len(r.NooBaa.Status.Services.ServiceMgmt.NodePorts) == 0 {
return fmt.Errorf("core pod port not ready yet")
}
nodePort := r.NooBaa.Status.Services.ServiceMgmt.NodePorts[0]
nodeIP := nodePort[strings.Index(nodePort, "://")+3 : strings.LastIndex(nodePort, ":")]
r.NBClient = nb.NewClient(&nb.APIRouterNodePort{
ServiceMgmt: r.ServiceMgmt,
NodeIP: nodeIP,
})
r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
// Check that the server is indeed serving the API already
// we use the read_auth call here because it's an API that always answers
// even when auth_token is empty.
_, err := r.NBClient.ReadAuthAPI()
return err
// if len(r.NooBaa.Status.Services.ServiceMgmt.PodPorts) != 0 {
// podPort := r.NooBaa.Status.Services.ServiceMgmt.PodPorts[0]
// podIP := podPort[strings.Index(podPort, "://")+3 : strings.LastIndex(podPort, ":")]
// r.NBClient = nb.NewClient(&nb.APIRouterPodPort{
// ServiceMgmt: r.ServiceMgmt,
// PodIP: podIP,
// })
// r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
// return nil
// }
}
// ReconcileSecretOp creates a new system in the noobaa server if not created yet.
func (r *Reconciler) ReconcileSecretOp() error {
// log := r.Logger.WithName("ReconcileSecretOp")
util.KubeCheck(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretOp)
if r.SecretOp.StringData["auth_token"] != "" {
return nil
}
if r.SecretOp.StringData["email"] == "" {
r.SecretOp.StringData["email"] = options.AdminAccountEmail
}
if r.SecretOp.StringData["password"] == "" {
r.SecretOp.StringData["password"] = util.RandomBase64(16)
r.Own(r.SecretOp)
err := r.Client.Create(r.Ctx, r.SecretOp)
if err != nil {
return err
}
}
res, err := r.NBClient.CreateAuthAPI(nb.CreateAuthParams{
System: r.Request.Name,
Role: "admin",
Email: r.SecretOp.StringData["email"],
Password: r.SecretOp.StringData["password"],
})
if err == nil {
// TODO this recovery flow does not allow us to get OperatorToken like CreateSystem
r.SecretOp.StringData["auth_token"] = res.Token
} else {
res, err := r.NBClient.CreateSystemAPI(nb.CreateSystemParams{
Name: r.Request.Name,
Email: r.SecretOp.StringData["email"],
Password: r.SecretOp.StringData["password"],
})
if err != nil {
return err
}
// TODO use res.OperatorToken after https://github.com/noobaa/noobaa-core/issues/5635
r.SecretOp.StringData["auth_token"] = res.Token
}
r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
return r.Client.Update(r.Ctx, r.SecretOp)
}
// ReconcileSecretAdmin creates the admin secret
func (r *Reconciler) ReconcileSecretAdmin() error {
log := r.Logger.WithField("func", "ReconcileSecretAdmin")
util.KubeCheck(r.SecretAdmin)
util.SecretResetStringDataFromData(r.SecretAdmin)
ns := r.Request.Namespace
name := r.Request.Name
secretAdminName := name + "-admin"
r.SecretAdmin = &corev1.Secret{}
err := r.GetObject(secretAdminName, r.SecretAdmin)
if err == nil {
return nil
}
if !errors.IsNotFound(err) {
log.Errorf("Failed getting admin secret: %v", err)
return err
}
r.SecretAdmin = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: secretAdminName,
Labels: map[string]string{"app": "noobaa"},
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
"system": name,
"email": options.AdminAccountEmail,
"password": string(r.SecretOp.Data["password"]),
},
}
log.Infof("listing accounts")
res, err := r.NBClient.ListAccountsAPI()
if err != nil {
return err
}
for _, account := range res.Accounts {
if account.Email == options.AdminAccountEmail {
if len(account.AccessKeys) > 0 {
r.SecretAdmin.StringData["AWS_ACCESS_KEY_ID"] = account.AccessKeys[0].AccessKey
r.SecretAdmin.StringData["AWS_SECRET_ACCESS_KEY"] = account.AccessKeys[0].SecretKey
}
}
}
r.Own(r.SecretAdmin)
return r.Client.Create(r.Ctx, r.SecretAdmin)
}
var readmeTemplate = template.Must(template.New("NooBaaSystem.Status.Readme").Parse(`
Welcome to NooBaa!
-----------------
Lets get started:
1. Connect to Management console:
Read your mgmt console login information (email & password) from secret: "{{.SecretAdmin.Name}}".
kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq '.data|map_values(@base64d)'
Open the management console service - take External IP/DNS or Node Port or use port forwarding:
kubectl port-forward -n {{.ServiceMgmt.Namespace}} service/{{.ServiceMgmt.Name}} 11443:8443 &
open https://localhost:11443
2. Test S3 client:
kubectl port-forward -n {{.ServiceS3.Namespace}} service/{{.ServiceS3.Name}} 10443:443 &
NOOBAA_ACCESS_KEY=$(kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq -r '.data.AWS_ACCESS_KEY_ID|@base64d')
NOOBAA_SECRET_KEY=$(kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq -r '.data.AWS_SECRET_ACCESS_KEY|@base64d')
alias s3='AWS_ACCESS_KEY_ID=$NOOBAA_ACCESS_KEY AWS_SECRET_ACCESS_KEY=$NOOBAA_SECRET_KEY aws --endpoint https://localhost:10443 --no-verify-ssl s3'
s3 ls
`))
// Complete populates the noobaa status at the end of reconcile.
func (r *Reconciler) Complete() error {
var readmeBuffer bytes.Buffer
err := readmeTemplate.Execute(&readmeBuffer, r)
if err != nil {
return err
}
r.NooBaa.Status.Readme = readmeBuffer.String()
r.NooBaa.Status.Accounts.Admin.SecretRef.Name = r.SecretAdmin.Name
r.NooBaa.Status.Accounts.Admin.SecretRef.Namespace = r.SecretAdmin.Namespace
return nil
}
// Own sets the object owner references to the noobaa system
func (r *Reconciler) Own(obj metav1.Object) {
util.Panic(controllerutil.SetControllerReference(r.NooBaa, obj, r.Scheme))
}
// GetObject gets an object by name from the request namespace.
func (r *Reconciler) GetObject(name string, obj runtime.Object) error {
return r.Client.Get(r.Ctx, client.ObjectKey{Namespace: r.Request.Namespace, Name: name}, obj)
}
// ReconcileObject is a generic call to reconcile a kubernetes object
// desiredFunc can be passed to modify the object before create/update.
// Currently we ignore enforcing a desired state, but it might be needed on upgrades.
func (r *Reconciler) ReconcileObject(obj runtime.Object, desiredFunc func()) error {
kind := obj.GetObjectKind().GroupVersionKind().Kind
objMeta, _ := meta.Accessor(obj)
log := r.Logger.
WithField("func", "ReconcileObject").
WithField("kind", kind).
WithField("name", objMeta.GetName())
r.Own(objMeta)
op, err := controllerutil.CreateOrUpdate(
r.Ctx, r.Client, obj.(runtime.Object),
func(obj runtime.Object) error {
if desiredFunc != nil {
desiredFunc()
}
return nil
},
)
if err != nil {
log.Errorf("ReconcileObject Failed: %v", err)
return err
}
log.Infof("Done. op=%s", op)
return nil
}
// SetPhase updates the status phase and conditions
func (r *Reconciler) SetPhase(phase nbv1.SystemPhase) {
r.Logger.Infof("SetPhase: %s", phase)
r.NooBaa.Status.Phase = phase
conditions := &r.NooBaa.Status.Conditions
reason := fmt.Sprintf("NooBaaSystemPhase%s", phase)
message := fmt.Sprintf("NooBaa operator system reconcile phase %s", phase)
switch phase {
case nbv1.SystemPhaseReady:
util.SetAvailableCondition(conditions, reason, message)
case nbv1.SystemPhaseRejected:
// handle rejected here too?
default:
util.SetProgressingCondition(conditions, reason, message)
}
}
| if c.Env[j].Name == "AGENT_PROFILE" {
c.Env[j].Value = fmt.Sprintf(`{ "image": "%s" }`, r.NooBaa.Status.ActualImage)
}
}
if | conditional_block |
reconciler.go | package system
import (
"bytes"
"context"
"fmt"
"strings"
"text/template"
"time"
"github.com/noobaa/noobaa-operator/build/_output/bundle"
nbv1 "github.com/noobaa/noobaa-operator/pkg/apis/noobaa/v1alpha1"
"github.com/noobaa/noobaa-operator/pkg/nb"
"github.com/noobaa/noobaa-operator/pkg/options"
"github.com/noobaa/noobaa-operator/pkg/util"
dockerref "github.com/docker/distribution/reference"
semver "github.com/hashicorp/go-version"
"github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
var (
// ContainerImageConstraint is the instantiated semver contraints used for image verification
ContainerImageConstraint, _ = semver.NewConstraint(options.ContainerImageConstraintSemver)
// NooBaaType is and empty noobaa struct used for passing the object type
NooBaaType = &nbv1.NooBaa{}
)
// Reconciler is the context for loading or reconciling a noobaa system
type Reconciler struct {
Request types.NamespacedName
Client client.Client
Scheme *runtime.Scheme
Ctx context.Context
Logger *logrus.Entry
Recorder record.EventRecorder
NBClient nb.Client
NooBaa *nbv1.NooBaa
CoreApp *appsv1.StatefulSet
ServiceMgmt *corev1.Service
ServiceS3 *corev1.Service
SecretServer *corev1.Secret
SecretOp *corev1.Secret
SecretAdmin *corev1.Secret
}
// NewReconciler initializes a reconciler to be used for loading or reconciling a noobaa system
func NewReconciler(
req types.NamespacedName,
client client.Client,
scheme *runtime.Scheme,
recorder record.EventRecorder,
) *Reconciler {
r := &Reconciler{
Request: req,
Client: client,
Scheme: scheme,
Recorder: recorder,
Ctx: context.TODO(),
Logger: logrus.WithFields(logrus.Fields{"ns": req.Namespace}),
NooBaa: util.KubeObject(bundle.File_deploy_crds_noobaa_v1alpha1_noobaa_cr_yaml).(*nbv1.NooBaa),
CoreApp: util.KubeObject(bundle.File_deploy_internal_statefulset_core_yaml).(*appsv1.StatefulSet),
ServiceMgmt: util.KubeObject(bundle.File_deploy_internal_service_mgmt_yaml).(*corev1.Service),
ServiceS3: util.KubeObject(bundle.File_deploy_internal_service_s3_yaml).(*corev1.Service),
SecretServer: util.KubeObject(bundle.File_deploy_internal_secret_empty_yaml).(*corev1.Secret),
SecretOp: util.KubeObject(bundle.File_deploy_internal_secret_empty_yaml).(*corev1.Secret),
SecretAdmin: util.KubeObject(bundle.File_deploy_internal_secret_empty_yaml).(*corev1.Secret),
}
util.SecretResetStringDataFromData(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretAdmin)
// Set Namespace
r.NooBaa.Namespace = r.Request.Namespace
r.CoreApp.Namespace = r.Request.Namespace
r.ServiceMgmt.Namespace = r.Request.Namespace
r.ServiceS3.Namespace = r.Request.Namespace
r.SecretServer.Namespace = r.Request.Namespace
r.SecretOp.Namespace = r.Request.Namespace
r.SecretAdmin.Namespace = r.Request.Namespace
// Set Names
r.NooBaa.Name = r.Request.Name
r.CoreApp.Name = r.Request.Name + "-core"
r.ServiceMgmt.Name = r.Request.Name + "-mgmt"
r.ServiceS3.Name = "s3"
r.SecretServer.Name = r.Request.Name + "-server"
r.SecretOp.Name = r.Request.Name + "-operator"
r.SecretAdmin.Name = r.Request.Name + "-admin"
return r
}
// Load reads the state of the kubernetes objects of the system
func (r *Reconciler) Load() {
util.KubeCheck(r.NooBaa)
util.KubeCheck(r.CoreApp)
util.KubeCheck(r.ServiceMgmt)
util.KubeCheck(r.ServiceS3)
util.KubeCheck(r.SecretServer)
util.KubeCheck(r.SecretOp)
util.KubeCheck(r.SecretAdmin)
util.SecretResetStringDataFromData(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretAdmin)
}
// Reconcile reads that state of the cluster for a System object,
// and makes changes based on the state read and what is in the System.Spec.
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *Reconciler) Reconcile() (reconcile.Result, error) {
log := r.Logger.WithField("func", "Reconcile")
log.Infof("Start ...")
util.KubeCheck(r.NooBaa)
if r.NooBaa.UID == "" {
log.Infof("NooBaa not found or already deleted. Skip reconcile.")
return reconcile.Result{}, nil
}
err := r.RunReconcile()
if util.IsPersistentError(err) {
log.Errorf("❌ Persistent Error: %s", err)
util.SetErrorCondition(&r.NooBaa.Status.Conditions, err)
r.UpdateStatus()
return reconcile.Result{}, nil
}
if err != nil {
log.Warnf("⏳ Temporary Error: %s", err)
util.SetErrorCondition(&r.NooBaa.Status.Conditions, err)
r.UpdateStatus()
return reconcile.Result{RequeueAfter: 2 * time.Second}, nil
}
r.UpdateStatus()
log.Infof("✅ Done")
return reconcile.Result{}, nil
}
// UpdateStatus updates the system status in kubernetes from the memory
func (r *Reconciler) UpdateStatus() error {
log := r.Logger.WithField("func", "UpdateStatus")
log.Infof("Updating noobaa status")
r.NooBaa.Status.ObservedGeneration = r.NooBaa.Generation
return r.Client.Status().Update(r.Ctx, r.NooBaa)
}
// RunReconcile runs the reconcile flow and populates System.Status.
func (r *Reconciler) RunReconcile() error {
r.SetPhase(nbv1.SystemPhaseVerifying)
if err := r.CheckSystemCR(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseCreating)
if err := r.ReconcileSecretServer(); err != nil {
return err
}
if err := r.ReconcileObject(r.CoreApp, r.SetDesiredCoreApp); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceMgmt, r.SetDesiredServiceMgmt); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceS3, r.SetDesiredServiceS3); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseConnecting)
if err := r.Connect(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseConfiguring)
if err := r.ReconcileSecretOp(); err != nil {
return err
}
if err := r.ReconcileSecretAdmin(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseReady)
return r.Complete()
}
// ReconcileSecretServer creates a secret needed for the server pod
func (r *Reconciler) ReconcileSecretServer() error {
util.KubeCheck(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretServer)
if r.SecretServer.StringData["jwt"] == "" {
r.SecretServer.StringData["jwt"] = util.RandomBase64(16)
}
if r.SecretServer.StringData["server_secret"] == "" {
r.SecretServer.StringData["server_secret"] = util.RandomHex(4)
}
r.Own(r.SecretServer)
util.KubeCreateSkipExisting(r.SecretServer)
return nil
}
// SetDesiredCoreApp updates the CoreApp as desired for reconciling
func (r *Reconciler) SetDesiredCoreApp() {
r.CoreApp.Spec.Template.Labels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-mgmt"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-s3"] = r.Request.Name
r.CoreApp.Spec.Selector.MatchLabels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.ServiceName = r.ServiceMgmt.Name
podSpec := &r.CoreApp.Spec.Template.Spec
podSpec.ServiceAccountName = "noobaa-operator" // TODO do we use the same SA?
for i := range podSpec.InitContainers {
c := &podSpec.InitContainers[i]
if c.Name == "init-mongo" {
c.Image = r.NooBaa.Status.ActualImage
}
}
for i := range podSpec.Containers {
c := &podSpec.Containers[i]
if c.Name == "noobaa-server" {
c.Image = r.NooBaa.Status.ActualImage
for j := range c.Env {
if c.Env[j].Name == "AGENT_PROFILE" {
c.Env[j].Value = fmt.Sprintf(`{ "image": "%s" }`, r.NooBaa.Status.ActualImage)
}
}
if r.NooBaa.Spec.CoreResources != nil {
c.Resources = *r.NooBaa.Spec.CoreResources
}
} else if c.Name == "mongodb" {
if r.NooBaa.Spec.MongoImage == nil {
c.Image = options.MongoImage
} else {
c.Image = *r.NooBaa.Spec.MongoImage
}
if r.NooBaa.Spec.MongoResources != nil {
c.Resources = *r.NooBaa.Spec.MongoResources
}
}
}
if r.NooBaa.Spec.ImagePullSecret == nil {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{}
} else {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{*r.NooBaa.Spec.ImagePullSecret}
}
for i := range r.CoreApp.Spec.VolumeClaimTemplates {
pvc := &r.CoreApp.Spec.VolumeClaimTemplates[i]
pvc.Spec.StorageClassName = r.NooBaa.Spec.StorageClassName
// TODO we want to own the PVC's by NooBaa system but get errors on openshift:
// Warning FailedCreate 56s statefulset-controller
// create Pod noobaa-core-0 in StatefulSet noobaa-core failed error:
// Failed to create PVC mongo-datadir-noobaa-core-0:
// persistentvolumeclaims "mongo-datadir-noobaa-core-0" is forbidden:
// cannot set blockOwnerDeletion if an ownerReference refers to a resource
// you can't set finalizers on: , <nil>, ...
// r.Own(pvc)
}
}
// SetDesiredServiceMgmt updates the ServiceMgmt as desired for reconciling
func (r *Reconciler) SetDesiredServiceMgmt() {
r.ServiceMgmt.Spec.Selector["noobaa-mgmt"] = r.Request.Name
}
// SetDesiredServiceS3 updates the ServiceS3 as desired for reconciling
func (r *Reconciler) SetDesiredServiceS3() {
r.ServiceS3.Spec.Selector["noobaa-s3"] = r.Request.Name
}
// CheckSystemCR checks the validity of the system CR
// (i.e system.metadata.name and system.spec.image)
// and updates the status accordingly
func (r *Reconciler) CheckSystemCR() error {
log := r.Logger.WithField("func", "CheckSystemCR")
// we assume a single system per ns here
if r.NooBaa.Name != options.SystemName {
err := fmt.Errorf("Invalid system name %q expected %q", r.NooBaa.Name, options.SystemName)
log.Errorf("%s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "BadName", "%s", err)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
specImage := options.ContainerImage
if r.NooBaa.Spec.Image != nil {
specImage = *r.NooBaa.Spec.Image
}
// Parse the image spec as a docker image url
imageRef, err := dockerref.Parse(specImage)
// If the image cannot be parsed log the incident and mark as persistent error
// since we don't need to retry until the spec is updated.
if err != nil {
log.Errorf("Invalid image %s: %s", specImage, err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning,
"BadImage", `Invalid image requested %q`, specImage)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
// Get the image name and tag
imageName := ""
imageTag := ""
switch image := imageRef.(type) {
case dockerref.NamedTagged:
log.Infof("Parsed image (NamedTagged) %v", image)
imageName = image.Name()
imageTag = image.Tag()
case dockerref.Tagged:
log.Infof("Parsed image (Tagged) %v", image)
imageTag = image.Tag()
case dockerref.Named:
log.Infof("Parsed image (Named) %v", image)
imageName = image.Name()
default:
log.Infof("Parsed image (unstructured) %v", image)
}
if imageName == options.ContainerImageName {
version, err := semver.NewVersion(imageTag)
if err == nil {
log.Infof("Parsed version %q from image tag %q", version.String(), imageTag)
if !ContainerImageConstraint.Check(version) {
err := fmt.Errorf(`Unsupported image version %q not matching contraints %q`,
imageRef, ContainerImageConstraint)
log.Errorf("%s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "BadImage", "%s", err)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
} else {
log.Infof("Using custom image %q contraints %q", imageRef.String(), ContainerImageConstraint.String())
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeNormal,
"CustomImage", `Custom image version requested %q, I hope you know what you're doing ...`, imageRef)
}
}
} else {
log.Infof("Using custom image name %q the default is %q", imageRef.String(), options.ContainerImageName)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeNormal,
"CustomImage", `Custom image requested %q, I hope you know what you're doing ...`, imageRef)
}
}
// Set ActualImage to be updated in the noobaa status
r.NooBaa.Status.ActualImage = specImage
return nil
}
// CheckServiceStatus populates the status of a service by detecting all of its addresses
func (r *Reconciler) CheckServiceStatus(srv *corev1.Service, status *nbv1.ServiceStatus, portName string) {
log := r.Logger.WithField("func", "CheckServiceStatus").WithField("service", srv.Name)
*status = nbv1.ServiceStatus{}
servicePort := nb.FindPortByName(srv, portName)
proto := "http"
if strings.HasSuffix(portName, "https") {
proto = "https"
}
// Node IP:Port
// Pod IP:Port
pods := corev1.PodList{}
podsListOptions := &client.ListOptions{
Namespace: r.Request.Namespace,
LabelSelector: labels.SelectorFromSet(srv.Spec.Selector),
}
err := r.Client.List(r.Ctx, podsListOptions, &pods)
if err == nil {
for _, pod := range pods.Items {
if pod.Status.Phase == corev1.PodRunning {
if pod.Status.HostIP != "" {
status.NodePorts = append(
status.NodePorts,
fmt.Sprintf("%s://%s:%d", proto, pod.Status.HostIP, servicePort.NodePort),
)
}
if pod.Status.PodIP != "" {
status.PodPorts = append(
status.PodPorts,
fmt.Sprintf("%s://%s:%s", proto, pod.Status.PodIP, servicePort.TargetPort.String()),
)
}
}
}
}
// Cluster IP:Port (of the service)
if srv.Spec.ClusterIP != "" {
status.InternalIP = append(
status.InternalIP,
fmt.Sprintf("%s://%s:%d", proto, srv.Spec.ClusterIP, servicePort.Port),
)
status.InternalDNS = append(
status.InternalDNS,
fmt.Sprintf("%s://%s.%s:%d", proto, srv.Name, srv.Namespace, servicePort.Port),
)
}
// LoadBalancer IP:Port (of the service)
if srv.Status.LoadBalancer.Ingress != nil {
for _, lb := range srv.Status.LoadBalancer.Ingress {
if lb.IP != "" {
status.ExternalIP = append(
status.ExternalIP,
fmt.Sprintf("%s://%s:%d", proto, lb.IP, servicePort.Port),
)
}
if lb.Hostname != "" {
status.ExternalDNS = append(
status.ExternalDNS,
fmt.Sprintf("%s://%s:%d", proto, lb.Hostname, servicePort.Port),
)
}
}
}
// External IP:Port (of the service)
if srv.Spec.ExternalIPs != nil {
for _, ip := range srv.Spec.ExternalIPs {
status.ExternalIP = append(
status.ExternalIP,
fmt.Sprintf("%s://%s:%d", proto, ip, servicePort.Port),
)
}
}
log.Infof("Collected addresses: %+v", status)
}
// Connect initializes the noobaa client for making calls to the server.
func (r *Reconciler) Connect() error {
r.CheckServiceStatus(r.ServiceMgmt, &r.NooBaa.Status.Services.ServiceMgmt, "mgmt-https")
r.CheckServiceStatus(r.ServiceS3, &r.NooBaa.Status.Services.ServiceS3, "s3-https")
if len(r.NooBaa.Status.Services.ServiceMgmt.NodePorts) == 0 {
return fmt.Errorf("core pod port not ready yet")
}
nodePort := r.NooBaa.Status.Services.ServiceMgmt.NodePorts[0]
nodeIP := nodePort[strings.Index(nodePort, "://")+3 : strings.LastIndex(nodePort, ":")]
r.NBClient = nb.NewClient(&nb.APIRouterNodePort{
ServiceMgmt: r.ServiceMgmt,
NodeIP: nodeIP,
})
r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
// Check that the server is indeed serving the API already
// we use the read_auth call here because it's an API that always answers
// even when auth_token is empty.
_, err := r.NBClient.ReadAuthAPI()
return err
// if len(r.NooBaa.Status.Services.ServiceMgmt.PodPorts) != 0 {
// podPort := r.NooBaa.Status.Services.ServiceMgmt.PodPorts[0]
// podIP := podPort[strings.Index(podPort, "://")+3 : strings.LastIndex(podPort, ":")]
// r.NBClient = nb.NewClient(&nb.APIRouterPodPort{
// ServiceMgmt: r.ServiceMgmt,
// PodIP: podIP,
// })
// r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
// return nil
// }
}
// ReconcileSecretOp creates a new system in the noobaa server if not created yet.
func (r *Reconciler) ReconcileSecretOp() error {
// log := r.Logger.WithName("ReconcileSecretOp")
util.KubeCheck(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretOp)
if r.SecretOp.StringData["auth_token"] != "" {
return nil
}
if r.SecretOp.StringData["email"] == "" {
r.SecretOp.StringData["email"] = options.AdminAccountEmail
}
if r.SecretOp.StringData["password"] == "" {
r.SecretOp.StringData["password"] = util.RandomBase64(16)
r.Own(r.SecretOp)
err := r.Client.Create(r.Ctx, r.SecretOp)
if err != nil {
return err
}
}
res, err := r.NBClient.CreateAuthAPI(nb.CreateAuthParams{
System: r.Request.Name,
Role: "admin",
Email: r.SecretOp.StringData["email"],
Password: r.SecretOp.StringData["password"],
})
if err == nil {
// TODO this recovery flow does not allow us to get OperatorToken like CreateSystem
r.SecretOp.StringData["auth_token"] = res.Token
} else {
res, err := r.NBClient.CreateSystemAPI(nb.CreateSystemParams{
Name: r.Request.Name,
Email: r.SecretOp.StringData["email"],
Password: r.SecretOp.StringData["password"],
})
if err != nil {
return err
}
// TODO use res.OperatorToken after https://github.com/noobaa/noobaa-core/issues/5635
r.SecretOp.StringData["auth_token"] = res.Token
}
r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
return r.Client.Update(r.Ctx, r.SecretOp)
}
// ReconcileSecretAdmin creates the admin secret
func (r *Reconciler) ReconcileSecretAdmin() error {
lo | readmeTemplate = template.Must(template.New("NooBaaSystem.Status.Readme").Parse(`
Welcome to NooBaa!
-----------------
Lets get started:
1. Connect to Management console:
Read your mgmt console login information (email & password) from secret: "{{.SecretAdmin.Name}}".
kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq '.data|map_values(@base64d)'
Open the management console service - take External IP/DNS or Node Port or use port forwarding:
kubectl port-forward -n {{.ServiceMgmt.Namespace}} service/{{.ServiceMgmt.Name}} 11443:8443 &
open https://localhost:11443
2. Test S3 client:
kubectl port-forward -n {{.ServiceS3.Namespace}} service/{{.ServiceS3.Name}} 10443:443 &
NOOBAA_ACCESS_KEY=$(kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq -r '.data.AWS_ACCESS_KEY_ID|@base64d')
NOOBAA_SECRET_KEY=$(kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq -r '.data.AWS_SECRET_ACCESS_KEY|@base64d')
alias s3='AWS_ACCESS_KEY_ID=$NOOBAA_ACCESS_KEY AWS_SECRET_ACCESS_KEY=$NOOBAA_SECRET_KEY aws --endpoint https://localhost:10443 --no-verify-ssl s3'
s3 ls
`))
// Complete populates the noobaa status at the end of reconcile.
func (r *Reconciler) Complete() error {
var readmeBuffer bytes.Buffer
err := readmeTemplate.Execute(&readmeBuffer, r)
if err != nil {
return err
}
r.NooBaa.Status.Readme = readmeBuffer.String()
r.NooBaa.Status.Accounts.Admin.SecretRef.Name = r.SecretAdmin.Name
r.NooBaa.Status.Accounts.Admin.SecretRef.Namespace = r.SecretAdmin.Namespace
return nil
}
// Own sets the object owner references to the noobaa system
func (r *Reconciler) Own(obj metav1.Object) {
util.Panic(controllerutil.SetControllerReference(r.NooBaa, obj, r.Scheme))
}
// GetObject gets an object by name from the request namespace.
func (r *Reconciler) GetObject(name string, obj runtime.Object) error {
return r.Client.Get(r.Ctx, client.ObjectKey{Namespace: r.Request.Namespace, Name: name}, obj)
}
// ReconcileObject is a generic call to reconcile a kubernetes object
// desiredFunc can be passed to modify the object before create/update.
// Currently we ignore enforcing a desired state, but it might be needed on upgrades.
func (r *Reconciler) ReconcileObject(obj runtime.Object, desiredFunc func()) error {
kind := obj.GetObjectKind().GroupVersionKind().Kind
objMeta, _ := meta.Accessor(obj)
log := r.Logger.
WithField("func", "ReconcileObject").
WithField("kind", kind).
WithField("name", objMeta.GetName())
r.Own(objMeta)
op, err := controllerutil.CreateOrUpdate(
r.Ctx, r.Client, obj.(runtime.Object),
func(obj runtime.Object) error {
if desiredFunc != nil {
desiredFunc()
}
return nil
},
)
if err != nil {
log.Errorf("ReconcileObject Failed: %v", err)
return err
}
log.Infof("Done. op=%s", op)
return nil
}
// SetPhase updates the status phase and conditions
func (r *Reconciler) SetPhase(phase nbv1.SystemPhase) {
r.Logger.Infof("SetPhase: %s", phase)
r.NooBaa.Status.Phase = phase
conditions := &r.NooBaa.Status.Conditions
reason := fmt.Sprintf("NooBaaSystemPhase%s", phase)
message := fmt.Sprintf("NooBaa operator system reconcile phase %s", phase)
switch phase {
case nbv1.SystemPhaseReady:
util.SetAvailableCondition(conditions, reason, message)
case nbv1.SystemPhaseRejected:
// handle rejected here too?
default:
util.SetProgressingCondition(conditions, reason, message)
}
}
| g := r.Logger.WithField("func", "ReconcileSecretAdmin")
util.KubeCheck(r.SecretAdmin)
util.SecretResetStringDataFromData(r.SecretAdmin)
ns := r.Request.Namespace
name := r.Request.Name
secretAdminName := name + "-admin"
r.SecretAdmin = &corev1.Secret{}
err := r.GetObject(secretAdminName, r.SecretAdmin)
if err == nil {
return nil
}
if !errors.IsNotFound(err) {
log.Errorf("Failed getting admin secret: %v", err)
return err
}
r.SecretAdmin = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: secretAdminName,
Labels: map[string]string{"app": "noobaa"},
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
"system": name,
"email": options.AdminAccountEmail,
"password": string(r.SecretOp.Data["password"]),
},
}
log.Infof("listing accounts")
res, err := r.NBClient.ListAccountsAPI()
if err != nil {
return err
}
for _, account := range res.Accounts {
if account.Email == options.AdminAccountEmail {
if len(account.AccessKeys) > 0 {
r.SecretAdmin.StringData["AWS_ACCESS_KEY_ID"] = account.AccessKeys[0].AccessKey
r.SecretAdmin.StringData["AWS_SECRET_ACCESS_KEY"] = account.AccessKeys[0].SecretKey
}
}
}
r.Own(r.SecretAdmin)
return r.Client.Create(r.Ctx, r.SecretAdmin)
}
var | identifier_body |
reconciler.go | package system
import (
"bytes"
"context"
"fmt"
"strings"
"text/template"
"time"
"github.com/noobaa/noobaa-operator/build/_output/bundle"
nbv1 "github.com/noobaa/noobaa-operator/pkg/apis/noobaa/v1alpha1"
"github.com/noobaa/noobaa-operator/pkg/nb"
"github.com/noobaa/noobaa-operator/pkg/options"
"github.com/noobaa/noobaa-operator/pkg/util"
dockerref "github.com/docker/distribution/reference"
semver "github.com/hashicorp/go-version"
"github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
var (
// ContainerImageConstraint is the instantiated semver contraints used for image verification
ContainerImageConstraint, _ = semver.NewConstraint(options.ContainerImageConstraintSemver)
// NooBaaType is and empty noobaa struct used for passing the object type
NooBaaType = &nbv1.NooBaa{}
)
// Reconciler is the context for loading or reconciling a noobaa system
type Reconciler struct {
Request types.NamespacedName
Client client.Client
Scheme *runtime.Scheme
Ctx context.Context
Logger *logrus.Entry
Recorder record.EventRecorder
NBClient nb.Client
NooBaa *nbv1.NooBaa
CoreApp *appsv1.StatefulSet
ServiceMgmt *corev1.Service
ServiceS3 *corev1.Service
SecretServer *corev1.Secret
SecretOp *corev1.Secret
SecretAdmin *corev1.Secret
}
// NewReconciler initializes a reconciler to be used for loading or reconciling a noobaa system
func NewReconciler(
req types.NamespacedName,
client client.Client,
scheme *runtime.Scheme,
recorder record.EventRecorder,
) *Reconciler {
r := &Reconciler{
Request: req,
Client: client,
Scheme: scheme,
Recorder: recorder,
Ctx: context.TODO(),
Logger: logrus.WithFields(logrus.Fields{"ns": req.Namespace}),
NooBaa: util.KubeObject(bundle.File_deploy_crds_noobaa_v1alpha1_noobaa_cr_yaml).(*nbv1.NooBaa),
CoreApp: util.KubeObject(bundle.File_deploy_internal_statefulset_core_yaml).(*appsv1.StatefulSet),
ServiceMgmt: util.KubeObject(bundle.File_deploy_internal_service_mgmt_yaml).(*corev1.Service),
ServiceS3: util.KubeObject(bundle.File_deploy_internal_service_s3_yaml).(*corev1.Service),
SecretServer: util.KubeObject(bundle.File_deploy_internal_secret_empty_yaml).(*corev1.Secret),
SecretOp: util.KubeObject(bundle.File_deploy_internal_secret_empty_yaml).(*corev1.Secret),
SecretAdmin: util.KubeObject(bundle.File_deploy_internal_secret_empty_yaml).(*corev1.Secret),
}
util.SecretResetStringDataFromData(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretAdmin)
// Set Namespace
r.NooBaa.Namespace = r.Request.Namespace
r.CoreApp.Namespace = r.Request.Namespace
r.ServiceMgmt.Namespace = r.Request.Namespace
r.ServiceS3.Namespace = r.Request.Namespace
r.SecretServer.Namespace = r.Request.Namespace
r.SecretOp.Namespace = r.Request.Namespace
r.SecretAdmin.Namespace = r.Request.Namespace
// Set Names
r.NooBaa.Name = r.Request.Name
r.CoreApp.Name = r.Request.Name + "-core"
r.ServiceMgmt.Name = r.Request.Name + "-mgmt"
r.ServiceS3.Name = "s3"
r.SecretServer.Name = r.Request.Name + "-server"
r.SecretOp.Name = r.Request.Name + "-operator"
r.SecretAdmin.Name = r.Request.Name + "-admin"
return r
}
// Load reads the state of the kubernetes objects of the system
func (r *Reconciler) Load() {
util.KubeCheck(r.NooBaa)
util.KubeCheck(r.CoreApp)
util.KubeCheck(r.ServiceMgmt)
util.KubeCheck(r.ServiceS3)
util.KubeCheck(r.SecretServer)
util.KubeCheck(r.SecretOp)
util.KubeCheck(r.SecretAdmin)
util.SecretResetStringDataFromData(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretAdmin)
}
// Reconcile reads that state of the cluster for a System object,
// and makes changes based on the state read and what is in the System.Spec.
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *Reconciler) Reconcile() (reconcile.Result, error) {
log := r.Logger.WithField("func", "Reconcile")
log.Infof("Start ...")
util.KubeCheck(r.NooBaa)
if r.NooBaa.UID == "" {
log.Infof("NooBaa not found or already deleted. Skip reconcile.")
return reconcile.Result{}, nil
}
err := r.RunReconcile()
if util.IsPersistentError(err) {
log.Errorf("❌ Persistent Error: %s", err)
util.SetErrorCondition(&r.NooBaa.Status.Conditions, err)
r.UpdateStatus()
return reconcile.Result{}, nil
}
if err != nil {
log.Warnf("⏳ Temporary Error: %s", err)
util.SetErrorCondition(&r.NooBaa.Status.Conditions, err)
r.UpdateStatus()
return reconcile.Result{RequeueAfter: 2 * time.Second}, nil
}
r.UpdateStatus()
log.Infof("✅ Done")
return reconcile.Result{}, nil
}
// UpdateStatus updates the system status in kubernetes from the memory
func (r *Reconciler) UpdateStatus() error {
log := r.Logger.WithField("func", "UpdateStatus")
log.Infof("Updating noobaa status")
r.NooBaa.Status.ObservedGeneration = r.NooBaa.Generation
return r.Client.Status().Update(r.Ctx, r.NooBaa)
}
// RunReconcile runs the reconcile flow and populates System.Status.
func (r *Reconciler) RunReconcile() error {
r.SetPhase(nbv1.SystemPhaseVerifying)
if err := r.CheckSystemCR(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseCreating)
if err := r.ReconcileSecretServer(); err != nil {
return err
}
if err := r.ReconcileObject(r.CoreApp, r.SetDesiredCoreApp); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceMgmt, r.SetDesiredServiceMgmt); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceS3, r.SetDesiredServiceS3); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseConnecting)
if err := r.Connect(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseConfiguring)
if err := r.ReconcileSecretOp(); err != nil {
return err
}
if err := r.ReconcileSecretAdmin(); err != nil {
return err
}
r.SetPhase(nbv1.SystemPhaseReady)
return r.Complete()
}
// ReconcileSecretServer creates a secret needed for the server pod
func (r *Reconciler) ReconcileSecretServer() error {
util.KubeCheck(r.SecretServer)
util.SecretResetStringDataFromData(r.SecretServer)
if r.SecretServer.StringData["jwt"] == "" {
r.SecretServer.StringData["jwt"] = util.RandomBase64(16)
}
if r.SecretServer.StringData["server_secret"] == "" {
r.SecretServer.StringData["server_secret"] = util.RandomHex(4)
}
r.Own(r.SecretServer)
util.KubeCreateSkipExisting(r.SecretServer)
return nil
}
// SetDesiredCoreApp updates the CoreApp as desired for reconciling
func (r *Reconciler) SetDesiredCoreApp() {
r.CoreApp.Spec.Template.Labels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-mgmt"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-s3"] = r.Request.Name
r.CoreApp.Spec.Selector.MatchLabels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.ServiceName = r.ServiceMgmt.Name
podSpec := &r.CoreApp.Spec.Template.Spec
podSpec.ServiceAccountName = "noobaa-operator" // TODO do we use the same SA?
for i := range podSpec.InitContainers {
c := &podSpec.InitContainers[i]
if c.Name == "init-mongo" {
c.Image = r.NooBaa.Status.ActualImage
}
}
for i := range podSpec.Containers {
c := &podSpec.Containers[i]
if c.Name == "noobaa-server" {
c.Image = r.NooBaa.Status.ActualImage
for j := range c.Env {
if c.Env[j].Name == "AGENT_PROFILE" {
c.Env[j].Value = fmt.Sprintf(`{ "image": "%s" }`, r.NooBaa.Status.ActualImage)
}
}
if r.NooBaa.Spec.CoreResources != nil {
c.Resources = *r.NooBaa.Spec.CoreResources
}
} else if c.Name == "mongodb" {
if r.NooBaa.Spec.MongoImage == nil {
c.Image = options.MongoImage
} else {
c.Image = *r.NooBaa.Spec.MongoImage
}
if r.NooBaa.Spec.MongoResources != nil {
c.Resources = *r.NooBaa.Spec.MongoResources
}
}
}
if r.NooBaa.Spec.ImagePullSecret == nil {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{}
} else {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{*r.NooBaa.Spec.ImagePullSecret}
}
for i := range r.CoreApp.Spec.VolumeClaimTemplates {
pvc := &r.CoreApp.Spec.VolumeClaimTemplates[i]
pvc.Spec.StorageClassName = r.NooBaa.Spec.StorageClassName
// TODO we want to own the PVC's by NooBaa system but get errors on openshift:
// Warning FailedCreate 56s statefulset-controller
// create Pod noobaa-core-0 in StatefulSet noobaa-core failed error:
// Failed to create PVC mongo-datadir-noobaa-core-0:
// persistentvolumeclaims "mongo-datadir-noobaa-core-0" is forbidden:
// cannot set blockOwnerDeletion if an ownerReference refers to a resource
// you can't set finalizers on: , <nil>, ...
// r.Own(pvc)
}
}
// SetDesiredServiceMgmt updates the ServiceMgmt as desired for reconciling
func (r *Reconciler) SetDesiredServiceMgmt() {
r.ServiceMgmt.Spec.Selector["noobaa-mgmt"] = r.Request.Name
}
// SetDesiredServiceS3 updates the ServiceS3 as desired for reconciling
func (r *Reconciler) SetDesiredServiceS3() {
r.ServiceS3.Spec.Selector["noobaa-s3"] = r.Request.Name
}
// CheckSystemCR checks the validity of the system CR
// (i.e system.metadata.name and system.spec.image)
// and updates the status accordingly
func (r *Reconciler) CheckSystemCR() error {
log := r.Logger.WithField("func", "CheckSystemCR")
// we assume a single system per ns here
if r.NooBaa.Name != options.SystemName {
err := fmt.Errorf("Invalid system name %q expected %q", r.NooBaa.Name, options.SystemName)
log.Errorf("%s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "BadName", "%s", err)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
specImage := options.ContainerImage
if r.NooBaa.Spec.Image != nil {
specImage = *r.NooBaa.Spec.Image
}
// Parse the image spec as a docker image url
imageRef, err := dockerref.Parse(specImage)
// If the image cannot be parsed log the incident and mark as persistent error
// since we don't need to retry until the spec is updated.
if err != nil {
log.Errorf("Invalid image %s: %s", specImage, err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning,
"BadImage", `Invalid image requested %q`, specImage)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
// Get the image name and tag
imageName := ""
imageTag := ""
switch image := imageRef.(type) {
case dockerref.NamedTagged:
log.Infof("Parsed image (NamedTagged) %v", image)
imageName = image.Name()
imageTag = image.Tag()
case dockerref.Tagged:
log.Infof("Parsed image (Tagged) %v", image)
imageTag = image.Tag()
case dockerref.Named:
log.Infof("Parsed image (Named) %v", image)
imageName = image.Name()
default:
log.Infof("Parsed image (unstructured) %v", image)
}
if imageName == options.ContainerImageName {
version, err := semver.NewVersion(imageTag)
if err == nil {
log.Infof("Parsed version %q from image tag %q", version.String(), imageTag)
if !ContainerImageConstraint.Check(version) {
err := fmt.Errorf(`Unsupported image version %q not matching contraints %q`,
imageRef, ContainerImageConstraint)
log.Errorf("%s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "BadImage", "%s", err)
}
r.SetPhase(nbv1.SystemPhaseRejected)
return util.NewPersistentError(err)
}
} else {
log.Infof("Using custom image %q contraints %q", imageRef.String(), ContainerImageConstraint.String())
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeNormal,
"CustomImage", `Custom image version requested %q, I hope you know what you're doing ...`, imageRef)
}
}
} else {
log.Infof("Using custom image name %q the default is %q", imageRef.String(), options.ContainerImageName)
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeNormal,
"CustomImage", `Custom image requested %q, I hope you know what you're doing ...`, imageRef)
}
}
// Set ActualImage to be updated in the noobaa status
r.NooBaa.Status.ActualImage = specImage
return nil
}
// CheckServiceStatus populates the status of a service by detecting all of its addresses
func (r *Reconciler) CheckServiceStatus(srv *corev1.Service, status *nbv1.ServiceStatus, portName string) {
log := r.Logger.WithField("func", "CheckServiceStatus").WithField("service", srv.Name)
*status = nbv1.ServiceStatus{}
servicePort := nb.FindPortByName(srv, portName)
proto := "http"
if strings.HasSuffix(portName, "https") {
proto = "https"
}
// Node IP:Port
// Pod IP:Port
pods := corev1.PodList{}
podsListOptions := &client.ListOptions{
Namespace: r.Request.Namespace,
LabelSelector: labels.SelectorFromSet(srv.Spec.Selector),
}
err := r.Client.List(r.Ctx, podsListOptions, &pods)
if err == nil {
for _, pod := range pods.Items {
if pod.Status.Phase == corev1.PodRunning {
if pod.Status.HostIP != "" {
status.NodePorts = append(
status.NodePorts,
fmt.Sprintf("%s://%s:%d", proto, pod.Status.HostIP, servicePort.NodePort),
)
}
if pod.Status.PodIP != "" {
status.PodPorts = append(
status.PodPorts,
fmt.Sprintf("%s://%s:%s", proto, pod.Status.PodIP, servicePort.TargetPort.String()),
)
}
}
}
}
// Cluster IP:Port (of the service)
if srv.Spec.ClusterIP != "" {
status.InternalIP = append(
status.InternalIP,
fmt.Sprintf("%s://%s:%d", proto, srv.Spec.ClusterIP, servicePort.Port),
)
status.InternalDNS = append(
status.InternalDNS,
fmt.Sprintf("%s://%s.%s:%d", proto, srv.Name, srv.Namespace, servicePort.Port),
)
}
// LoadBalancer IP:Port (of the service)
if srv.Status.LoadBalancer.Ingress != nil {
for _, lb := range srv.Status.LoadBalancer.Ingress {
if lb.IP != "" {
status.ExternalIP = append(
status.ExternalIP,
fmt.Sprintf("%s://%s:%d", proto, lb.IP, servicePort.Port),
)
}
if lb.Hostname != "" {
status.ExternalDNS = append(
status.ExternalDNS,
fmt.Sprintf("%s://%s:%d", proto, lb.Hostname, servicePort.Port),
)
}
}
}
// External IP:Port (of the service)
if srv.Spec.ExternalIPs != nil {
for _, ip := range srv.Spec.ExternalIPs {
status.ExternalIP = append(
status.ExternalIP,
fmt.Sprintf("%s://%s:%d", proto, ip, servicePort.Port),
)
}
}
log.Infof("Collected addresses: %+v", status)
}
// Connect initializes the noobaa client for making calls to the server.
func (r *Reconciler) Connect() error {
r.CheckServiceStatus(r.ServiceMgmt, &r.NooBaa.Status.Services.ServiceMgmt, "mgmt-https")
r.CheckServiceStatus(r.ServiceS3, &r.NooBaa.Status.Services.ServiceS3, "s3-https")
if len(r.NooBaa.Status.Services.ServiceMgmt.NodePorts) == 0 {
return fmt.Errorf("core pod port not ready yet")
}
nodePort := r.NooBaa.Status.Services.ServiceMgmt.NodePorts[0]
nodeIP := nodePort[strings.Index(nodePort, "://")+3 : strings.LastIndex(nodePort, ":")]
r.NBClient = nb.NewClient(&nb.APIRouterNodePort{
ServiceMgmt: r.ServiceMgmt,
NodeIP: nodeIP,
})
r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
// Check that the server is indeed serving the API already
// we use the read_auth call here because it's an API that always answers
// even when auth_token is empty.
_, err := r.NBClient.ReadAuthAPI()
return err
// if len(r.NooBaa.Status.Services.ServiceMgmt.PodPorts) != 0 {
// podPort := r.NooBaa.Status.Services.ServiceMgmt.PodPorts[0]
// podIP := podPort[strings.Index(podPort, "://")+3 : strings.LastIndex(podPort, ":")]
// r.NBClient = nb.NewClient(&nb.APIRouterPodPort{
// ServiceMgmt: r.ServiceMgmt,
// PodIP: podIP,
// })
// r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
// return nil
// }
}
// ReconcileSecretOp creates a new system in the noobaa server if not created yet.
func (r *Reconciler) ReconcileSecretOp() error {
// log := r.Logger.WithName("ReconcileSecretOp")
util.KubeCheck(r.SecretOp)
util.SecretResetStringDataFromData(r.SecretOp)
if r.SecretOp.StringData["auth_token"] != "" {
return nil
}
if r.SecretOp.StringData["email"] == "" {
r.SecretOp.StringData["email"] = options.AdminAccountEmail
}
if r.SecretOp.StringData["password"] == "" {
r.SecretOp.StringData["password"] = util.RandomBase64(16)
r.Own(r.SecretOp)
err := r.Client.Create(r.Ctx, r.SecretOp)
if err != nil {
return err
}
}
res, err := r.NBClient.CreateAuthAPI(nb.CreateAuthParams{
System: r.Request.Name,
Role: "admin",
Email: r.SecretOp.StringData["email"],
Password: r.SecretOp.StringData["password"],
})
if err == nil {
// TODO this recovery flow does not allow us to get OperatorToken like CreateSystem
r.SecretOp.StringData["auth_token"] = res.Token
} else {
res, err := r.NBClient.CreateSystemAPI(nb.CreateSystemParams{
Name: r.Request.Name,
Email: r.SecretOp.StringData["email"],
Password: r.SecretOp.StringData["password"],
})
if err != nil {
return err
}
// TODO use res.OperatorToken after https://github.com/noobaa/noobaa-core/issues/5635
r.SecretOp.StringData["auth_token"] = res.Token
}
r.NBClient.SetAuthToken(r.SecretOp.StringData["auth_token"])
return r.Client.Update(r.Ctx, r.SecretOp)
}
// ReconcileSecretAdmin creates the admin secret
func (r *Reconciler) ReconcileSecretAdmin() error {
log := r.Logger.WithField("func", "ReconcileSecretAdmin")
util.KubeCheck(r.SecretAdmin)
util.SecretResetStringDataFromData(r.SecretAdmin)
ns := r.Request.Namespace
name := r.Request.Name
secretAdminName := name + "-admin"
r.SecretAdmin = &corev1.Secret{}
err := r.GetObject(secretAdminName, r.SecretAdmin)
if err == nil {
return nil
}
if !errors.IsNotFound(err) {
log.Errorf("Failed getting admin secret: %v", err)
return err
}
r.SecretAdmin = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: secretAdminName,
Labels: map[string]string{"app": "noobaa"},
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
"system": name,
"email": options.AdminAccountEmail,
"password": string(r.SecretOp.Data["password"]),
},
}
log.Infof("listing accounts")
res, err := r.NBClient.ListAccountsAPI()
if err != nil {
return err
}
for _, account := range res.Accounts {
if account.Email == options.AdminAccountEmail {
if len(account.AccessKeys) > 0 {
r.SecretAdmin.StringData["AWS_ACCESS_KEY_ID"] = account.AccessKeys[0].AccessKey
r.SecretAdmin.StringData["AWS_SECRET_ACCESS_KEY"] = account.AccessKeys[0].SecretKey
}
}
}
r.Own(r.SecretAdmin)
return r.Client.Create(r.Ctx, r.SecretAdmin)
}
var readmeTemplate = template.Must(template.New("NooBaaSystem.Status.Readme").Parse(`
Welcome to NooBaa!
-----------------
Lets get started:
1. Connect to Management console:
Read your mgmt console login information (email & password) from secret: "{{.SecretAdmin.Name}}".
kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq '.data|map_values(@base64d)'
Open the management console service - take External IP/DNS or Node Port or use port forwarding:
kubectl port-forward -n {{.ServiceMgmt.Namespace}} service/{{.ServiceMgmt.Name}} 11443:8443 &
open https://localhost:11443
2. Test S3 client:
kubectl port-forward -n {{.ServiceS3.Namespace}} service/{{.ServiceS3.Name}} 10443:443 &
NOOBAA_ACCESS_KEY=$(kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq -r '.data.AWS_ACCESS_KEY_ID|@base64d')
NOOBAA_SECRET_KEY=$(kubectl get secret {{.SecretAdmin.Name}} -n {{.SecretAdmin.Namespace}} -o json | jq -r '.data.AWS_SECRET_ACCESS_KEY|@base64d')
alias s3='AWS_ACCESS_KEY_ID=$NOOBAA_ACCESS_KEY AWS_SECRET_ACCESS_KEY=$NOOBAA_SECRET_KEY aws --endpoint https://localhost:10443 --no-verify-ssl s3'
s3 ls
`))
// Complete populates the noobaa status at the end of reconcile.
func (r *Reconciler) Complete() error {
var readmeBuffer bytes.Buffer
err := readmeTemplate.Execute(&readmeBuffer, r)
if err != nil {
return err
}
r.NooBaa.Status.Readme = readmeBuffer.String()
r.NooBaa.Status.Accounts.Admin.SecretRef.Name = r.SecretAdmin.Name
r.NooBaa.Status.Accounts.Admin.SecretRef.Namespace = r.SecretAdmin.Namespace
return nil
}
// Own sets the object owner references to the noobaa system
func (r *Reconciler) Own(obj metav1.Object) {
util.Panic(controllerutil.SetControllerReference(r.NooBaa, obj, r.Scheme))
}
// GetObject gets an object by name from the request namespace.
func (r *Reconciler) GetObject(name string, obj runtime.Object) error {
return r.Client.Get(r.Ctx, client.ObjectKey{Namespace: r.Request.Namespace, Name: name}, obj)
}
// ReconcileObject is a generic call to reconcile a kubernetes object
// desiredFunc can be passed to modify the object before create/update.
// Currently we ignore enforcing a desired state, but it might be needed on upgrades.
func (r *Reconciler) ReconcileObject(obj runtime.Object, desiredFunc func()) error {
kind := obj.GetObjectKind().GroupVersionKind().Kind
objMeta, _ := meta.Accessor(obj)
log := r.Logger.
WithField("func", "ReconcileObject").
WithField("kind", kind).
WithField("name", objMeta.GetName())
r.Own(objMeta)
op, err := controllerutil.CreateOrUpdate(
r.Ctx, r.Client, obj.(runtime.Object),
func(obj runtime.Object) error {
if desiredFunc != nil {
desiredFunc()
}
return nil
},
)
if err != nil {
log.Errorf("ReconcileObject Failed: %v", err)
return err
}
log.Infof("Done. op=%s", op)
return nil
}
// SetPhase updates the status phase and conditions
func (r *Reconciler) SetPha | nbv1.SystemPhase) {
r.Logger.Infof("SetPhase: %s", phase)
r.NooBaa.Status.Phase = phase
conditions := &r.NooBaa.Status.Conditions
reason := fmt.Sprintf("NooBaaSystemPhase%s", phase)
message := fmt.Sprintf("NooBaa operator system reconcile phase %s", phase)
switch phase {
case nbv1.SystemPhaseReady:
util.SetAvailableCondition(conditions, reason, message)
case nbv1.SystemPhaseRejected:
// handle rejected here too?
default:
util.SetProgressingCondition(conditions, reason, message)
}
}
| se(phase | identifier_name |
lib.rs | // LNP/BP lLibraries implementing LNPBP specifications & standards
// Written in 2021-2022 by
// Dr. Maxim Orlovsky <orlovsky@pandoraprime.ch>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.
// Coding conventions
#![recursion_limit = "256"]
#![deny(dead_code, missing_docs, warnings)]
//! Library implementing LNPBP-14 standard: Bech32 encoding for
//! client-side-validated data.
//!
//! Types that need to have `data1...` and `z1...` bech 32 implementation
//! according to LNPBP-14 must implement [`ToBech32Payload`] and
//! [`FromBech32Payload`] traits.
//!
//! Bech32 `id1...` representation is provided automatically only for hash types
//! implementing [`bitcoin_hashes::Hash`] trait
#[macro_use]
extern crate amplify;
#[macro_use]
extern crate strict_encoding;
#[cfg(feature = "serde")]
#[macro_use]
extern crate serde_crate as serde;
use std::convert::{Infallible, TryFrom};
use std::fmt::{self, Debug, Formatter};
use std::str::FromStr;
use amplify::hex::ToHex;
use bech32::{FromBase32, ToBase32, Variant};
use bitcoin_hashes::{sha256t, Hash};
#[cfg(feature = "zip")]
use deflate::{write::DeflateEncoder, Compression};
#[cfg(feature = "serde")]
use serde::{
de::{Error as SerdeError, Unexpected, Visitor},
Deserializer, Serializer,
};
#[cfg(feature = "serde")]
use serde_with::{hex::Hex, As};
/// Bech32 HRP used in generic identifiers
pub const HRP_ID: &str = "id";
/// Bech32 HRP used for representation of arbitrary data blobs in their raw
/// (uncompressed) form
pub const HRP_DATA: &str = "data";
#[cfg(feature = "zip")]
/// Bech32 HRP used for representation of zip-compressed blobs
pub const HRP_ZIP: &str = "z";
/// Constant specifying default compression algorithm ("deflate")
#[cfg(feature = "zip")]
pub const RAW_DATA_ENCODING_DEFLATE: u8 = 1u8;
/// Errors generated by Bech32 conversion functions (both parsing and
/// type-specific conversion errors)
#[derive(Clone, PartialEq, Eq, Display, Debug, From, Error)]
#[display(doc_comments)]
pub enum Error {
/// bech32 string parse error - {0}
#[from]
Bech32Error(::bech32::Error),
/// payload data are not strictly encoded - {0}
#[from]
NotStrictEncoded(strict_encoding::Error),
/// payload data are not a bitcoin hash - {0}
#[from]
NotBitcoinHash(bitcoin_hashes::Error),
/// Requested object type does not match used Bech32 HRP
WrongPrefix,
/// bech32m encoding must be used instead of legacy bech32
WrongVariant,
/// payload must start with encoding prefix
NoEncodingPrefix,
/// provided raw data use unknown encoding version {0}
UnknownRawDataEncoding(u8),
/// can not encode raw data with DEFLATE algorithm
DeflateEncoding,
/// error inflating compressed data from payload: {0}
InflateError(String),
}
impl From<Infallible> for Error {
fn from(_: Infallible) -> Self {
unreachable!("infalliable error in lnpbp_bech32 blob")
} | /// Type for wrapping Vec<u8> data in cases you need to do a convenient
/// enum variant display derives with `#[display(inner)]`
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate", transparent)
)]
#[derive(
Wrapper, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Default, Display,
From
)]
#[derive(StrictEncode, StrictDecode)]
#[wrap(
Index,
IndexMut,
IndexRange,
IndexFull,
IndexFrom,
IndexTo,
IndexInclusive
)]
#[display(Vec::bech32_data_string)]
// We get `(To)Bech32DataString` and `FromBech32DataString` for free b/c
// the wrapper creates `From<Vec<u8>>` impl for us, which with rust stdlib
// implies `TryFrom<Vec<u8>>`, for which we have auto trait derivation
// `FromBech32Payload`, for which the traits above are automatically derived
pub struct Blob(
#[cfg_attr(feature = "serde", serde(with = "As::<Hex>"))] Vec<u8>,
);
impl AsRef<[u8]> for Blob {
fn as_ref(&self) -> &[u8] { &self.0 }
}
impl Debug for Blob {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "Blob({})", self.0.to_hex())
}
}
impl FromStr for Blob {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Blob::from_bech32_data_str(s)
}
}
/// Convertor trait for extracting data from a given type which will be part of
/// Bech32 payload
pub trait ToBech32Payload {
/// Must return a vector with Bech32 payload data
fn to_bech32_payload(&self) -> Vec<u8>;
}
/// Extracts data representation from a given type which will be part of Bech32
/// payload
pub trait AsBech32Payload {
/// Must return a reference to a slice representing Bech32 payload data
fn as_bech32_payload(&self) -> &[u8];
}
impl<T> AsBech32Payload for T
where
T: AsRef<[u8]>,
{
fn as_bech32_payload(&self) -> &[u8] { self.as_ref() }
}
/// Convertor which constructs a given type from Bech32 payload data
pub trait FromBech32Payload
where
Self: Sized,
{
/// Construct type from Bech32 payload data
fn from_bech32_payload(payload: Vec<u8>) -> Result<Self, Error>;
}
impl<T> FromBech32Payload for T
where
T: TryFrom<Vec<u8>>,
Error: From<T::Error>,
{
fn from_bech32_payload(payload: Vec<u8>) -> Result<T, Error> {
Ok(T::try_from(payload)?)
}
}
// -- Common (non-LNPBP-39) traits
/// Creates Bech32 string with appropriate type data representation.
/// Depending on the specific type, this may be `id`-string, `data`-string,
/// `z`-string or other type of HRP.
pub trait ToBech32String {
/// Creates Bech32 string with appropriate type data representation
fn to_bech32_string(&self) -> String;
}
/// Constructs type from the provided Bech32 string, or fails with
/// [`enum@Error`]
pub trait FromBech32Str {
/// Specifies which HRP is used by Bech32 string representing this data type
const HRP: &'static str;
/// Constructs type from the provided Bech32 string, or fails with
/// [`enum@Error`]
fn from_bech32_str(s: &str) -> Result<Self, Error>
where
Self: Sized;
}
/// Strategies for automatic implementation of the Bech32 traits
pub mod strategies {
use amplify::{Holder, Wrapper};
use strict_encoding::{StrictDecode, StrictEncode};
use super::*;
/// Strategy for Bech32 representation as uncompressed data (starting from
/// `data1...` HRP). The data are takken by using [`StrictEncode`]
/// implementation defined for the type.
pub struct UsingStrictEncoding;
/// Strategy for Bech32 representation of the newtypes wrapping other types.
/// The strategy simply inherits Bech32 representation from the inner type.
pub struct Wrapped;
#[cfg(feature = "zip")]
/// Strategy for Bech32 representation as compressed data (starting from
/// `z1...` HRP). The data are takken by using [`StrictEncode`]
/// implementation defined for the type.
pub struct CompressedStrictEncoding;
/// Helper trait for implementing specific strategy for Bech32 construction
pub trait Strategy {
/// Bech32 HRP prefix used by a type
const HRP: &'static str;
/// Specific strategy used for automatic implementation of all
/// Bech32-related traits.
type Strategy;
}
impl<T> ToBech32String for T
where
T: Strategy + Clone,
Holder<T, <T as Strategy>::Strategy>: ToBech32String,
{
#[inline]
fn to_bech32_string(&self) -> String {
Holder::new(self.clone()).to_bech32_string()
}
}
impl<T> FromBech32Str for T
where
T: Strategy,
Holder<T, <T as Strategy>::Strategy>: FromBech32Str,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Holder::from_bech32_str(s)?.into_inner())
}
}
impl<T> ToBech32String for Holder<T, Wrapped>
where
T: Wrapper,
T::Inner: ToBech32String,
{
#[inline]
fn to_bech32_string(&self) -> String {
self.as_inner().as_inner().to_bech32_string()
}
}
impl<T> FromBech32Str for Holder<T, Wrapped>
where
T: Wrapper + Strategy,
T::Inner: FromBech32Str,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Self::new(T::from_inner(T::Inner::from_bech32_str(s)?)))
}
}
impl<T> ToBech32String for Holder<T, UsingStrictEncoding>
where
T: StrictEncode + Strategy,
{
#[inline]
fn to_bech32_string(&self) -> String {
let data = self
.as_inner()
.strict_serialize()
.expect("in-memory strict encoding failure");
::bech32::encode(T::HRP, data.to_base32(), Variant::Bech32m)
.unwrap_or_else(|_| s!("Error: wrong bech32 prefix"))
}
}
impl<T> FromBech32Str for Holder<T, UsingStrictEncoding>
where
T: StrictDecode + Strategy,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
let (hrp, data, variant) = ::bech32::decode(s)?;
if hrp.as_str() != Self::HRP {
return Err(Error::WrongPrefix);
}
if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
Ok(Self::new(T::strict_deserialize(Vec::<u8>::from_base32(
&data,
)?)?))
}
}
}
pub use strategies::Strategy;
// -- Sealed traits & their implementation
/// Special trait for preventing implementation of [`FromBech32DataStr`] and
/// others from outside of this crate. For details see
/// <https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed>
mod sealed {
use amplify::Wrapper;
use super::*;
pub trait HashType<Tag>: Wrapper<Inner = sha256t::Hash<Tag>>
where
Tag: sha256t::Tag,
{
}
pub trait ToPayload: ToBech32Payload {}
pub trait AsPayload: AsBech32Payload {}
pub trait FromPayload: FromBech32Payload {}
impl<T, Tag> HashType<Tag> for T
where
T: Wrapper<Inner = sha256t::Hash<Tag>>,
Tag: sha256t::Tag,
{
}
impl<T> ToPayload for T where T: ToBech32Payload {}
impl<T> AsPayload for T where T: AsBech32Payload {}
impl<T> FromPayload for T where T: FromBech32Payload {}
}
/// Trait for creating `data1...` Bech32 representation of a given type
pub trait ToBech32DataString: sealed::ToPayload {
/// Returns `data1...` Bech32 representation of a given type
fn to_bech32_data_string(&self) -> String {
::bech32::encode(
HRP_DATA,
self.to_bech32_payload().to_base32(),
Variant::Bech32m,
)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T> ToBech32DataString for T where T: sealed::ToPayload {}
/// Trait for creating `data1...` Bech32 representation of a given type
pub trait Bech32DataString: sealed::AsPayload {
/// Returns `data1...` Bech32 representation of a given type
fn bech32_data_string(&self) -> String {
::bech32::encode(
HRP_DATA,
self.as_bech32_payload().to_base32(),
Variant::Bech32m,
)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T> Bech32DataString for T where T: sealed::AsPayload {}
/// Trait for reconstruction type data from `data1...` Bech32 string
pub trait FromBech32DataStr
where
Self: Sized + sealed::FromPayload,
{
/// Reconstructs type data from `data1...` Bech32 string
fn from_bech32_data_str(s: &str) -> Result<Self, Error> {
let (hrp, data, variant) = bech32::decode(s)?;
if hrp != HRP_DATA {
return Err(Error::WrongPrefix);
}
if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
Self::from_bech32_payload(Vec::<u8>::from_base32(&data)?)
}
}
impl<T> FromBech32DataStr for T where T: sealed::FromPayload {}
#[doc(hidden)]
#[cfg(feature = "zip")]
pub mod zip {
use amplify::Holder;
use strict_encoding::{StrictDecode, StrictEncode};
use super::*;
fn payload_to_bech32_zip_string(hrp: &str, payload: &[u8]) -> String {
use std::io::Write;
// We initialize writer with a version byte, indicating deflation
// algorithm used
let writer = vec![RAW_DATA_ENCODING_DEFLATE];
let mut encoder = DeflateEncoder::new(writer, Compression::Best);
encoder
.write_all(payload)
.expect("in-memory strict encoder failure");
let data = encoder.finish().expect("zip algorithm failure");
::bech32::encode(hrp, data.to_base32(), Variant::Bech32m)
.expect("HRP is hardcoded and can't fail")
}
fn bech32_zip_str_to_payload(hrp: &str, s: &str) -> Result<Vec<u8>, Error> {
let (prefix, data, version) = bech32::decode(s)?;
if prefix != hrp {
return Err(Error::WrongPrefix);
}
if version != Variant::Bech32m {
return Err(Error::WrongVariant);
}
let data = Vec::<u8>::from_base32(&data)?;
match *data[..].first().ok_or(Error::NoEncodingPrefix)? {
RAW_DATA_ENCODING_DEFLATE => {
let decoded = inflate::inflate_bytes(&data[1..])
.map_err(Error::InflateError)?;
Ok(decoded)
}
unknown_ver => Err(Error::UnknownRawDataEncoding(unknown_ver)),
}
}
/// Trait for creating `z1...` (compressed binary data blob) Bech32
/// representation of a given type
pub trait ToBech32ZipString: sealed::ToPayload {
/// Returns `z1...` (compressed binary data blob) Bech32 representation
/// of a given type
fn to_bech32_zip_string(&self) -> String {
payload_to_bech32_zip_string(HRP_ZIP, &self.to_bech32_payload())
}
}
impl<T> ToBech32ZipString for T where T: sealed::ToPayload {}
/// Trait for creating `z1...` (compressed binary data blob) Bech32
/// representation of a given type
pub trait Bech32ZipString: sealed::AsPayload {
/// Returns `z1...` (compressed binary data blob) Bech32 representation
/// of a given type
fn bech32_zip_string(&self) -> String {
payload_to_bech32_zip_string(HRP_ZIP, self.as_bech32_payload())
}
}
impl<T> Bech32ZipString for T where T: sealed::AsPayload {}
/// Trait for reconstruction type data from `z1...` (compressed binary data
/// blob) Bech32 string
pub trait FromBech32ZipStr: sealed::FromPayload {
/// Reconstructs type data from `z1...` (compressed binary data blob)
/// Bech32 string
fn from_bech32_zip_str(s: &str) -> Result<Self, Error> {
Self::from_bech32_payload(bech32_zip_str_to_payload(HRP_ZIP, s)?)
}
}
impl<T> FromBech32ZipStr for T where T: sealed::FromPayload {}
impl<T> ToBech32String for Holder<T, strategies::CompressedStrictEncoding>
where
T: StrictEncode + Strategy,
{
#[inline]
fn to_bech32_string(&self) -> String {
let data = self
.as_inner()
.strict_serialize()
.expect("in-memory strict encoding failure");
payload_to_bech32_zip_string(T::HRP, &data)
}
}
impl<T> FromBech32Str for Holder<T, strategies::CompressedStrictEncoding>
where
T: StrictDecode + Strategy,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Self::new(T::strict_deserialize(
bech32_zip_str_to_payload(Self::HRP, s)?,
)?))
}
}
}
#[cfg(feature = "zip")]
pub use zip::*;
/// Trait representing given bitcoin hash type as a Bech32 `id1...` value
pub trait ToBech32IdString<Tag>
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
/// Returns Bech32-encoded string in form of `id1...` representing the type
fn to_bech32_id_string(&self) -> String;
}
/// Trait that can generate the type from a given Bech32 `id1...` value
pub trait FromBech32IdStr<Tag>
where
Self: sealed::HashType<Tag> + Sized,
Tag: sha256t::Tag,
{
/// Reconstructs the identifier type from the provided Bech32 `id1...`
/// string
fn from_bech32_id_str(s: &str) -> Result<Self, Error>;
}
impl<T, Tag> ToBech32IdString<Tag> for T
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
fn to_bech32_id_string(&self) -> String {
::bech32::encode(HRP_ID, self.to_inner().to_base32(), Variant::Bech32m)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T, Tag> FromBech32IdStr<Tag> for T
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
fn from_bech32_id_str(s: &str) -> Result<T, Error> {
let (hrp, id, variant) = ::bech32::decode(s)?;
if hrp != HRP_ID {
return Err(Error::WrongPrefix);
}
if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
let vec = Vec::<u8>::from_base32(&id)?;
Ok(Self::from_inner(Self::Inner::from_slice(&vec)?))
}
}
/// Helper method for serde serialization of types supporting Bech32
/// representation
#[cfg(feature = "serde")]
pub fn serialize<T, S>(data: &T, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: ToBech32String,
{
serializer.serialize_str(&data.to_bech32_string())
}
/// Helper method for serde deserialization of types supporting Bech32
/// representation
#[cfg(feature = "serde")]
pub fn deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: FromBech32Str,
{
deserializer.deserialize_str(Bech32Visitor::<T>(std::marker::PhantomData))
}
#[cfg(feature = "serde")]
struct Bech32Visitor<Value>(std::marker::PhantomData<Value>);
#[cfg(feature = "serde")]
impl<'de, ValueT> Visitor<'de> for Bech32Visitor<ValueT>
where
ValueT: FromBech32Str,
{
type Value = ValueT;
fn expecting(
&self,
formatter: &mut std::fmt::Formatter,
) -> std::fmt::Result {
formatter.write_str("a bech32m-encoded string")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: SerdeError,
{
Self::Value::from_bech32_str(v).map_err(|_| {
E::invalid_value(Unexpected::Str(v), &"valid bech32 string")
})
}
} | }
| random_line_split |
lib.rs | // LNP/BP lLibraries implementing LNPBP specifications & standards
// Written in 2021-2022 by
// Dr. Maxim Orlovsky <orlovsky@pandoraprime.ch>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.
// Coding conventions
#![recursion_limit = "256"]
#![deny(dead_code, missing_docs, warnings)]
//! Library implementing LNPBP-14 standard: Bech32 encoding for
//! client-side-validated data.
//!
//! Types that need to have `data1...` and `z1...` bech 32 implementation
//! according to LNPBP-14 must implement [`ToBech32Payload`] and
//! [`FromBech32Payload`] traits.
//!
//! Bech32 `id1...` representation is provided automatically only for hash types
//! implementing [`bitcoin_hashes::Hash`] trait
#[macro_use]
extern crate amplify;
#[macro_use]
extern crate strict_encoding;
#[cfg(feature = "serde")]
#[macro_use]
extern crate serde_crate as serde;
use std::convert::{Infallible, TryFrom};
use std::fmt::{self, Debug, Formatter};
use std::str::FromStr;
use amplify::hex::ToHex;
use bech32::{FromBase32, ToBase32, Variant};
use bitcoin_hashes::{sha256t, Hash};
#[cfg(feature = "zip")]
use deflate::{write::DeflateEncoder, Compression};
#[cfg(feature = "serde")]
use serde::{
de::{Error as SerdeError, Unexpected, Visitor},
Deserializer, Serializer,
};
#[cfg(feature = "serde")]
use serde_with::{hex::Hex, As};
/// Bech32 HRP used in generic identifiers
pub const HRP_ID: &str = "id";
/// Bech32 HRP used for representation of arbitrary data blobs in their raw
/// (uncompressed) form
pub const HRP_DATA: &str = "data";
#[cfg(feature = "zip")]
/// Bech32 HRP used for representation of zip-compressed blobs
pub const HRP_ZIP: &str = "z";
/// Constant specifying default compression algorithm ("deflate")
#[cfg(feature = "zip")]
pub const RAW_DATA_ENCODING_DEFLATE: u8 = 1u8;
/// Errors generated by Bech32 conversion functions (both parsing and
/// type-specific conversion errors)
#[derive(Clone, PartialEq, Eq, Display, Debug, From, Error)]
#[display(doc_comments)]
pub enum Error {
/// bech32 string parse error - {0}
#[from]
Bech32Error(::bech32::Error),
/// payload data are not strictly encoded - {0}
#[from]
NotStrictEncoded(strict_encoding::Error),
/// payload data are not a bitcoin hash - {0}
#[from]
NotBitcoinHash(bitcoin_hashes::Error),
/// Requested object type does not match used Bech32 HRP
WrongPrefix,
/// bech32m encoding must be used instead of legacy bech32
WrongVariant,
/// payload must start with encoding prefix
NoEncodingPrefix,
/// provided raw data use unknown encoding version {0}
UnknownRawDataEncoding(u8),
/// can not encode raw data with DEFLATE algorithm
DeflateEncoding,
/// error inflating compressed data from payload: {0}
InflateError(String),
}
impl From<Infallible> for Error {
fn from(_: Infallible) -> Self {
unreachable!("infalliable error in lnpbp_bech32 blob")
}
}
/// Type for wrapping Vec<u8> data in cases you need to do a convenient
/// enum variant display derives with `#[display(inner)]`
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate", transparent)
)]
#[derive(
Wrapper, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Default, Display,
From
)]
#[derive(StrictEncode, StrictDecode)]
#[wrap(
Index,
IndexMut,
IndexRange,
IndexFull,
IndexFrom,
IndexTo,
IndexInclusive
)]
#[display(Vec::bech32_data_string)]
// We get `(To)Bech32DataString` and `FromBech32DataString` for free b/c
// the wrapper creates `From<Vec<u8>>` impl for us, which with rust stdlib
// implies `TryFrom<Vec<u8>>`, for which we have auto trait derivation
// `FromBech32Payload`, for which the traits above are automatically derived
pub struct Blob(
#[cfg_attr(feature = "serde", serde(with = "As::<Hex>"))] Vec<u8>,
);
impl AsRef<[u8]> for Blob {
fn as_ref(&self) -> &[u8] { &self.0 }
}
impl Debug for Blob {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "Blob({})", self.0.to_hex())
}
}
impl FromStr for Blob {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Blob::from_bech32_data_str(s)
}
}
/// Convertor trait for extracting data from a given type which will be part of
/// Bech32 payload
pub trait ToBech32Payload {
/// Must return a vector with Bech32 payload data
fn to_bech32_payload(&self) -> Vec<u8>;
}
/// Extracts data representation from a given type which will be part of Bech32
/// payload
pub trait AsBech32Payload {
/// Must return a reference to a slice representing Bech32 payload data
fn as_bech32_payload(&self) -> &[u8];
}
impl<T> AsBech32Payload for T
where
T: AsRef<[u8]>,
{
fn as_bech32_payload(&self) -> &[u8] { self.as_ref() }
}
/// Convertor which constructs a given type from Bech32 payload data
pub trait FromBech32Payload
where
Self: Sized,
{
/// Construct type from Bech32 payload data
fn from_bech32_payload(payload: Vec<u8>) -> Result<Self, Error>;
}
impl<T> FromBech32Payload for T
where
T: TryFrom<Vec<u8>>,
Error: From<T::Error>,
{
fn from_bech32_payload(payload: Vec<u8>) -> Result<T, Error> {
Ok(T::try_from(payload)?)
}
}
// -- Common (non-LNPBP-39) traits
/// Creates Bech32 string with appropriate type data representation.
/// Depending on the specific type, this may be `id`-string, `data`-string,
/// `z`-string or other type of HRP.
pub trait ToBech32String {
/// Creates Bech32 string with appropriate type data representation
fn to_bech32_string(&self) -> String;
}
/// Constructs type from the provided Bech32 string, or fails with
/// [`enum@Error`]
pub trait FromBech32Str {
/// Specifies which HRP is used by Bech32 string representing this data type
const HRP: &'static str;
/// Constructs type from the provided Bech32 string, or fails with
/// [`enum@Error`]
fn from_bech32_str(s: &str) -> Result<Self, Error>
where
Self: Sized;
}
/// Strategies for automatic implementation of the Bech32 traits
pub mod strategies {
use amplify::{Holder, Wrapper};
use strict_encoding::{StrictDecode, StrictEncode};
use super::*;
/// Strategy for Bech32 representation as uncompressed data (starting from
/// `data1...` HRP). The data are takken by using [`StrictEncode`]
/// implementation defined for the type.
pub struct UsingStrictEncoding;
/// Strategy for Bech32 representation of the newtypes wrapping other types.
/// The strategy simply inherits Bech32 representation from the inner type.
pub struct Wrapped;
#[cfg(feature = "zip")]
/// Strategy for Bech32 representation as compressed data (starting from
/// `z1...` HRP). The data are takken by using [`StrictEncode`]
/// implementation defined for the type.
pub struct CompressedStrictEncoding;
/// Helper trait for implementing specific strategy for Bech32 construction
pub trait Strategy {
/// Bech32 HRP prefix used by a type
const HRP: &'static str;
/// Specific strategy used for automatic implementation of all
/// Bech32-related traits.
type Strategy;
}
impl<T> ToBech32String for T
where
T: Strategy + Clone,
Holder<T, <T as Strategy>::Strategy>: ToBech32String,
{
#[inline]
fn to_bech32_string(&self) -> String {
Holder::new(self.clone()).to_bech32_string()
}
}
impl<T> FromBech32Str for T
where
T: Strategy,
Holder<T, <T as Strategy>::Strategy>: FromBech32Str,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Holder::from_bech32_str(s)?.into_inner())
}
}
impl<T> ToBech32String for Holder<T, Wrapped>
where
T: Wrapper,
T::Inner: ToBech32String,
{
#[inline]
fn to_bech32_string(&self) -> String {
self.as_inner().as_inner().to_bech32_string()
}
}
impl<T> FromBech32Str for Holder<T, Wrapped>
where
T: Wrapper + Strategy,
T::Inner: FromBech32Str,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Self::new(T::from_inner(T::Inner::from_bech32_str(s)?)))
}
}
impl<T> ToBech32String for Holder<T, UsingStrictEncoding>
where
T: StrictEncode + Strategy,
{
#[inline]
fn to_bech32_string(&self) -> String {
let data = self
.as_inner()
.strict_serialize()
.expect("in-memory strict encoding failure");
::bech32::encode(T::HRP, data.to_base32(), Variant::Bech32m)
.unwrap_or_else(|_| s!("Error: wrong bech32 prefix"))
}
}
impl<T> FromBech32Str for Holder<T, UsingStrictEncoding>
where
T: StrictDecode + Strategy,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
let (hrp, data, variant) = ::bech32::decode(s)?;
if hrp.as_str() != Self::HRP {
return Err(Error::WrongPrefix);
}
if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
Ok(Self::new(T::strict_deserialize(Vec::<u8>::from_base32(
&data,
)?)?))
}
}
}
pub use strategies::Strategy;
// -- Sealed traits & their implementation
/// Special trait for preventing implementation of [`FromBech32DataStr`] and
/// others from outside of this crate. For details see
/// <https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed>
mod sealed {
use amplify::Wrapper;
use super::*;
pub trait HashType<Tag>: Wrapper<Inner = sha256t::Hash<Tag>>
where
Tag: sha256t::Tag,
{
}
pub trait ToPayload: ToBech32Payload {}
pub trait AsPayload: AsBech32Payload {}
pub trait FromPayload: FromBech32Payload {}
impl<T, Tag> HashType<Tag> for T
where
T: Wrapper<Inner = sha256t::Hash<Tag>>,
Tag: sha256t::Tag,
{
}
impl<T> ToPayload for T where T: ToBech32Payload {}
impl<T> AsPayload for T where T: AsBech32Payload {}
impl<T> FromPayload for T where T: FromBech32Payload {}
}
/// Trait for creating `data1...` Bech32 representation of a given type
pub trait ToBech32DataString: sealed::ToPayload {
/// Returns `data1...` Bech32 representation of a given type
fn to_bech32_data_string(&self) -> String {
::bech32::encode(
HRP_DATA,
self.to_bech32_payload().to_base32(),
Variant::Bech32m,
)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T> ToBech32DataString for T where T: sealed::ToPayload {}
/// Trait for creating `data1...` Bech32 representation of a given type
pub trait Bech32DataString: sealed::AsPayload {
/// Returns `data1...` Bech32 representation of a given type
fn bech32_data_string(&self) -> String {
::bech32::encode(
HRP_DATA,
self.as_bech32_payload().to_base32(),
Variant::Bech32m,
)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T> Bech32DataString for T where T: sealed::AsPayload {}
/// Trait for reconstruction type data from `data1...` Bech32 string
pub trait FromBech32DataStr
where
Self: Sized + sealed::FromPayload,
{
/// Reconstructs type data from `data1...` Bech32 string
fn from_bech32_data_str(s: &str) -> Result<Self, Error> {
let (hrp, data, variant) = bech32::decode(s)?;
if hrp != HRP_DATA {
return Err(Error::WrongPrefix);
}
if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
Self::from_bech32_payload(Vec::<u8>::from_base32(&data)?)
}
}
impl<T> FromBech32DataStr for T where T: sealed::FromPayload {}
#[doc(hidden)]
#[cfg(feature = "zip")]
pub mod zip {
use amplify::Holder;
use strict_encoding::{StrictDecode, StrictEncode};
use super::*;
fn payload_to_bech32_zip_string(hrp: &str, payload: &[u8]) -> String {
use std::io::Write;
// We initialize writer with a version byte, indicating deflation
// algorithm used
let writer = vec![RAW_DATA_ENCODING_DEFLATE];
let mut encoder = DeflateEncoder::new(writer, Compression::Best);
encoder
.write_all(payload)
.expect("in-memory strict encoder failure");
let data = encoder.finish().expect("zip algorithm failure");
::bech32::encode(hrp, data.to_base32(), Variant::Bech32m)
.expect("HRP is hardcoded and can't fail")
}
fn bech32_zip_str_to_payload(hrp: &str, s: &str) -> Result<Vec<u8>, Error> {
let (prefix, data, version) = bech32::decode(s)?;
if prefix != hrp {
return Err(Error::WrongPrefix);
}
if version != Variant::Bech32m {
return Err(Error::WrongVariant);
}
let data = Vec::<u8>::from_base32(&data)?;
match *data[..].first().ok_or(Error::NoEncodingPrefix)? {
RAW_DATA_ENCODING_DEFLATE => {
let decoded = inflate::inflate_bytes(&data[1..])
.map_err(Error::InflateError)?;
Ok(decoded)
}
unknown_ver => Err(Error::UnknownRawDataEncoding(unknown_ver)),
}
}
/// Trait for creating `z1...` (compressed binary data blob) Bech32
/// representation of a given type
pub trait ToBech32ZipString: sealed::ToPayload {
/// Returns `z1...` (compressed binary data blob) Bech32 representation
/// of a given type
fn to_bech32_zip_string(&self) -> String {
payload_to_bech32_zip_string(HRP_ZIP, &self.to_bech32_payload())
}
}
impl<T> ToBech32ZipString for T where T: sealed::ToPayload {}
/// Trait for creating `z1...` (compressed binary data blob) Bech32
/// representation of a given type
pub trait Bech32ZipString: sealed::AsPayload {
/// Returns `z1...` (compressed binary data blob) Bech32 representation
/// of a given type
fn bech32_zip_string(&self) -> String {
payload_to_bech32_zip_string(HRP_ZIP, self.as_bech32_payload())
}
}
impl<T> Bech32ZipString for T where T: sealed::AsPayload {}
/// Trait for reconstruction type data from `z1...` (compressed binary data
/// blob) Bech32 string
pub trait FromBech32ZipStr: sealed::FromPayload {
/// Reconstructs type data from `z1...` (compressed binary data blob)
/// Bech32 string
fn from_bech32_zip_str(s: &str) -> Result<Self, Error> {
Self::from_bech32_payload(bech32_zip_str_to_payload(HRP_ZIP, s)?)
}
}
impl<T> FromBech32ZipStr for T where T: sealed::FromPayload {}
impl<T> ToBech32String for Holder<T, strategies::CompressedStrictEncoding>
where
T: StrictEncode + Strategy,
{
#[inline]
fn to_bech32_string(&self) -> String {
let data = self
.as_inner()
.strict_serialize()
.expect("in-memory strict encoding failure");
payload_to_bech32_zip_string(T::HRP, &data)
}
}
impl<T> FromBech32Str for Holder<T, strategies::CompressedStrictEncoding>
where
T: StrictDecode + Strategy,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Self::new(T::strict_deserialize(
bech32_zip_str_to_payload(Self::HRP, s)?,
)?))
}
}
}
#[cfg(feature = "zip")]
pub use zip::*;
/// Trait representing given bitcoin hash type as a Bech32 `id1...` value
pub trait ToBech32IdString<Tag>
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
/// Returns Bech32-encoded string in form of `id1...` representing the type
fn to_bech32_id_string(&self) -> String;
}
/// Trait that can generate the type from a given Bech32 `id1...` value
pub trait FromBech32IdStr<Tag>
where
Self: sealed::HashType<Tag> + Sized,
Tag: sha256t::Tag,
{
/// Reconstructs the identifier type from the provided Bech32 `id1...`
/// string
fn from_bech32_id_str(s: &str) -> Result<Self, Error>;
}
impl<T, Tag> ToBech32IdString<Tag> for T
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
fn to_bech32_id_string(&self) -> String {
::bech32::encode(HRP_ID, self.to_inner().to_base32(), Variant::Bech32m)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T, Tag> FromBech32IdStr<Tag> for T
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
fn from_bech32_id_str(s: &str) -> Result<T, Error> {
let (hrp, id, variant) = ::bech32::decode(s)?;
if hrp != HRP_ID {
return Err(Error::WrongPrefix);
}
if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
let vec = Vec::<u8>::from_base32(&id)?;
Ok(Self::from_inner(Self::Inner::from_slice(&vec)?))
}
}
/// Helper method for serde serialization of types supporting Bech32
/// representation
#[cfg(feature = "serde")]
pub fn serialize<T, S>(data: &T, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: ToBech32String,
{
serializer.serialize_str(&data.to_bech32_string())
}
/// Helper method for serde deserialization of types supporting Bech32
/// representation
#[cfg(feature = "serde")]
pub fn deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: FromBech32Str,
{
deserializer.deserialize_str(Bech32Visitor::<T>(std::marker::PhantomData))
}
#[cfg(feature = "serde")]
struct | <Value>(std::marker::PhantomData<Value>);
#[cfg(feature = "serde")]
impl<'de, ValueT> Visitor<'de> for Bech32Visitor<ValueT>
where
ValueT: FromBech32Str,
{
type Value = ValueT;
fn expecting(
&self,
formatter: &mut std::fmt::Formatter,
) -> std::fmt::Result {
formatter.write_str("a bech32m-encoded string")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: SerdeError,
{
Self::Value::from_bech32_str(v).map_err(|_| {
E::invalid_value(Unexpected::Str(v), &"valid bech32 string")
})
}
}
| Bech32Visitor | identifier_name |
lib.rs | //! JSON-RPC client implementation.
#![deny(missing_docs)]
use failure::{format_err, Fail};
use futures::sync::{mpsc, oneshot};
use futures::{future, prelude::*};
use jsonrpc_core::{Error, Params};
use serde::de::DeserializeOwned;
use serde::Serialize;
use serde_json::Value;
use std::marker::PhantomData;
pub mod transports;
#[cfg(test)]
mod logger;
/// The errors returned by the client.
#[derive(Debug, Fail)]
pub enum RpcError {
/// An error returned by the server.
#[fail(display = "Server returned rpc error {}", _0)]
JsonRpcError(Error),
/// Failure to parse server response.
#[fail(display = "Failed to parse server response as {}: {}", _0, _1)]
ParseError(String, failure::Error),
/// Request timed out.
#[fail(display = "Request timed out")]
Timeout,
/// Not rpc specific errors.
#[fail(display = "{}", _0)]
Other(failure::Error),
}
impl From<Error> for RpcError {
fn from(error: Error) -> Self {
RpcError::JsonRpcError(error)
}
}
/// An RPC call message.
struct CallMessage {
/// The RPC method name.
method: String,
/// The RPC method parameters.
params: Params,
/// The oneshot channel to send the result of the rpc
/// call to.
sender: oneshot::Sender<Result<Value, RpcError>>,
}
/// An RPC notification.
struct NotifyMessage {
/// The RPC method name.
method: String,
/// The RPC method paramters.
params: Params,
}
/// An RPC subscription.
struct Subscription {
/// The subscribe method name.
subscribe: String,
/// The subscribe method parameters.
subscribe_params: Params,
/// The name of the notification.
notification: String,
/// The unsubscribe method name.
unsubscribe: String,
}
/// An RPC subscribe message.
struct SubscribeMessage {
/// The subscription to subscribe to.
subscription: Subscription,
/// The channel to send notifications to.
sender: mpsc::Sender<Result<Value, RpcError>>,
}
/// A message sent to the `RpcClient`.
enum RpcMessage {
/// Make an RPC call.
Call(CallMessage),
/// Send a notification.
Notify(NotifyMessage),
/// Subscribe to a notification.
Subscribe(SubscribeMessage),
}
impl From<CallMessage> for RpcMessage {
fn from(msg: CallMessage) -> Self {
RpcMessage::Call(msg)
}
}
impl From<NotifyMessage> for RpcMessage {
fn from(msg: NotifyMessage) -> Self {
RpcMessage::Notify(msg)
}
}
impl From<SubscribeMessage> for RpcMessage {
fn from(msg: SubscribeMessage) -> Self {
RpcMessage::Subscribe(msg)
}
}
/// A channel to a `RpcClient`.
#[derive(Clone)]
pub struct RpcChannel(mpsc::Sender<RpcMessage>);
impl RpcChannel {
fn send(
&self,
msg: RpcMessage,
) -> impl Future<Item = mpsc::Sender<RpcMessage>, Error = mpsc::SendError<RpcMessage>> {
self.0.to_owned().send(msg)
}
}
impl From<mpsc::Sender<RpcMessage>> for RpcChannel {
fn from(sender: mpsc::Sender<RpcMessage>) -> Self {
RpcChannel(sender)
}
}
/// The future returned by the rpc call.
pub struct RpcFuture {
recv: oneshot::Receiver<Result<Value, RpcError>>,
}
impl RpcFuture {
/// Creates a new `RpcFuture`.
pub fn new(recv: oneshot::Receiver<Result<Value, RpcError>>) -> Self {
RpcFuture { recv }
}
}
impl Future for RpcFuture {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Self::Item>, Self::Error> {
// TODO should timeout (#410)
match self.recv.poll() {
Ok(Async::Ready(Ok(value))) => Ok(Async::Ready(value)),
Ok(Async::Ready(Err(error))) => Err(error),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(error) => Err(RpcError::Other(error.into())),
}
}
}
/// The stream returned by a subscribe.
pub struct SubscriptionStream {
recv: mpsc::Receiver<Result<Value, RpcError>>,
}
impl SubscriptionStream {
/// Crates a new `SubscriptionStream`.
pub fn new(recv: mpsc::Receiver<Result<Value, RpcError>>) -> Self {
SubscriptionStream { recv }
}
}
impl Stream for SubscriptionStream {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
match self.recv.poll() {
Ok(Async::Ready(Some(Ok(value)))) => Ok(Async::Ready(Some(value))),
Ok(Async::Ready(Some(Err(error)))) => Err(error),
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(()) => Err(RpcError::Other(format_err!("mpsc channel returned an error."))),
}
}
}
/// A typed subscription stream.
pub struct TypedSubscriptionStream<T> {
_marker: PhantomData<T>,
returns: &'static str,
stream: SubscriptionStream,
}
impl<T> TypedSubscriptionStream<T> {
/// Creates a new `TypedSubscriptionStream`.
pub fn new(stream: SubscriptionStream, returns: &'static str) -> Self {
TypedSubscriptionStream {
_marker: PhantomData,
returns,
stream,
}
}
}
impl<T: DeserializeOwned + 'static> Stream for TypedSubscriptionStream<T> {
type Item = T;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
let result = match self.stream.poll()? {
Async::Ready(Some(value)) => serde_json::from_value::<T>(value)
.map(|result| Async::Ready(Some(result)))
.map_err(|error| RpcError::ParseError(self.returns.into(), error.into()))?,
Async::Ready(None) => Async::Ready(None),
Async::NotReady => Async::NotReady,
};
Ok(result)
}
}
/// Client for raw JSON RPC requests
#[derive(Clone)]
pub struct RawClient(RpcChannel);
impl From<RpcChannel> for RawClient {
fn from(channel: RpcChannel) -> Self {
RawClient(channel)
}
}
impl RawClient {
/// Call RPC method with raw JSON.
pub fn call_method(&self, method: &str, params: Params) -> impl Future<Item = Value, Error = RpcError> {
let (sender, receiver) = oneshot::channel();
let msg = CallMessage {
method: method.into(),
params,
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.and_then(|_| RpcFuture::new(receiver))
}
/// Send RPC notification with raw JSON.
pub fn notify(&self, method: &str, params: Params) -> impl Future<Item = (), Error = RpcError> {
let msg = NotifyMessage {
method: method.into(),
params,
};
self.0
.send(msg.into())
.map(|_| ())
.map_err(|error| RpcError::Other(error.into()))
}
/// Subscribe to topic with raw JSON.
pub fn subscribe(
&self,
subscribe: &str,
subscribe_params: Params,
notification: &str,
unsubscribe: &str,
) -> impl Future<Item = SubscriptionStream, Error = RpcError> {
let (sender, receiver) = mpsc::channel(0);
let msg = SubscribeMessage {
subscription: Subscription {
subscribe: subscribe.into(),
subscribe_params,
notification: notification.into(),
unsubscribe: unsubscribe.into(),
},
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.map(|_| SubscriptionStream::new(receiver))
}
}
/// Client for typed JSON RPC requests
#[derive(Clone)]
pub struct TypedClient(RawClient);
impl From<RpcChannel> for TypedClient {
fn from(channel: RpcChannel) -> Self {
TypedClient(channel.into())
}
}
impl TypedClient {
/// Create a new `TypedClient`.
pub fn new(raw_cli: RawClient) -> Self {
TypedClient(raw_cli)
}
/// Call RPC with serialization of request and deserialization of response.
pub fn call_method<T: Serialize, R: DeserializeOwned + 'static>(
&self,
method: &str,
returns: &'static str,
args: T,
) -> impl Future<Item = R, Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
Value::Object(map) => Params::Map(map),
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, JSON object or null"
))))
}
};
future::Either::B(self.0.call_method(method, params).and_then(move |value: Value| {
log::debug!("response: {:?}", value);
let result =
serde_json::from_value::<R>(value).map_err(|error| RpcError::ParseError(returns.into(), error.into()));
future::done(result)
}))
}
/// Call RPC with serialization of request only.
pub fn notify<T: Serialize>(&self, method: &str, args: T) -> impl Future<Item = (), Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
future::Either::B(self.0.notify(method, params))
}
/// Subscribe with serialization of request and deserialization of response.
pub fn subscribe<T: Serialize, R: DeserializeOwned + 'static>(
&self,
subscribe: &str,
subscribe_params: T,
topic: &str,
unsubscribe: &str,
returns: &'static str,
) -> impl Future<Item = TypedSubscriptionStream<R>, Error = RpcError> {
let args = serde_json::to_value(subscribe_params)
.expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
let typed_stream = self
.0
.subscribe(subscribe, params, topic, unsubscribe)
.map(move |stream| TypedSubscriptionStream::new(stream, returns));
future::Either::B(typed_stream)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::transports::local; |
#[derive(Clone)]
struct AddClient(TypedClient);
impl From<RpcChannel> for AddClient {
fn from(channel: RpcChannel) -> Self {
AddClient(channel.into())
}
}
impl AddClient {
fn add(&self, a: u64, b: u64) -> impl Future<Item = u64, Error = RpcError> {
self.0.call_method("add", "u64", (a, b))
}
fn completed(&self, success: bool) -> impl Future<Item = (), Error = RpcError> {
self.0.notify("completed", (success,))
}
}
#[test]
fn test_client_terminates() {
crate::logger::init_log();
let mut handler = IoHandler::new();
handler.add_method("add", |params: Params| {
let (a, b) = params.parse::<(u64, u64)>()?;
let res = a + b;
Ok(jsonrpc_core::to_value(res).unwrap())
});
let (client, rpc_client) = local::connect::<AddClient, _, _>(handler);
let fut = client
.clone()
.add(3, 4)
.and_then(move |res| client.add(res, 5))
.join(rpc_client)
.map(|(res, ())| {
assert_eq!(res, 12);
})
.map_err(|err| {
eprintln!("{:?}", err);
assert!(false);
});
tokio::run(fut);
}
#[test]
fn should_send_notification() {
crate::logger::init_log();
let mut handler = IoHandler::new();
handler.add_notification("completed", |params: Params| {
let (success,) = params.parse::<(bool,)>().expect("expected to receive one boolean");
assert_eq!(success, true);
});
let (client, rpc_client) = local::connect::<AddClient, _, _>(handler);
let fut = client
.clone()
.completed(true)
.map(move |()| drop(client))
.join(rpc_client)
.map(|_| ())
.map_err(|err| {
eprintln!("{:?}", err);
assert!(false);
});
tokio::run(fut);
}
#[test]
fn should_handle_subscription() {
crate::logger::init_log();
// given
let mut handler = PubSubHandler::<local::LocalMeta, _>::default();
let called = Arc::new(AtomicBool::new(false));
let called2 = called.clone();
handler.add_subscription(
"hello",
("subscribe_hello", |params, _meta, subscriber: Subscriber| {
assert_eq!(params, core::Params::None);
let sink = subscriber
.assign_id(SubscriptionId::Number(5))
.expect("assigned subscription id");
std::thread::spawn(move || {
for i in 0..3 {
std::thread::sleep(std::time::Duration::from_millis(100));
let value = serde_json::json!({
"subscription": 5,
"result": vec![i],
});
sink.notify(serde_json::from_value(value).unwrap())
.wait()
.expect("sent notification");
}
});
}),
("unsubscribe_hello", move |id, _meta| {
// Should be called because session is dropped.
called2.store(true, Ordering::SeqCst);
assert_eq!(id, SubscriptionId::Number(5));
future::ok(core::Value::Bool(true))
}),
);
// when
let (client, rpc_client) = local::connect_with_pubsub::<TypedClient, _>(handler);
let received = Arc::new(std::sync::Mutex::new(vec![]));
let r2 = received.clone();
let fut = client
.subscribe::<_, (u32,)>("subscribe_hello", (), "hello", "unsubscribe_hello", "u32")
.and_then(|stream| {
stream
.into_future()
.map(move |(result, _)| {
drop(client);
r2.lock().unwrap().push(result.unwrap());
})
.map_err(|_| {
panic!("Expected message not received.");
})
})
.join(rpc_client)
.map(|(res, _)| {
log::info!("ok {:?}", res);
})
.map_err(|err| {
log::error!("err {:?}", err);
});
tokio::run(fut);
assert_eq!(called.load(Ordering::SeqCst), true);
assert!(
!received.lock().unwrap().is_empty(),
"Expected at least one received item."
);
}
} | use crate::{RpcChannel, RpcError, TypedClient};
use jsonrpc_core::{self as core, IoHandler};
use jsonrpc_pubsub::{PubSubHandler, Subscriber, SubscriptionId};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc; | random_line_split |
lib.rs | //! JSON-RPC client implementation.
#![deny(missing_docs)]
use failure::{format_err, Fail};
use futures::sync::{mpsc, oneshot};
use futures::{future, prelude::*};
use jsonrpc_core::{Error, Params};
use serde::de::DeserializeOwned;
use serde::Serialize;
use serde_json::Value;
use std::marker::PhantomData;
pub mod transports;
#[cfg(test)]
mod logger;
/// The errors returned by the client.
#[derive(Debug, Fail)]
pub enum RpcError {
/// An error returned by the server.
#[fail(display = "Server returned rpc error {}", _0)]
JsonRpcError(Error),
/// Failure to parse server response.
#[fail(display = "Failed to parse server response as {}: {}", _0, _1)]
ParseError(String, failure::Error),
/// Request timed out.
#[fail(display = "Request timed out")]
Timeout,
/// Not rpc specific errors.
#[fail(display = "{}", _0)]
Other(failure::Error),
}
impl From<Error> for RpcError {
fn from(error: Error) -> Self {
RpcError::JsonRpcError(error)
}
}
/// An RPC call message.
struct CallMessage {
/// The RPC method name.
method: String,
/// The RPC method parameters.
params: Params,
/// The oneshot channel to send the result of the rpc
/// call to.
sender: oneshot::Sender<Result<Value, RpcError>>,
}
/// An RPC notification.
struct NotifyMessage {
/// The RPC method name.
method: String,
/// The RPC method paramters.
params: Params,
}
/// An RPC subscription.
struct Subscription {
/// The subscribe method name.
subscribe: String,
/// The subscribe method parameters.
subscribe_params: Params,
/// The name of the notification.
notification: String,
/// The unsubscribe method name.
unsubscribe: String,
}
/// An RPC subscribe message.
struct SubscribeMessage {
/// The subscription to subscribe to.
subscription: Subscription,
/// The channel to send notifications to.
sender: mpsc::Sender<Result<Value, RpcError>>,
}
/// A message sent to the `RpcClient`.
enum RpcMessage {
/// Make an RPC call.
Call(CallMessage),
/// Send a notification.
Notify(NotifyMessage),
/// Subscribe to a notification.
Subscribe(SubscribeMessage),
}
impl From<CallMessage> for RpcMessage {
fn from(msg: CallMessage) -> Self {
RpcMessage::Call(msg)
}
}
impl From<NotifyMessage> for RpcMessage {
fn from(msg: NotifyMessage) -> Self {
RpcMessage::Notify(msg)
}
}
impl From<SubscribeMessage> for RpcMessage {
fn from(msg: SubscribeMessage) -> Self {
RpcMessage::Subscribe(msg)
}
}
/// A channel to a `RpcClient`.
#[derive(Clone)]
pub struct RpcChannel(mpsc::Sender<RpcMessage>);
impl RpcChannel {
fn send(
&self,
msg: RpcMessage,
) -> impl Future<Item = mpsc::Sender<RpcMessage>, Error = mpsc::SendError<RpcMessage>> {
self.0.to_owned().send(msg)
}
}
impl From<mpsc::Sender<RpcMessage>> for RpcChannel {
fn from(sender: mpsc::Sender<RpcMessage>) -> Self {
RpcChannel(sender)
}
}
/// The future returned by the rpc call.
pub struct RpcFuture {
recv: oneshot::Receiver<Result<Value, RpcError>>,
}
impl RpcFuture {
/// Creates a new `RpcFuture`.
pub fn new(recv: oneshot::Receiver<Result<Value, RpcError>>) -> Self {
RpcFuture { recv }
}
}
impl Future for RpcFuture {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Self::Item>, Self::Error> {
// TODO should timeout (#410)
match self.recv.poll() {
Ok(Async::Ready(Ok(value))) => Ok(Async::Ready(value)),
Ok(Async::Ready(Err(error))) => Err(error),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(error) => Err(RpcError::Other(error.into())),
}
}
}
/// The stream returned by a subscribe.
pub struct SubscriptionStream {
recv: mpsc::Receiver<Result<Value, RpcError>>,
}
impl SubscriptionStream {
/// Crates a new `SubscriptionStream`.
pub fn new(recv: mpsc::Receiver<Result<Value, RpcError>>) -> Self {
SubscriptionStream { recv }
}
}
impl Stream for SubscriptionStream {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
match self.recv.poll() {
Ok(Async::Ready(Some(Ok(value)))) => Ok(Async::Ready(Some(value))),
Ok(Async::Ready(Some(Err(error)))) => Err(error),
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(()) => Err(RpcError::Other(format_err!("mpsc channel returned an error."))),
}
}
}
/// A typed subscription stream.
pub struct TypedSubscriptionStream<T> {
_marker: PhantomData<T>,
returns: &'static str,
stream: SubscriptionStream,
}
impl<T> TypedSubscriptionStream<T> {
/// Creates a new `TypedSubscriptionStream`.
pub fn new(stream: SubscriptionStream, returns: &'static str) -> Self {
TypedSubscriptionStream {
_marker: PhantomData,
returns,
stream,
}
}
}
impl<T: DeserializeOwned + 'static> Stream for TypedSubscriptionStream<T> {
type Item = T;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
let result = match self.stream.poll()? {
Async::Ready(Some(value)) => serde_json::from_value::<T>(value)
.map(|result| Async::Ready(Some(result)))
.map_err(|error| RpcError::ParseError(self.returns.into(), error.into()))?,
Async::Ready(None) => Async::Ready(None),
Async::NotReady => Async::NotReady,
};
Ok(result)
}
}
/// Client for raw JSON RPC requests
#[derive(Clone)]
pub struct RawClient(RpcChannel);
impl From<RpcChannel> for RawClient {
fn from(channel: RpcChannel) -> Self {
RawClient(channel)
}
}
impl RawClient {
/// Call RPC method with raw JSON.
pub fn call_method(&self, method: &str, params: Params) -> impl Future<Item = Value, Error = RpcError> {
let (sender, receiver) = oneshot::channel();
let msg = CallMessage {
method: method.into(),
params,
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.and_then(|_| RpcFuture::new(receiver))
}
/// Send RPC notification with raw JSON.
pub fn notify(&self, method: &str, params: Params) -> impl Future<Item = (), Error = RpcError> {
let msg = NotifyMessage {
method: method.into(),
params,
};
self.0
.send(msg.into())
.map(|_| ())
.map_err(|error| RpcError::Other(error.into()))
}
/// Subscribe to topic with raw JSON.
pub fn subscribe(
&self,
subscribe: &str,
subscribe_params: Params,
notification: &str,
unsubscribe: &str,
) -> impl Future<Item = SubscriptionStream, Error = RpcError> {
let (sender, receiver) = mpsc::channel(0);
let msg = SubscribeMessage {
subscription: Subscription {
subscribe: subscribe.into(),
subscribe_params,
notification: notification.into(),
unsubscribe: unsubscribe.into(),
},
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.map(|_| SubscriptionStream::new(receiver))
}
}
/// Client for typed JSON RPC requests
#[derive(Clone)]
pub struct TypedClient(RawClient);
impl From<RpcChannel> for TypedClient {
fn from(channel: RpcChannel) -> Self {
TypedClient(channel.into())
}
}
impl TypedClient {
/// Create a new `TypedClient`.
pub fn new(raw_cli: RawClient) -> Self {
TypedClient(raw_cli)
}
/// Call RPC with serialization of request and deserialization of response.
pub fn call_method<T: Serialize, R: DeserializeOwned + 'static>(
&self,
method: &str,
returns: &'static str,
args: T,
) -> impl Future<Item = R, Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
Value::Object(map) => Params::Map(map),
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, JSON object or null"
))))
}
};
future::Either::B(self.0.call_method(method, params).and_then(move |value: Value| {
log::debug!("response: {:?}", value);
let result =
serde_json::from_value::<R>(value).map_err(|error| RpcError::ParseError(returns.into(), error.into()));
future::done(result)
}))
}
/// Call RPC with serialization of request only.
pub fn notify<T: Serialize>(&self, method: &str, args: T) -> impl Future<Item = (), Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
future::Either::B(self.0.notify(method, params))
}
/// Subscribe with serialization of request and deserialization of response.
pub fn subscribe<T: Serialize, R: DeserializeOwned + 'static>(
&self,
subscribe: &str,
subscribe_params: T,
topic: &str,
unsubscribe: &str,
returns: &'static str,
) -> impl Future<Item = TypedSubscriptionStream<R>, Error = RpcError> |
}
#[cfg(test)]
mod tests {
use super::*;
use crate::transports::local;
use crate::{RpcChannel, RpcError, TypedClient};
use jsonrpc_core::{self as core, IoHandler};
use jsonrpc_pubsub::{PubSubHandler, Subscriber, SubscriptionId};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
#[derive(Clone)]
struct AddClient(TypedClient);
impl From<RpcChannel> for AddClient {
fn from(channel: RpcChannel) -> Self {
AddClient(channel.into())
}
}
impl AddClient {
fn add(&self, a: u64, b: u64) -> impl Future<Item = u64, Error = RpcError> {
self.0.call_method("add", "u64", (a, b))
}
fn completed(&self, success: bool) -> impl Future<Item = (), Error = RpcError> {
self.0.notify("completed", (success,))
}
}
#[test]
fn test_client_terminates() {
crate::logger::init_log();
let mut handler = IoHandler::new();
handler.add_method("add", |params: Params| {
let (a, b) = params.parse::<(u64, u64)>()?;
let res = a + b;
Ok(jsonrpc_core::to_value(res).unwrap())
});
let (client, rpc_client) = local::connect::<AddClient, _, _>(handler);
let fut = client
.clone()
.add(3, 4)
.and_then(move |res| client.add(res, 5))
.join(rpc_client)
.map(|(res, ())| {
assert_eq!(res, 12);
})
.map_err(|err| {
eprintln!("{:?}", err);
assert!(false);
});
tokio::run(fut);
}
#[test]
fn should_send_notification() {
crate::logger::init_log();
let mut handler = IoHandler::new();
handler.add_notification("completed", |params: Params| {
let (success,) = params.parse::<(bool,)>().expect("expected to receive one boolean");
assert_eq!(success, true);
});
let (client, rpc_client) = local::connect::<AddClient, _, _>(handler);
let fut = client
.clone()
.completed(true)
.map(move |()| drop(client))
.join(rpc_client)
.map(|_| ())
.map_err(|err| {
eprintln!("{:?}", err);
assert!(false);
});
tokio::run(fut);
}
#[test]
fn should_handle_subscription() {
crate::logger::init_log();
// given
let mut handler = PubSubHandler::<local::LocalMeta, _>::default();
let called = Arc::new(AtomicBool::new(false));
let called2 = called.clone();
handler.add_subscription(
"hello",
("subscribe_hello", |params, _meta, subscriber: Subscriber| {
assert_eq!(params, core::Params::None);
let sink = subscriber
.assign_id(SubscriptionId::Number(5))
.expect("assigned subscription id");
std::thread::spawn(move || {
for i in 0..3 {
std::thread::sleep(std::time::Duration::from_millis(100));
let value = serde_json::json!({
"subscription": 5,
"result": vec![i],
});
sink.notify(serde_json::from_value(value).unwrap())
.wait()
.expect("sent notification");
}
});
}),
("unsubscribe_hello", move |id, _meta| {
// Should be called because session is dropped.
called2.store(true, Ordering::SeqCst);
assert_eq!(id, SubscriptionId::Number(5));
future::ok(core::Value::Bool(true))
}),
);
// when
let (client, rpc_client) = local::connect_with_pubsub::<TypedClient, _>(handler);
let received = Arc::new(std::sync::Mutex::new(vec![]));
let r2 = received.clone();
let fut = client
.subscribe::<_, (u32,)>("subscribe_hello", (), "hello", "unsubscribe_hello", "u32")
.and_then(|stream| {
stream
.into_future()
.map(move |(result, _)| {
drop(client);
r2.lock().unwrap().push(result.unwrap());
})
.map_err(|_| {
panic!("Expected message not received.");
})
})
.join(rpc_client)
.map(|(res, _)| {
log::info!("ok {:?}", res);
})
.map_err(|err| {
log::error!("err {:?}", err);
});
tokio::run(fut);
assert_eq!(called.load(Ordering::SeqCst), true);
assert!(
!received.lock().unwrap().is_empty(),
"Expected at least one received item."
);
}
}
| {
let args = serde_json::to_value(subscribe_params)
.expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
let typed_stream = self
.0
.subscribe(subscribe, params, topic, unsubscribe)
.map(move |stream| TypedSubscriptionStream::new(stream, returns));
future::Either::B(typed_stream)
} | identifier_body |
lib.rs | //! JSON-RPC client implementation.
#![deny(missing_docs)]
use failure::{format_err, Fail};
use futures::sync::{mpsc, oneshot};
use futures::{future, prelude::*};
use jsonrpc_core::{Error, Params};
use serde::de::DeserializeOwned;
use serde::Serialize;
use serde_json::Value;
use std::marker::PhantomData;
pub mod transports;
#[cfg(test)]
mod logger;
/// The errors returned by the client.
#[derive(Debug, Fail)]
pub enum RpcError {
/// An error returned by the server.
#[fail(display = "Server returned rpc error {}", _0)]
JsonRpcError(Error),
/// Failure to parse server response.
#[fail(display = "Failed to parse server response as {}: {}", _0, _1)]
ParseError(String, failure::Error),
/// Request timed out.
#[fail(display = "Request timed out")]
Timeout,
/// Not rpc specific errors.
#[fail(display = "{}", _0)]
Other(failure::Error),
}
impl From<Error> for RpcError {
fn from(error: Error) -> Self {
RpcError::JsonRpcError(error)
}
}
/// An RPC call message.
struct CallMessage {
/// The RPC method name.
method: String,
/// The RPC method parameters.
params: Params,
/// The oneshot channel to send the result of the rpc
/// call to.
sender: oneshot::Sender<Result<Value, RpcError>>,
}
/// An RPC notification.
struct NotifyMessage {
/// The RPC method name.
method: String,
/// The RPC method paramters.
params: Params,
}
/// An RPC subscription.
struct Subscription {
/// The subscribe method name.
subscribe: String,
/// The subscribe method parameters.
subscribe_params: Params,
/// The name of the notification.
notification: String,
/// The unsubscribe method name.
unsubscribe: String,
}
/// An RPC subscribe message.
struct SubscribeMessage {
/// The subscription to subscribe to.
subscription: Subscription,
/// The channel to send notifications to.
sender: mpsc::Sender<Result<Value, RpcError>>,
}
/// A message sent to the `RpcClient`.
enum RpcMessage {
/// Make an RPC call.
Call(CallMessage),
/// Send a notification.
Notify(NotifyMessage),
/// Subscribe to a notification.
Subscribe(SubscribeMessage),
}
impl From<CallMessage> for RpcMessage {
fn from(msg: CallMessage) -> Self {
RpcMessage::Call(msg)
}
}
impl From<NotifyMessage> for RpcMessage {
fn from(msg: NotifyMessage) -> Self {
RpcMessage::Notify(msg)
}
}
impl From<SubscribeMessage> for RpcMessage {
fn from(msg: SubscribeMessage) -> Self {
RpcMessage::Subscribe(msg)
}
}
/// A channel to a `RpcClient`.
#[derive(Clone)]
pub struct RpcChannel(mpsc::Sender<RpcMessage>);
impl RpcChannel {
fn send(
&self,
msg: RpcMessage,
) -> impl Future<Item = mpsc::Sender<RpcMessage>, Error = mpsc::SendError<RpcMessage>> {
self.0.to_owned().send(msg)
}
}
impl From<mpsc::Sender<RpcMessage>> for RpcChannel {
fn from(sender: mpsc::Sender<RpcMessage>) -> Self {
RpcChannel(sender)
}
}
/// The future returned by the rpc call.
pub struct RpcFuture {
recv: oneshot::Receiver<Result<Value, RpcError>>,
}
impl RpcFuture {
/// Creates a new `RpcFuture`.
pub fn new(recv: oneshot::Receiver<Result<Value, RpcError>>) -> Self {
RpcFuture { recv }
}
}
impl Future for RpcFuture {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Self::Item>, Self::Error> {
// TODO should timeout (#410)
match self.recv.poll() {
Ok(Async::Ready(Ok(value))) => Ok(Async::Ready(value)),
Ok(Async::Ready(Err(error))) => Err(error),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(error) => Err(RpcError::Other(error.into())),
}
}
}
/// The stream returned by a subscribe.
pub struct SubscriptionStream {
recv: mpsc::Receiver<Result<Value, RpcError>>,
}
impl SubscriptionStream {
/// Crates a new `SubscriptionStream`.
pub fn new(recv: mpsc::Receiver<Result<Value, RpcError>>) -> Self {
SubscriptionStream { recv }
}
}
impl Stream for SubscriptionStream {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
match self.recv.poll() {
Ok(Async::Ready(Some(Ok(value)))) => Ok(Async::Ready(Some(value))),
Ok(Async::Ready(Some(Err(error)))) => Err(error),
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(()) => Err(RpcError::Other(format_err!("mpsc channel returned an error."))),
}
}
}
/// A typed subscription stream.
pub struct TypedSubscriptionStream<T> {
_marker: PhantomData<T>,
returns: &'static str,
stream: SubscriptionStream,
}
impl<T> TypedSubscriptionStream<T> {
/// Creates a new `TypedSubscriptionStream`.
pub fn new(stream: SubscriptionStream, returns: &'static str) -> Self {
TypedSubscriptionStream {
_marker: PhantomData,
returns,
stream,
}
}
}
impl<T: DeserializeOwned + 'static> Stream for TypedSubscriptionStream<T> {
type Item = T;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
let result = match self.stream.poll()? {
Async::Ready(Some(value)) => serde_json::from_value::<T>(value)
.map(|result| Async::Ready(Some(result)))
.map_err(|error| RpcError::ParseError(self.returns.into(), error.into()))?,
Async::Ready(None) => Async::Ready(None),
Async::NotReady => Async::NotReady,
};
Ok(result)
}
}
/// Client for raw JSON RPC requests
#[derive(Clone)]
pub struct RawClient(RpcChannel);
impl From<RpcChannel> for RawClient {
fn from(channel: RpcChannel) -> Self {
RawClient(channel)
}
}
impl RawClient {
/// Call RPC method with raw JSON.
pub fn call_method(&self, method: &str, params: Params) -> impl Future<Item = Value, Error = RpcError> {
let (sender, receiver) = oneshot::channel();
let msg = CallMessage {
method: method.into(),
params,
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.and_then(|_| RpcFuture::new(receiver))
}
/// Send RPC notification with raw JSON.
pub fn notify(&self, method: &str, params: Params) -> impl Future<Item = (), Error = RpcError> {
let msg = NotifyMessage {
method: method.into(),
params,
};
self.0
.send(msg.into())
.map(|_| ())
.map_err(|error| RpcError::Other(error.into()))
}
/// Subscribe to topic with raw JSON.
pub fn subscribe(
&self,
subscribe: &str,
subscribe_params: Params,
notification: &str,
unsubscribe: &str,
) -> impl Future<Item = SubscriptionStream, Error = RpcError> {
let (sender, receiver) = mpsc::channel(0);
let msg = SubscribeMessage {
subscription: Subscription {
subscribe: subscribe.into(),
subscribe_params,
notification: notification.into(),
unsubscribe: unsubscribe.into(),
},
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.map(|_| SubscriptionStream::new(receiver))
}
}
/// Client for typed JSON RPC requests
#[derive(Clone)]
pub struct TypedClient(RawClient);
impl From<RpcChannel> for TypedClient {
fn from(channel: RpcChannel) -> Self {
TypedClient(channel.into())
}
}
impl TypedClient {
/// Create a new `TypedClient`.
pub fn new(raw_cli: RawClient) -> Self {
TypedClient(raw_cli)
}
/// Call RPC with serialization of request and deserialization of response.
pub fn call_method<T: Serialize, R: DeserializeOwned + 'static>(
&self,
method: &str,
returns: &'static str,
args: T,
) -> impl Future<Item = R, Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
Value::Object(map) => Params::Map(map),
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, JSON object or null"
))))
}
};
future::Either::B(self.0.call_method(method, params).and_then(move |value: Value| {
log::debug!("response: {:?}", value);
let result =
serde_json::from_value::<R>(value).map_err(|error| RpcError::ParseError(returns.into(), error.into()));
future::done(result)
}))
}
/// Call RPC with serialization of request only.
pub fn notify<T: Serialize>(&self, method: &str, args: T) -> impl Future<Item = (), Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
future::Either::B(self.0.notify(method, params))
}
/// Subscribe with serialization of request and deserialization of response.
pub fn | <T: Serialize, R: DeserializeOwned + 'static>(
&self,
subscribe: &str,
subscribe_params: T,
topic: &str,
unsubscribe: &str,
returns: &'static str,
) -> impl Future<Item = TypedSubscriptionStream<R>, Error = RpcError> {
let args = serde_json::to_value(subscribe_params)
.expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
let typed_stream = self
.0
.subscribe(subscribe, params, topic, unsubscribe)
.map(move |stream| TypedSubscriptionStream::new(stream, returns));
future::Either::B(typed_stream)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::transports::local;
use crate::{RpcChannel, RpcError, TypedClient};
use jsonrpc_core::{self as core, IoHandler};
use jsonrpc_pubsub::{PubSubHandler, Subscriber, SubscriptionId};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
#[derive(Clone)]
struct AddClient(TypedClient);
impl From<RpcChannel> for AddClient {
fn from(channel: RpcChannel) -> Self {
AddClient(channel.into())
}
}
impl AddClient {
fn add(&self, a: u64, b: u64) -> impl Future<Item = u64, Error = RpcError> {
self.0.call_method("add", "u64", (a, b))
}
fn completed(&self, success: bool) -> impl Future<Item = (), Error = RpcError> {
self.0.notify("completed", (success,))
}
}
#[test]
fn test_client_terminates() {
crate::logger::init_log();
let mut handler = IoHandler::new();
handler.add_method("add", |params: Params| {
let (a, b) = params.parse::<(u64, u64)>()?;
let res = a + b;
Ok(jsonrpc_core::to_value(res).unwrap())
});
let (client, rpc_client) = local::connect::<AddClient, _, _>(handler);
let fut = client
.clone()
.add(3, 4)
.and_then(move |res| client.add(res, 5))
.join(rpc_client)
.map(|(res, ())| {
assert_eq!(res, 12);
})
.map_err(|err| {
eprintln!("{:?}", err);
assert!(false);
});
tokio::run(fut);
}
#[test]
fn should_send_notification() {
crate::logger::init_log();
let mut handler = IoHandler::new();
handler.add_notification("completed", |params: Params| {
let (success,) = params.parse::<(bool,)>().expect("expected to receive one boolean");
assert_eq!(success, true);
});
let (client, rpc_client) = local::connect::<AddClient, _, _>(handler);
let fut = client
.clone()
.completed(true)
.map(move |()| drop(client))
.join(rpc_client)
.map(|_| ())
.map_err(|err| {
eprintln!("{:?}", err);
assert!(false);
});
tokio::run(fut);
}
#[test]
fn should_handle_subscription() {
crate::logger::init_log();
// given
let mut handler = PubSubHandler::<local::LocalMeta, _>::default();
let called = Arc::new(AtomicBool::new(false));
let called2 = called.clone();
handler.add_subscription(
"hello",
("subscribe_hello", |params, _meta, subscriber: Subscriber| {
assert_eq!(params, core::Params::None);
let sink = subscriber
.assign_id(SubscriptionId::Number(5))
.expect("assigned subscription id");
std::thread::spawn(move || {
for i in 0..3 {
std::thread::sleep(std::time::Duration::from_millis(100));
let value = serde_json::json!({
"subscription": 5,
"result": vec![i],
});
sink.notify(serde_json::from_value(value).unwrap())
.wait()
.expect("sent notification");
}
});
}),
("unsubscribe_hello", move |id, _meta| {
// Should be called because session is dropped.
called2.store(true, Ordering::SeqCst);
assert_eq!(id, SubscriptionId::Number(5));
future::ok(core::Value::Bool(true))
}),
);
// when
let (client, rpc_client) = local::connect_with_pubsub::<TypedClient, _>(handler);
let received = Arc::new(std::sync::Mutex::new(vec![]));
let r2 = received.clone();
let fut = client
.subscribe::<_, (u32,)>("subscribe_hello", (), "hello", "unsubscribe_hello", "u32")
.and_then(|stream| {
stream
.into_future()
.map(move |(result, _)| {
drop(client);
r2.lock().unwrap().push(result.unwrap());
})
.map_err(|_| {
panic!("Expected message not received.");
})
})
.join(rpc_client)
.map(|(res, _)| {
log::info!("ok {:?}", res);
})
.map_err(|err| {
log::error!("err {:?}", err);
});
tokio::run(fut);
assert_eq!(called.load(Ordering::SeqCst), true);
assert!(
!received.lock().unwrap().is_empty(),
"Expected at least one received item."
);
}
}
| subscribe | identifier_name |
value.go | /*
* Copyright 2017 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package badger
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"os"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"github.com/Connor1996/badger/fileutil"
"github.com/Connor1996/badger/options"
"github.com/Connor1996/badger/y"
"github.com/pingcap/errors"
)
// Values have their first byte being byteData or byteDelete. This helps us distinguish between
// a key that has never been seen and a key that has been explicitly deleted.
const (
bitDelete byte = 1 << 0 // Set if the key has been deleted.
bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key.
// The MSB 2 bits are for transactions.
bitTxn byte = 1 << 6 // Set if the entry is part of a txn.
bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log.
mi int64 = 1 << 20
)
type logFile struct {
path string
fd *os.File
fid uint32
size uint32
loadingMode options.FileLoadingMode
}
// openReadOnly assumes that we have a write lock on logFile.
func (lf *logFile) openReadOnly() error {
var err error
lf.fd, err = os.OpenFile(lf.path, os.O_RDONLY, 0666)
if err != nil {
return errors.Wrapf(err, "Unable to open %q as RDONLY.", lf.path)
}
fi, err := lf.fd.Stat()
if err != nil {
return errors.Wrapf(err, "Unable to check stat for %q", lf.path)
}
lf.size = uint32(fi.Size())
return nil
}
func (lf *logFile) doneWriting(offset uint32) error {
if err := lf.fd.Truncate(int64(offset)); err != nil {
return errors.Wrapf(err, "Unable to truncate file: %q", lf.path)
}
if err := fileutil.Fsync(lf.fd); err != nil {
return errors.Wrapf(err, "Unable to sync value log: %q", lf.path)
}
if err := lf.fd.Close(); err != nil {
return errors.Wrapf(err, "Unable to close value log: %q", lf.path)
}
return lf.openReadOnly()
}
// You must hold lf.lock to sync()
func (lf *logFile) sync() error {
return fileutil.Fsync(lf.fd)
}
var errStop = errors.New("Stop iteration")
var errTruncate = errors.New("Do truncate")
type logEntry func(e Entry) error
type safeRead struct {
k []byte
v []byte
um []byte
recordOffset uint32
}
func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) {
var hbuf [headerBufSize]byte
var err error
hash := crc32.New(y.CastagnoliCrcTable)
tee := io.TeeReader(reader, hash)
if _, err = io.ReadFull(tee, hbuf[:]); err != nil {
return nil, err
}
// Encounter preallocated region, just act as EOF.
if !isEncodedHeader(hbuf[:]) {
return nil, io.EOF
}
var h header
h.Decode(hbuf[:])
if h.klen > maxKeySize {
return nil, errTruncate
}
kl := int(h.klen)
if cap(r.k) < kl {
r.k = make([]byte, 2*kl)
}
vl := int(h.vlen)
if cap(r.v) < vl {
r.v = make([]byte, 2*vl)
}
e := &Entry{}
e.offset = r.recordOffset
e.Key = r.k[:kl]
e.Value = r.v[:vl]
if h.umlen > 0 {
if cap(r.um) < int(h.umlen) {
r.um = make([]byte, 2*h.umlen)
}
e.UserMeta = r.um[:h.umlen]
if _, err = io.ReadFull(tee, e.UserMeta); err != nil |
}
if _, err = io.ReadFull(tee, e.Key); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
if _, err = io.ReadFull(tee, e.Value); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
var crcBuf [4]byte
if _, err = io.ReadFull(reader, crcBuf[:]); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
crc := binary.BigEndian.Uint32(crcBuf[:])
if crc != hash.Sum32() {
return nil, errTruncate
}
e.meta = h.meta
return e, nil
}
// iterate iterates over log file. It doesn't not allocate new memory for every kv pair.
// Therefore, the kv pair is only valid for the duration of fn call.
func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) {
_, err := lf.fd.Seek(int64(offset), io.SeekStart)
if err != nil {
return 0, y.Wrap(err)
}
reader := bufio.NewReader(lf.fd)
read := &safeRead{
k: make([]byte, 10),
v: make([]byte, 10),
recordOffset: offset,
}
var lastCommit uint64
validEndOffset := read.recordOffset
for {
e, err := read.Entry(reader)
if err == io.EOF {
break
} else if err == io.ErrUnexpectedEOF || err == errTruncate {
break
} else if err != nil {
return validEndOffset, err
} else if e == nil {
continue
}
read.recordOffset += uint32(headerBufSize + len(e.Key) + len(e.Value) + len(e.UserMeta) + 4) // len(crcBuf)
if e.meta&bitTxn > 0 {
txnTs := y.ParseTs(e.Key)
if lastCommit == 0 {
lastCommit = txnTs
}
if lastCommit != txnTs {
break
}
} else if e.meta&bitFinTxn > 0 {
txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
if err != nil || lastCommit != txnTs {
break
}
// Got the end of txn. Now we can store them.
lastCommit = 0
validEndOffset = read.recordOffset
} else {
if lastCommit != 0 {
// This is most likely an entry which was moved as part of GC.
// We shouldn't get this entry in the middle of a transaction.
break
}
validEndOffset = read.recordOffset
}
if vlog.opt.ReadOnly {
return validEndOffset, ErrReplayNeeded
}
if err := fn(*e); err != nil {
if err == errStop {
break
}
return validEndOffset, y.Wrap(err)
}
}
return validEndOffset, nil
}
func (vlog *valueLog) deleteLogFile(lf *logFile) error {
path := vlog.fpath(lf.fid)
if err := lf.fd.Close(); err != nil {
return err
}
return os.Remove(path)
}
// lfDiscardStats keeps track of the amount of data that could be discarded for
// a given logfile.
type lfDiscardStats struct {
sync.Mutex
m map[uint32]int64
}
type valueLog struct {
buf bytes.Buffer
pendingLen int
dirPath string
curWriter *fileutil.BufferedWriter
files []*logFile
kv *DB
maxPtr uint64
numEntriesWritten uint32
opt Options
metrics *y.MetricsSet
}
func vlogFilePath(dirPath string, fid uint32) string {
return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid)
}
func (vlog *valueLog) fpath(fid uint32) string {
return vlogFilePath(vlog.dirPath, fid)
}
func (vlog *valueLog) currentLogFile() *logFile {
if len(vlog.files) > 0 {
return vlog.files[len(vlog.files)-1]
}
return nil
}
func (vlog *valueLog) openOrCreateFiles(readOnly bool) error {
files, err := ioutil.ReadDir(vlog.dirPath)
if err != nil {
return errors.Wrapf(err, "Error while opening value log")
}
found := make(map[uint64]struct{})
var maxFid uint32 // Beware len(files) == 0 case, this starts at 0.
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".vlog") {
continue
}
fsz := len(file.Name())
fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32)
if err != nil {
return errors.Wrapf(err, "Error while parsing value log id for file: %q", file.Name())
}
if _, ok := found[fid]; ok {
return errors.Errorf("Found the same value log file twice: %d", fid)
}
found[fid] = struct{}{}
lf := &logFile{
fid: uint32(fid),
path: vlog.fpath(uint32(fid)),
loadingMode: vlog.opt.ValueLogLoadingMode,
}
vlog.files = append(vlog.files, lf)
if uint32(fid) > maxFid {
maxFid = uint32(fid)
}
}
vlog.maxPtr = uint64(maxFid) << 32
sort.Slice(vlog.files, func(i, j int) bool {
return vlog.files[i].fid < vlog.files[j].fid
})
// Open all previous log files as read only. Open the last log file
// as read write (unless the DB is read only).
for _, lf := range vlog.files {
if lf.fid == maxFid {
var flags uint32
if readOnly {
flags |= y.ReadOnly
}
if lf.fd, err = y.OpenExistingFile(lf.path, flags); err != nil {
return errors.Wrapf(err, "Unable to open value log file")
}
opt := &vlog.opt.ValueLogWriteOptions
vlog.curWriter = fileutil.NewBufferedWriter(lf.fd, opt.WriteBufferSize, nil)
} else {
if err := lf.openReadOnly(); err != nil {
return err
}
}
}
// If no files are found, then create a new file.
if len(vlog.files) == 0 {
// We already set vlog.maxFid above
err = vlog.createVlogFile(0)
if err != nil {
return err
}
}
return nil
}
func (vlog *valueLog) createVlogFile(fid uint32) error {
atomic.StoreUint64(&vlog.maxPtr, uint64(fid)<<32)
path := vlog.fpath(fid)
lf := &logFile{fid: fid, path: path, loadingMode: vlog.opt.ValueLogLoadingMode}
vlog.numEntriesWritten = 0
var err error
if lf.fd, err = y.CreateSyncedFile(path, false); err != nil {
return errors.Wrapf(err, "Unable to create value log file")
}
if err = fileutil.Preallocate(lf.fd, vlog.opt.ValueLogFileSize); err != nil {
return errors.Wrap(err, "Unable to preallocate value log file")
}
opt := &vlog.opt.ValueLogWriteOptions
if vlog.curWriter == nil {
vlog.curWriter = fileutil.NewBufferedWriter(lf.fd, opt.WriteBufferSize, nil)
} else {
vlog.curWriter.Reset(lf.fd)
}
if err = syncDir(vlog.dirPath); err != nil {
return errors.Wrapf(err, "Unable to sync value log file dir")
}
vlog.files = append(vlog.files, lf)
syncedFid := atomic.LoadUint32(&vlog.kv.syncedFid)
for len(vlog.files) > vlog.opt.ValueLogMaxNumFiles {
deleteCandidate := vlog.files[0]
if deleteCandidate.fid < syncedFid {
os.Remove(deleteCandidate.path)
deleteCandidate.fd.Close()
vlog.files = vlog.files[1:]
continue
}
break
}
return nil
}
func (vlog *valueLog) Open(kv *DB, opt Options) error {
vlog.dirPath = opt.ValueDir
vlog.opt = opt
vlog.kv = kv
if err := vlog.openOrCreateFiles(kv.opt.ReadOnly); err != nil {
return errors.Wrapf(err, "Unable to open value log")
}
return nil
}
func (vlog *valueLog) Close() error {
var err error
for _, f := range vlog.files {
// A successful close does not guarantee that the data has been successfully saved to disk, as the kernel defers writes.
// It is not common for a file system to flush the buffers when the stream is closed.
if syncErr := fileutil.Fdatasync(f.fd); syncErr != nil {
err = syncErr
}
if closeErr := f.fd.Close(); closeErr != nil && err == nil {
err = closeErr
}
}
return err
}
// Replay replays the value log. The kv provided is only valid for the lifetime of function call.
func (vlog *valueLog) Replay(off logOffset, fn logEntry) error {
fid := off.fid
offset := off.offset
var lastOffset uint32
for _, lf := range vlog.files {
if lf.fid < fid {
continue
}
of := offset
if lf.fid > fid {
of = 0
}
endAt, err := vlog.iterate(lf, of, fn)
if err != nil {
return errors.Wrapf(err, "Unable to replay value log: %q", lf.path)
}
if lf.fid == vlog.maxFid() {
lastOffset = endAt
}
}
// Seek to the end to start writing.
var err error
last := vlog.files[len(vlog.files)-1]
_, err = last.fd.Seek(int64(lastOffset), io.SeekStart)
atomic.AddUint64(&vlog.maxPtr, uint64(lastOffset))
return errors.Wrapf(err, "Unable to seek to end of value log: %q", last.path)
}
type logOffset struct {
fid uint32
offset uint32
}
func (lo logOffset) Less(logOff logOffset) bool {
if lo.fid == logOff.fid {
return lo.offset < logOff.offset
}
return lo.fid < logOff.fid
}
func (lo logOffset) Encode() []byte {
buf := make([]byte, 8)
binary.LittleEndian.PutUint32(buf, lo.fid)
binary.LittleEndian.PutUint32(buf[4:], lo.offset)
return buf
}
func (lo *logOffset) Decode(buf []byte) {
lo.fid = binary.LittleEndian.Uint32(buf)
lo.offset = binary.LittleEndian.Uint32(buf[4:])
}
type request struct {
// Input values
Entries []*Entry
Wg sync.WaitGroup
Err error
}
func (req *request) Wait() error {
req.Wg.Wait()
req.Entries = nil
err := req.Err
requestPool.Put(req)
return err
}
func (vlog *valueLog) getMaxPtr() uint64 {
return atomic.LoadUint64(&vlog.maxPtr)
}
func (vlog *valueLog) maxFid() uint32 {
return uint32(atomic.LoadUint64(&vlog.maxPtr) >> 32)
}
func (vlog *valueLog) writableOffset() uint32 {
return uint32(atomic.LoadUint64(&vlog.maxPtr))
}
func (vlog *valueLog) flush() error {
curlf := vlog.currentLogFile()
if vlog.pendingLen == 0 {
return nil
}
err := vlog.curWriter.Flush()
if err != nil {
return errors.Wrapf(err, "Unable to write to value log file: %q", curlf.path)
}
vlog.metrics.NumWrites.Inc()
vlog.metrics.NumVLogBytesWritten.Add(float64(vlog.pendingLen))
atomic.AddUint64(&vlog.maxPtr, uint64(vlog.pendingLen))
vlog.pendingLen = 0
if vlog.writableOffset() > uint32(vlog.opt.ValueLogFileSize) ||
vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries {
var err error
if err = curlf.doneWriting(vlog.writableOffset()); err != nil {
return err
}
err = vlog.createVlogFile(vlog.maxFid() + 1)
if err != nil {
return err
}
}
return nil
}
// write is thread-unsafe by design and should not be called concurrently.
func (vlog *valueLog) write(reqs []*request) error {
for i := range reqs {
b := reqs[i]
for j := range b.Entries {
e := b.Entries[j]
plen, err := encodeEntry(e, &vlog.buf) // Now encode the entry into buffer.
if err != nil {
return err
}
vlog.curWriter.Append(vlog.buf.Bytes())
vlog.buf.Reset()
vlog.pendingLen += plen
e.logOffset.fid = vlog.currentLogFile().fid
// Use the offset including buffer length so far.
e.logOffset.offset = vlog.writableOffset() + uint32(vlog.pendingLen)
}
vlog.numEntriesWritten += uint32(len(b.Entries))
// We write to disk here so that all entries that are part of the same transaction are
// written to the same vlog file.
writeNow :=
vlog.writableOffset()+uint32(vlog.pendingLen) > uint32(vlog.opt.ValueLogFileSize) ||
vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries)
if writeNow {
if err := vlog.flush(); err != nil {
return err
}
}
}
return vlog.flush()
// Acquire mutex locks around this manipulation, so that the reads don't try to use
// an invalid file descriptor.
}
// Gets the logFile.
func (vlog *valueLog) getFile(fid uint32) (*logFile, error) {
for i := len(vlog.files) - 1; i >= 0; i-- {
file := vlog.files[i]
if file.fid == fid {
return file, nil
}
}
// log file has gone away, will need to retry the operation.
return nil, ErrRetry
}
| {
if err == io.EOF {
err = errTruncate
}
return nil, err
} | conditional_block |
value.go | /*
* Copyright 2017 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package badger
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"os"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"github.com/Connor1996/badger/fileutil"
"github.com/Connor1996/badger/options"
"github.com/Connor1996/badger/y"
"github.com/pingcap/errors"
)
// Values have their first byte being byteData or byteDelete. This helps us distinguish between
// a key that has never been seen and a key that has been explicitly deleted.
const (
bitDelete byte = 1 << 0 // Set if the key has been deleted.
bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key.
// The MSB 2 bits are for transactions.
bitTxn byte = 1 << 6 // Set if the entry is part of a txn.
bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log.
mi int64 = 1 << 20
)
type logFile struct {
path string
fd *os.File
fid uint32
size uint32
loadingMode options.FileLoadingMode
}
// openReadOnly assumes that we have a write lock on logFile.
func (lf *logFile) openReadOnly() error {
var err error
lf.fd, err = os.OpenFile(lf.path, os.O_RDONLY, 0666)
if err != nil {
return errors.Wrapf(err, "Unable to open %q as RDONLY.", lf.path)
}
fi, err := lf.fd.Stat()
if err != nil {
return errors.Wrapf(err, "Unable to check stat for %q", lf.path)
}
lf.size = uint32(fi.Size())
return nil
}
func (lf *logFile) doneWriting(offset uint32) error {
if err := lf.fd.Truncate(int64(offset)); err != nil {
return errors.Wrapf(err, "Unable to truncate file: %q", lf.path)
}
if err := fileutil.Fsync(lf.fd); err != nil {
return errors.Wrapf(err, "Unable to sync value log: %q", lf.path)
}
if err := lf.fd.Close(); err != nil {
return errors.Wrapf(err, "Unable to close value log: %q", lf.path)
}
return lf.openReadOnly()
}
// You must hold lf.lock to sync()
func (lf *logFile) sync() error {
return fileutil.Fsync(lf.fd)
}
var errStop = errors.New("Stop iteration")
var errTruncate = errors.New("Do truncate")
type logEntry func(e Entry) error
type safeRead struct {
k []byte
v []byte
um []byte
recordOffset uint32
}
func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) {
var hbuf [headerBufSize]byte
var err error
hash := crc32.New(y.CastagnoliCrcTable)
tee := io.TeeReader(reader, hash)
if _, err = io.ReadFull(tee, hbuf[:]); err != nil {
return nil, err
}
// Encounter preallocated region, just act as EOF.
if !isEncodedHeader(hbuf[:]) {
return nil, io.EOF
}
var h header
h.Decode(hbuf[:])
if h.klen > maxKeySize {
return nil, errTruncate
}
kl := int(h.klen)
if cap(r.k) < kl {
r.k = make([]byte, 2*kl)
}
vl := int(h.vlen)
if cap(r.v) < vl {
r.v = make([]byte, 2*vl)
}
e := &Entry{}
e.offset = r.recordOffset
e.Key = r.k[:kl]
e.Value = r.v[:vl]
if h.umlen > 0 {
if cap(r.um) < int(h.umlen) {
r.um = make([]byte, 2*h.umlen)
}
e.UserMeta = r.um[:h.umlen]
if _, err = io.ReadFull(tee, e.UserMeta); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
}
if _, err = io.ReadFull(tee, e.Key); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
if _, err = io.ReadFull(tee, e.Value); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
var crcBuf [4]byte
if _, err = io.ReadFull(reader, crcBuf[:]); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
crc := binary.BigEndian.Uint32(crcBuf[:])
if crc != hash.Sum32() {
return nil, errTruncate
}
e.meta = h.meta
return e, nil
}
// iterate iterates over log file. It doesn't not allocate new memory for every kv pair.
// Therefore, the kv pair is only valid for the duration of fn call.
func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) {
_, err := lf.fd.Seek(int64(offset), io.SeekStart)
if err != nil {
return 0, y.Wrap(err)
}
reader := bufio.NewReader(lf.fd)
read := &safeRead{
k: make([]byte, 10),
v: make([]byte, 10),
recordOffset: offset,
}
var lastCommit uint64
validEndOffset := read.recordOffset
for {
e, err := read.Entry(reader)
if err == io.EOF {
break
} else if err == io.ErrUnexpectedEOF || err == errTruncate {
break
} else if err != nil {
return validEndOffset, err
} else if e == nil {
continue
}
read.recordOffset += uint32(headerBufSize + len(e.Key) + len(e.Value) + len(e.UserMeta) + 4) // len(crcBuf)
if e.meta&bitTxn > 0 {
txnTs := y.ParseTs(e.Key)
if lastCommit == 0 {
lastCommit = txnTs
}
if lastCommit != txnTs {
break
}
} else if e.meta&bitFinTxn > 0 {
txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
if err != nil || lastCommit != txnTs {
break
}
// Got the end of txn. Now we can store them.
lastCommit = 0
validEndOffset = read.recordOffset
} else {
if lastCommit != 0 {
// This is most likely an entry which was moved as part of GC.
// We shouldn't get this entry in the middle of a transaction.
break
}
validEndOffset = read.recordOffset
}
if vlog.opt.ReadOnly {
return validEndOffset, ErrReplayNeeded
}
if err := fn(*e); err != nil {
if err == errStop {
break
}
return validEndOffset, y.Wrap(err)
}
}
return validEndOffset, nil
}
func (vlog *valueLog) deleteLogFile(lf *logFile) error {
path := vlog.fpath(lf.fid)
if err := lf.fd.Close(); err != nil {
return err
}
return os.Remove(path)
}
// lfDiscardStats keeps track of the amount of data that could be discarded for
// a given logfile.
type lfDiscardStats struct {
sync.Mutex
m map[uint32]int64
}
type valueLog struct {
buf bytes.Buffer
pendingLen int
dirPath string
curWriter *fileutil.BufferedWriter
files []*logFile
kv *DB
maxPtr uint64
numEntriesWritten uint32
opt Options
metrics *y.MetricsSet
}
func vlogFilePath(dirPath string, fid uint32) string |
func (vlog *valueLog) fpath(fid uint32) string {
return vlogFilePath(vlog.dirPath, fid)
}
func (vlog *valueLog) currentLogFile() *logFile {
if len(vlog.files) > 0 {
return vlog.files[len(vlog.files)-1]
}
return nil
}
func (vlog *valueLog) openOrCreateFiles(readOnly bool) error {
files, err := ioutil.ReadDir(vlog.dirPath)
if err != nil {
return errors.Wrapf(err, "Error while opening value log")
}
found := make(map[uint64]struct{})
var maxFid uint32 // Beware len(files) == 0 case, this starts at 0.
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".vlog") {
continue
}
fsz := len(file.Name())
fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32)
if err != nil {
return errors.Wrapf(err, "Error while parsing value log id for file: %q", file.Name())
}
if _, ok := found[fid]; ok {
return errors.Errorf("Found the same value log file twice: %d", fid)
}
found[fid] = struct{}{}
lf := &logFile{
fid: uint32(fid),
path: vlog.fpath(uint32(fid)),
loadingMode: vlog.opt.ValueLogLoadingMode,
}
vlog.files = append(vlog.files, lf)
if uint32(fid) > maxFid {
maxFid = uint32(fid)
}
}
vlog.maxPtr = uint64(maxFid) << 32
sort.Slice(vlog.files, func(i, j int) bool {
return vlog.files[i].fid < vlog.files[j].fid
})
// Open all previous log files as read only. Open the last log file
// as read write (unless the DB is read only).
for _, lf := range vlog.files {
if lf.fid == maxFid {
var flags uint32
if readOnly {
flags |= y.ReadOnly
}
if lf.fd, err = y.OpenExistingFile(lf.path, flags); err != nil {
return errors.Wrapf(err, "Unable to open value log file")
}
opt := &vlog.opt.ValueLogWriteOptions
vlog.curWriter = fileutil.NewBufferedWriter(lf.fd, opt.WriteBufferSize, nil)
} else {
if err := lf.openReadOnly(); err != nil {
return err
}
}
}
// If no files are found, then create a new file.
if len(vlog.files) == 0 {
// We already set vlog.maxFid above
err = vlog.createVlogFile(0)
if err != nil {
return err
}
}
return nil
}
func (vlog *valueLog) createVlogFile(fid uint32) error {
atomic.StoreUint64(&vlog.maxPtr, uint64(fid)<<32)
path := vlog.fpath(fid)
lf := &logFile{fid: fid, path: path, loadingMode: vlog.opt.ValueLogLoadingMode}
vlog.numEntriesWritten = 0
var err error
if lf.fd, err = y.CreateSyncedFile(path, false); err != nil {
return errors.Wrapf(err, "Unable to create value log file")
}
if err = fileutil.Preallocate(lf.fd, vlog.opt.ValueLogFileSize); err != nil {
return errors.Wrap(err, "Unable to preallocate value log file")
}
opt := &vlog.opt.ValueLogWriteOptions
if vlog.curWriter == nil {
vlog.curWriter = fileutil.NewBufferedWriter(lf.fd, opt.WriteBufferSize, nil)
} else {
vlog.curWriter.Reset(lf.fd)
}
if err = syncDir(vlog.dirPath); err != nil {
return errors.Wrapf(err, "Unable to sync value log file dir")
}
vlog.files = append(vlog.files, lf)
syncedFid := atomic.LoadUint32(&vlog.kv.syncedFid)
for len(vlog.files) > vlog.opt.ValueLogMaxNumFiles {
deleteCandidate := vlog.files[0]
if deleteCandidate.fid < syncedFid {
os.Remove(deleteCandidate.path)
deleteCandidate.fd.Close()
vlog.files = vlog.files[1:]
continue
}
break
}
return nil
}
func (vlog *valueLog) Open(kv *DB, opt Options) error {
vlog.dirPath = opt.ValueDir
vlog.opt = opt
vlog.kv = kv
if err := vlog.openOrCreateFiles(kv.opt.ReadOnly); err != nil {
return errors.Wrapf(err, "Unable to open value log")
}
return nil
}
func (vlog *valueLog) Close() error {
var err error
for _, f := range vlog.files {
// A successful close does not guarantee that the data has been successfully saved to disk, as the kernel defers writes.
// It is not common for a file system to flush the buffers when the stream is closed.
if syncErr := fileutil.Fdatasync(f.fd); syncErr != nil {
err = syncErr
}
if closeErr := f.fd.Close(); closeErr != nil && err == nil {
err = closeErr
}
}
return err
}
// Replay replays the value log. The kv provided is only valid for the lifetime of function call.
func (vlog *valueLog) Replay(off logOffset, fn logEntry) error {
fid := off.fid
offset := off.offset
var lastOffset uint32
for _, lf := range vlog.files {
if lf.fid < fid {
continue
}
of := offset
if lf.fid > fid {
of = 0
}
endAt, err := vlog.iterate(lf, of, fn)
if err != nil {
return errors.Wrapf(err, "Unable to replay value log: %q", lf.path)
}
if lf.fid == vlog.maxFid() {
lastOffset = endAt
}
}
// Seek to the end to start writing.
var err error
last := vlog.files[len(vlog.files)-1]
_, err = last.fd.Seek(int64(lastOffset), io.SeekStart)
atomic.AddUint64(&vlog.maxPtr, uint64(lastOffset))
return errors.Wrapf(err, "Unable to seek to end of value log: %q", last.path)
}
type logOffset struct {
fid uint32
offset uint32
}
func (lo logOffset) Less(logOff logOffset) bool {
if lo.fid == logOff.fid {
return lo.offset < logOff.offset
}
return lo.fid < logOff.fid
}
func (lo logOffset) Encode() []byte {
buf := make([]byte, 8)
binary.LittleEndian.PutUint32(buf, lo.fid)
binary.LittleEndian.PutUint32(buf[4:], lo.offset)
return buf
}
func (lo *logOffset) Decode(buf []byte) {
lo.fid = binary.LittleEndian.Uint32(buf)
lo.offset = binary.LittleEndian.Uint32(buf[4:])
}
type request struct {
// Input values
Entries []*Entry
Wg sync.WaitGroup
Err error
}
func (req *request) Wait() error {
req.Wg.Wait()
req.Entries = nil
err := req.Err
requestPool.Put(req)
return err
}
func (vlog *valueLog) getMaxPtr() uint64 {
return atomic.LoadUint64(&vlog.maxPtr)
}
func (vlog *valueLog) maxFid() uint32 {
return uint32(atomic.LoadUint64(&vlog.maxPtr) >> 32)
}
func (vlog *valueLog) writableOffset() uint32 {
return uint32(atomic.LoadUint64(&vlog.maxPtr))
}
func (vlog *valueLog) flush() error {
curlf := vlog.currentLogFile()
if vlog.pendingLen == 0 {
return nil
}
err := vlog.curWriter.Flush()
if err != nil {
return errors.Wrapf(err, "Unable to write to value log file: %q", curlf.path)
}
vlog.metrics.NumWrites.Inc()
vlog.metrics.NumVLogBytesWritten.Add(float64(vlog.pendingLen))
atomic.AddUint64(&vlog.maxPtr, uint64(vlog.pendingLen))
vlog.pendingLen = 0
if vlog.writableOffset() > uint32(vlog.opt.ValueLogFileSize) ||
vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries {
var err error
if err = curlf.doneWriting(vlog.writableOffset()); err != nil {
return err
}
err = vlog.createVlogFile(vlog.maxFid() + 1)
if err != nil {
return err
}
}
return nil
}
// write is thread-unsafe by design and should not be called concurrently.
func (vlog *valueLog) write(reqs []*request) error {
for i := range reqs {
b := reqs[i]
for j := range b.Entries {
e := b.Entries[j]
plen, err := encodeEntry(e, &vlog.buf) // Now encode the entry into buffer.
if err != nil {
return err
}
vlog.curWriter.Append(vlog.buf.Bytes())
vlog.buf.Reset()
vlog.pendingLen += plen
e.logOffset.fid = vlog.currentLogFile().fid
// Use the offset including buffer length so far.
e.logOffset.offset = vlog.writableOffset() + uint32(vlog.pendingLen)
}
vlog.numEntriesWritten += uint32(len(b.Entries))
// We write to disk here so that all entries that are part of the same transaction are
// written to the same vlog file.
writeNow :=
vlog.writableOffset()+uint32(vlog.pendingLen) > uint32(vlog.opt.ValueLogFileSize) ||
vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries)
if writeNow {
if err := vlog.flush(); err != nil {
return err
}
}
}
return vlog.flush()
// Acquire mutex locks around this manipulation, so that the reads don't try to use
// an invalid file descriptor.
}
// Gets the logFile.
func (vlog *valueLog) getFile(fid uint32) (*logFile, error) {
for i := len(vlog.files) - 1; i >= 0; i-- {
file := vlog.files[i]
if file.fid == fid {
return file, nil
}
}
// log file has gone away, will need to retry the operation.
return nil, ErrRetry
}
| {
return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid)
} | identifier_body |
value.go | /*
* Copyright 2017 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package badger
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"os"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"github.com/Connor1996/badger/fileutil"
"github.com/Connor1996/badger/options"
"github.com/Connor1996/badger/y"
"github.com/pingcap/errors"
)
// Values have their first byte being byteData or byteDelete. This helps us distinguish between
// a key that has never been seen and a key that has been explicitly deleted.
const (
bitDelete byte = 1 << 0 // Set if the key has been deleted.
bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key.
// The MSB 2 bits are for transactions.
bitTxn byte = 1 << 6 // Set if the entry is part of a txn.
bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log.
mi int64 = 1 << 20
)
type logFile struct {
path string
fd *os.File
fid uint32
size uint32
loadingMode options.FileLoadingMode
}
// openReadOnly assumes that we have a write lock on logFile.
func (lf *logFile) openReadOnly() error {
var err error
lf.fd, err = os.OpenFile(lf.path, os.O_RDONLY, 0666)
if err != nil {
return errors.Wrapf(err, "Unable to open %q as RDONLY.", lf.path)
}
fi, err := lf.fd.Stat()
if err != nil {
return errors.Wrapf(err, "Unable to check stat for %q", lf.path)
}
lf.size = uint32(fi.Size())
return nil
}
func (lf *logFile) doneWriting(offset uint32) error {
if err := lf.fd.Truncate(int64(offset)); err != nil {
return errors.Wrapf(err, "Unable to truncate file: %q", lf.path)
}
if err := fileutil.Fsync(lf.fd); err != nil {
return errors.Wrapf(err, "Unable to sync value log: %q", lf.path)
}
if err := lf.fd.Close(); err != nil {
return errors.Wrapf(err, "Unable to close value log: %q", lf.path)
}
return lf.openReadOnly()
}
// You must hold lf.lock to sync()
func (lf *logFile) sync() error {
return fileutil.Fsync(lf.fd)
}
var errStop = errors.New("Stop iteration")
var errTruncate = errors.New("Do truncate")
type logEntry func(e Entry) error
type safeRead struct {
k []byte
v []byte
um []byte
recordOffset uint32
}
func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) {
var hbuf [headerBufSize]byte
var err error
hash := crc32.New(y.CastagnoliCrcTable)
tee := io.TeeReader(reader, hash)
if _, err = io.ReadFull(tee, hbuf[:]); err != nil {
return nil, err
}
// Encounter preallocated region, just act as EOF.
if !isEncodedHeader(hbuf[:]) {
return nil, io.EOF
}
var h header
h.Decode(hbuf[:])
if h.klen > maxKeySize {
return nil, errTruncate
}
kl := int(h.klen)
if cap(r.k) < kl {
r.k = make([]byte, 2*kl)
}
vl := int(h.vlen)
if cap(r.v) < vl {
r.v = make([]byte, 2*vl)
}
e := &Entry{}
e.offset = r.recordOffset
e.Key = r.k[:kl]
e.Value = r.v[:vl]
if h.umlen > 0 {
if cap(r.um) < int(h.umlen) {
r.um = make([]byte, 2*h.umlen)
}
e.UserMeta = r.um[:h.umlen]
if _, err = io.ReadFull(tee, e.UserMeta); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
}
if _, err = io.ReadFull(tee, e.Key); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
if _, err = io.ReadFull(tee, e.Value); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
var crcBuf [4]byte
if _, err = io.ReadFull(reader, crcBuf[:]); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
crc := binary.BigEndian.Uint32(crcBuf[:])
if crc != hash.Sum32() {
return nil, errTruncate
}
e.meta = h.meta
return e, nil
}
// iterate iterates over log file. It doesn't not allocate new memory for every kv pair.
// Therefore, the kv pair is only valid for the duration of fn call.
func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) {
_, err := lf.fd.Seek(int64(offset), io.SeekStart)
if err != nil {
return 0, y.Wrap(err)
}
reader := bufio.NewReader(lf.fd)
read := &safeRead{
k: make([]byte, 10),
v: make([]byte, 10),
recordOffset: offset,
}
var lastCommit uint64
validEndOffset := read.recordOffset
for {
e, err := read.Entry(reader)
if err == io.EOF {
break
} else if err == io.ErrUnexpectedEOF || err == errTruncate {
break
} else if err != nil {
return validEndOffset, err
} else if e == nil {
continue
}
read.recordOffset += uint32(headerBufSize + len(e.Key) + len(e.Value) + len(e.UserMeta) + 4) // len(crcBuf)
if e.meta&bitTxn > 0 {
txnTs := y.ParseTs(e.Key)
if lastCommit == 0 {
lastCommit = txnTs
}
if lastCommit != txnTs {
break
}
} else if e.meta&bitFinTxn > 0 {
txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
if err != nil || lastCommit != txnTs {
break
}
// Got the end of txn. Now we can store them.
lastCommit = 0
validEndOffset = read.recordOffset
} else {
if lastCommit != 0 {
// This is most likely an entry which was moved as part of GC.
// We shouldn't get this entry in the middle of a transaction.
break
}
validEndOffset = read.recordOffset
}
if vlog.opt.ReadOnly {
return validEndOffset, ErrReplayNeeded
}
if err := fn(*e); err != nil {
if err == errStop {
break
}
return validEndOffset, y.Wrap(err)
}
}
return validEndOffset, nil
}
func (vlog *valueLog) deleteLogFile(lf *logFile) error {
path := vlog.fpath(lf.fid)
if err := lf.fd.Close(); err != nil {
return err
}
return os.Remove(path)
}
// lfDiscardStats keeps track of the amount of data that could be discarded for
// a given logfile.
type lfDiscardStats struct {
sync.Mutex
m map[uint32]int64
}
type valueLog struct {
buf bytes.Buffer
pendingLen int
dirPath string
curWriter *fileutil.BufferedWriter
files []*logFile
kv *DB
maxPtr uint64
numEntriesWritten uint32
opt Options
metrics *y.MetricsSet
}
func vlogFilePath(dirPath string, fid uint32) string {
return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid)
}
func (vlog *valueLog) fpath(fid uint32) string {
return vlogFilePath(vlog.dirPath, fid)
}
func (vlog *valueLog) currentLogFile() *logFile {
if len(vlog.files) > 0 {
return vlog.files[len(vlog.files)-1]
}
return nil
}
func (vlog *valueLog) openOrCreateFiles(readOnly bool) error {
files, err := ioutil.ReadDir(vlog.dirPath)
if err != nil {
return errors.Wrapf(err, "Error while opening value log")
}
found := make(map[uint64]struct{})
var maxFid uint32 // Beware len(files) == 0 case, this starts at 0.
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".vlog") {
continue
}
fsz := len(file.Name())
fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32)
if err != nil {
return errors.Wrapf(err, "Error while parsing value log id for file: %q", file.Name())
}
if _, ok := found[fid]; ok {
return errors.Errorf("Found the same value log file twice: %d", fid)
}
found[fid] = struct{}{}
lf := &logFile{
fid: uint32(fid),
path: vlog.fpath(uint32(fid)),
loadingMode: vlog.opt.ValueLogLoadingMode,
}
vlog.files = append(vlog.files, lf)
if uint32(fid) > maxFid {
maxFid = uint32(fid)
}
}
vlog.maxPtr = uint64(maxFid) << 32
sort.Slice(vlog.files, func(i, j int) bool {
return vlog.files[i].fid < vlog.files[j].fid | if lf.fid == maxFid {
var flags uint32
if readOnly {
flags |= y.ReadOnly
}
if lf.fd, err = y.OpenExistingFile(lf.path, flags); err != nil {
return errors.Wrapf(err, "Unable to open value log file")
}
opt := &vlog.opt.ValueLogWriteOptions
vlog.curWriter = fileutil.NewBufferedWriter(lf.fd, opt.WriteBufferSize, nil)
} else {
if err := lf.openReadOnly(); err != nil {
return err
}
}
}
// If no files are found, then create a new file.
if len(vlog.files) == 0 {
// We already set vlog.maxFid above
err = vlog.createVlogFile(0)
if err != nil {
return err
}
}
return nil
}
func (vlog *valueLog) createVlogFile(fid uint32) error {
atomic.StoreUint64(&vlog.maxPtr, uint64(fid)<<32)
path := vlog.fpath(fid)
lf := &logFile{fid: fid, path: path, loadingMode: vlog.opt.ValueLogLoadingMode}
vlog.numEntriesWritten = 0
var err error
if lf.fd, err = y.CreateSyncedFile(path, false); err != nil {
return errors.Wrapf(err, "Unable to create value log file")
}
if err = fileutil.Preallocate(lf.fd, vlog.opt.ValueLogFileSize); err != nil {
return errors.Wrap(err, "Unable to preallocate value log file")
}
opt := &vlog.opt.ValueLogWriteOptions
if vlog.curWriter == nil {
vlog.curWriter = fileutil.NewBufferedWriter(lf.fd, opt.WriteBufferSize, nil)
} else {
vlog.curWriter.Reset(lf.fd)
}
if err = syncDir(vlog.dirPath); err != nil {
return errors.Wrapf(err, "Unable to sync value log file dir")
}
vlog.files = append(vlog.files, lf)
syncedFid := atomic.LoadUint32(&vlog.kv.syncedFid)
for len(vlog.files) > vlog.opt.ValueLogMaxNumFiles {
deleteCandidate := vlog.files[0]
if deleteCandidate.fid < syncedFid {
os.Remove(deleteCandidate.path)
deleteCandidate.fd.Close()
vlog.files = vlog.files[1:]
continue
}
break
}
return nil
}
func (vlog *valueLog) Open(kv *DB, opt Options) error {
vlog.dirPath = opt.ValueDir
vlog.opt = opt
vlog.kv = kv
if err := vlog.openOrCreateFiles(kv.opt.ReadOnly); err != nil {
return errors.Wrapf(err, "Unable to open value log")
}
return nil
}
func (vlog *valueLog) Close() error {
var err error
for _, f := range vlog.files {
// A successful close does not guarantee that the data has been successfully saved to disk, as the kernel defers writes.
// It is not common for a file system to flush the buffers when the stream is closed.
if syncErr := fileutil.Fdatasync(f.fd); syncErr != nil {
err = syncErr
}
if closeErr := f.fd.Close(); closeErr != nil && err == nil {
err = closeErr
}
}
return err
}
// Replay replays the value log. The kv provided is only valid for the lifetime of function call.
func (vlog *valueLog) Replay(off logOffset, fn logEntry) error {
fid := off.fid
offset := off.offset
var lastOffset uint32
for _, lf := range vlog.files {
if lf.fid < fid {
continue
}
of := offset
if lf.fid > fid {
of = 0
}
endAt, err := vlog.iterate(lf, of, fn)
if err != nil {
return errors.Wrapf(err, "Unable to replay value log: %q", lf.path)
}
if lf.fid == vlog.maxFid() {
lastOffset = endAt
}
}
// Seek to the end to start writing.
var err error
last := vlog.files[len(vlog.files)-1]
_, err = last.fd.Seek(int64(lastOffset), io.SeekStart)
atomic.AddUint64(&vlog.maxPtr, uint64(lastOffset))
return errors.Wrapf(err, "Unable to seek to end of value log: %q", last.path)
}
type logOffset struct {
fid uint32
offset uint32
}
func (lo logOffset) Less(logOff logOffset) bool {
if lo.fid == logOff.fid {
return lo.offset < logOff.offset
}
return lo.fid < logOff.fid
}
func (lo logOffset) Encode() []byte {
buf := make([]byte, 8)
binary.LittleEndian.PutUint32(buf, lo.fid)
binary.LittleEndian.PutUint32(buf[4:], lo.offset)
return buf
}
func (lo *logOffset) Decode(buf []byte) {
lo.fid = binary.LittleEndian.Uint32(buf)
lo.offset = binary.LittleEndian.Uint32(buf[4:])
}
type request struct {
// Input values
Entries []*Entry
Wg sync.WaitGroup
Err error
}
func (req *request) Wait() error {
req.Wg.Wait()
req.Entries = nil
err := req.Err
requestPool.Put(req)
return err
}
func (vlog *valueLog) getMaxPtr() uint64 {
return atomic.LoadUint64(&vlog.maxPtr)
}
func (vlog *valueLog) maxFid() uint32 {
return uint32(atomic.LoadUint64(&vlog.maxPtr) >> 32)
}
func (vlog *valueLog) writableOffset() uint32 {
return uint32(atomic.LoadUint64(&vlog.maxPtr))
}
func (vlog *valueLog) flush() error {
curlf := vlog.currentLogFile()
if vlog.pendingLen == 0 {
return nil
}
err := vlog.curWriter.Flush()
if err != nil {
return errors.Wrapf(err, "Unable to write to value log file: %q", curlf.path)
}
vlog.metrics.NumWrites.Inc()
vlog.metrics.NumVLogBytesWritten.Add(float64(vlog.pendingLen))
atomic.AddUint64(&vlog.maxPtr, uint64(vlog.pendingLen))
vlog.pendingLen = 0
if vlog.writableOffset() > uint32(vlog.opt.ValueLogFileSize) ||
vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries {
var err error
if err = curlf.doneWriting(vlog.writableOffset()); err != nil {
return err
}
err = vlog.createVlogFile(vlog.maxFid() + 1)
if err != nil {
return err
}
}
return nil
}
// write is thread-unsafe by design and should not be called concurrently.
func (vlog *valueLog) write(reqs []*request) error {
for i := range reqs {
b := reqs[i]
for j := range b.Entries {
e := b.Entries[j]
plen, err := encodeEntry(e, &vlog.buf) // Now encode the entry into buffer.
if err != nil {
return err
}
vlog.curWriter.Append(vlog.buf.Bytes())
vlog.buf.Reset()
vlog.pendingLen += plen
e.logOffset.fid = vlog.currentLogFile().fid
// Use the offset including buffer length so far.
e.logOffset.offset = vlog.writableOffset() + uint32(vlog.pendingLen)
}
vlog.numEntriesWritten += uint32(len(b.Entries))
// We write to disk here so that all entries that are part of the same transaction are
// written to the same vlog file.
writeNow :=
vlog.writableOffset()+uint32(vlog.pendingLen) > uint32(vlog.opt.ValueLogFileSize) ||
vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries)
if writeNow {
if err := vlog.flush(); err != nil {
return err
}
}
}
return vlog.flush()
// Acquire mutex locks around this manipulation, so that the reads don't try to use
// an invalid file descriptor.
}
// Gets the logFile.
func (vlog *valueLog) getFile(fid uint32) (*logFile, error) {
for i := len(vlog.files) - 1; i >= 0; i-- {
file := vlog.files[i]
if file.fid == fid {
return file, nil
}
}
// log file has gone away, will need to retry the operation.
return nil, ErrRetry
} | })
// Open all previous log files as read only. Open the last log file
// as read write (unless the DB is read only).
for _, lf := range vlog.files { | random_line_split |
value.go | /*
* Copyright 2017 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package badger
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"os"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"github.com/Connor1996/badger/fileutil"
"github.com/Connor1996/badger/options"
"github.com/Connor1996/badger/y"
"github.com/pingcap/errors"
)
// Values have their first byte being byteData or byteDelete. This helps us distinguish between
// a key that has never been seen and a key that has been explicitly deleted.
const (
bitDelete byte = 1 << 0 // Set if the key has been deleted.
bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key.
// The MSB 2 bits are for transactions.
bitTxn byte = 1 << 6 // Set if the entry is part of a txn.
bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log.
mi int64 = 1 << 20
)
type logFile struct {
path string
fd *os.File
fid uint32
size uint32
loadingMode options.FileLoadingMode
}
// openReadOnly assumes that we have a write lock on logFile.
func (lf *logFile) openReadOnly() error {
var err error
lf.fd, err = os.OpenFile(lf.path, os.O_RDONLY, 0666)
if err != nil {
return errors.Wrapf(err, "Unable to open %q as RDONLY.", lf.path)
}
fi, err := lf.fd.Stat()
if err != nil {
return errors.Wrapf(err, "Unable to check stat for %q", lf.path)
}
lf.size = uint32(fi.Size())
return nil
}
func (lf *logFile) doneWriting(offset uint32) error {
if err := lf.fd.Truncate(int64(offset)); err != nil {
return errors.Wrapf(err, "Unable to truncate file: %q", lf.path)
}
if err := fileutil.Fsync(lf.fd); err != nil {
return errors.Wrapf(err, "Unable to sync value log: %q", lf.path)
}
if err := lf.fd.Close(); err != nil {
return errors.Wrapf(err, "Unable to close value log: %q", lf.path)
}
return lf.openReadOnly()
}
// You must hold lf.lock to sync()
func (lf *logFile) sync() error {
return fileutil.Fsync(lf.fd)
}
var errStop = errors.New("Stop iteration")
var errTruncate = errors.New("Do truncate")
type logEntry func(e Entry) error
type safeRead struct {
k []byte
v []byte
um []byte
recordOffset uint32
}
func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) {
var hbuf [headerBufSize]byte
var err error
hash := crc32.New(y.CastagnoliCrcTable)
tee := io.TeeReader(reader, hash)
if _, err = io.ReadFull(tee, hbuf[:]); err != nil {
return nil, err
}
// Encounter preallocated region, just act as EOF.
if !isEncodedHeader(hbuf[:]) {
return nil, io.EOF
}
var h header
h.Decode(hbuf[:])
if h.klen > maxKeySize {
return nil, errTruncate
}
kl := int(h.klen)
if cap(r.k) < kl {
r.k = make([]byte, 2*kl)
}
vl := int(h.vlen)
if cap(r.v) < vl {
r.v = make([]byte, 2*vl)
}
e := &Entry{}
e.offset = r.recordOffset
e.Key = r.k[:kl]
e.Value = r.v[:vl]
if h.umlen > 0 {
if cap(r.um) < int(h.umlen) {
r.um = make([]byte, 2*h.umlen)
}
e.UserMeta = r.um[:h.umlen]
if _, err = io.ReadFull(tee, e.UserMeta); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
}
if _, err = io.ReadFull(tee, e.Key); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
if _, err = io.ReadFull(tee, e.Value); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
var crcBuf [4]byte
if _, err = io.ReadFull(reader, crcBuf[:]); err != nil {
if err == io.EOF {
err = errTruncate
}
return nil, err
}
crc := binary.BigEndian.Uint32(crcBuf[:])
if crc != hash.Sum32() {
return nil, errTruncate
}
e.meta = h.meta
return e, nil
}
// iterate iterates over log file. It doesn't not allocate new memory for every kv pair.
// Therefore, the kv pair is only valid for the duration of fn call.
func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) {
_, err := lf.fd.Seek(int64(offset), io.SeekStart)
if err != nil {
return 0, y.Wrap(err)
}
reader := bufio.NewReader(lf.fd)
read := &safeRead{
k: make([]byte, 10),
v: make([]byte, 10),
recordOffset: offset,
}
var lastCommit uint64
validEndOffset := read.recordOffset
for {
e, err := read.Entry(reader)
if err == io.EOF {
break
} else if err == io.ErrUnexpectedEOF || err == errTruncate {
break
} else if err != nil {
return validEndOffset, err
} else if e == nil {
continue
}
read.recordOffset += uint32(headerBufSize + len(e.Key) + len(e.Value) + len(e.UserMeta) + 4) // len(crcBuf)
if e.meta&bitTxn > 0 {
txnTs := y.ParseTs(e.Key)
if lastCommit == 0 {
lastCommit = txnTs
}
if lastCommit != txnTs {
break
}
} else if e.meta&bitFinTxn > 0 {
txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
if err != nil || lastCommit != txnTs {
break
}
// Got the end of txn. Now we can store them.
lastCommit = 0
validEndOffset = read.recordOffset
} else {
if lastCommit != 0 {
// This is most likely an entry which was moved as part of GC.
// We shouldn't get this entry in the middle of a transaction.
break
}
validEndOffset = read.recordOffset
}
if vlog.opt.ReadOnly {
return validEndOffset, ErrReplayNeeded
}
if err := fn(*e); err != nil {
if err == errStop {
break
}
return validEndOffset, y.Wrap(err)
}
}
return validEndOffset, nil
}
func (vlog *valueLog) deleteLogFile(lf *logFile) error {
path := vlog.fpath(lf.fid)
if err := lf.fd.Close(); err != nil {
return err
}
return os.Remove(path)
}
// lfDiscardStats keeps track of the amount of data that could be discarded for
// a given logfile.
type lfDiscardStats struct {
sync.Mutex
m map[uint32]int64
}
type valueLog struct {
buf bytes.Buffer
pendingLen int
dirPath string
curWriter *fileutil.BufferedWriter
files []*logFile
kv *DB
maxPtr uint64
numEntriesWritten uint32
opt Options
metrics *y.MetricsSet
}
func vlogFilePath(dirPath string, fid uint32) string {
return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid)
}
func (vlog *valueLog) | (fid uint32) string {
return vlogFilePath(vlog.dirPath, fid)
}
func (vlog *valueLog) currentLogFile() *logFile {
if len(vlog.files) > 0 {
return vlog.files[len(vlog.files)-1]
}
return nil
}
func (vlog *valueLog) openOrCreateFiles(readOnly bool) error {
files, err := ioutil.ReadDir(vlog.dirPath)
if err != nil {
return errors.Wrapf(err, "Error while opening value log")
}
found := make(map[uint64]struct{})
var maxFid uint32 // Beware len(files) == 0 case, this starts at 0.
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".vlog") {
continue
}
fsz := len(file.Name())
fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32)
if err != nil {
return errors.Wrapf(err, "Error while parsing value log id for file: %q", file.Name())
}
if _, ok := found[fid]; ok {
return errors.Errorf("Found the same value log file twice: %d", fid)
}
found[fid] = struct{}{}
lf := &logFile{
fid: uint32(fid),
path: vlog.fpath(uint32(fid)),
loadingMode: vlog.opt.ValueLogLoadingMode,
}
vlog.files = append(vlog.files, lf)
if uint32(fid) > maxFid {
maxFid = uint32(fid)
}
}
vlog.maxPtr = uint64(maxFid) << 32
sort.Slice(vlog.files, func(i, j int) bool {
return vlog.files[i].fid < vlog.files[j].fid
})
// Open all previous log files as read only. Open the last log file
// as read write (unless the DB is read only).
for _, lf := range vlog.files {
if lf.fid == maxFid {
var flags uint32
if readOnly {
flags |= y.ReadOnly
}
if lf.fd, err = y.OpenExistingFile(lf.path, flags); err != nil {
return errors.Wrapf(err, "Unable to open value log file")
}
opt := &vlog.opt.ValueLogWriteOptions
vlog.curWriter = fileutil.NewBufferedWriter(lf.fd, opt.WriteBufferSize, nil)
} else {
if err := lf.openReadOnly(); err != nil {
return err
}
}
}
// If no files are found, then create a new file.
if len(vlog.files) == 0 {
// We already set vlog.maxFid above
err = vlog.createVlogFile(0)
if err != nil {
return err
}
}
return nil
}
func (vlog *valueLog) createVlogFile(fid uint32) error {
atomic.StoreUint64(&vlog.maxPtr, uint64(fid)<<32)
path := vlog.fpath(fid)
lf := &logFile{fid: fid, path: path, loadingMode: vlog.opt.ValueLogLoadingMode}
vlog.numEntriesWritten = 0
var err error
if lf.fd, err = y.CreateSyncedFile(path, false); err != nil {
return errors.Wrapf(err, "Unable to create value log file")
}
if err = fileutil.Preallocate(lf.fd, vlog.opt.ValueLogFileSize); err != nil {
return errors.Wrap(err, "Unable to preallocate value log file")
}
opt := &vlog.opt.ValueLogWriteOptions
if vlog.curWriter == nil {
vlog.curWriter = fileutil.NewBufferedWriter(lf.fd, opt.WriteBufferSize, nil)
} else {
vlog.curWriter.Reset(lf.fd)
}
if err = syncDir(vlog.dirPath); err != nil {
return errors.Wrapf(err, "Unable to sync value log file dir")
}
vlog.files = append(vlog.files, lf)
syncedFid := atomic.LoadUint32(&vlog.kv.syncedFid)
for len(vlog.files) > vlog.opt.ValueLogMaxNumFiles {
deleteCandidate := vlog.files[0]
if deleteCandidate.fid < syncedFid {
os.Remove(deleteCandidate.path)
deleteCandidate.fd.Close()
vlog.files = vlog.files[1:]
continue
}
break
}
return nil
}
func (vlog *valueLog) Open(kv *DB, opt Options) error {
vlog.dirPath = opt.ValueDir
vlog.opt = opt
vlog.kv = kv
if err := vlog.openOrCreateFiles(kv.opt.ReadOnly); err != nil {
return errors.Wrapf(err, "Unable to open value log")
}
return nil
}
func (vlog *valueLog) Close() error {
var err error
for _, f := range vlog.files {
// A successful close does not guarantee that the data has been successfully saved to disk, as the kernel defers writes.
// It is not common for a file system to flush the buffers when the stream is closed.
if syncErr := fileutil.Fdatasync(f.fd); syncErr != nil {
err = syncErr
}
if closeErr := f.fd.Close(); closeErr != nil && err == nil {
err = closeErr
}
}
return err
}
// Replay replays the value log. The kv provided is only valid for the lifetime of function call.
func (vlog *valueLog) Replay(off logOffset, fn logEntry) error {
fid := off.fid
offset := off.offset
var lastOffset uint32
for _, lf := range vlog.files {
if lf.fid < fid {
continue
}
of := offset
if lf.fid > fid {
of = 0
}
endAt, err := vlog.iterate(lf, of, fn)
if err != nil {
return errors.Wrapf(err, "Unable to replay value log: %q", lf.path)
}
if lf.fid == vlog.maxFid() {
lastOffset = endAt
}
}
// Seek to the end to start writing.
var err error
last := vlog.files[len(vlog.files)-1]
_, err = last.fd.Seek(int64(lastOffset), io.SeekStart)
atomic.AddUint64(&vlog.maxPtr, uint64(lastOffset))
return errors.Wrapf(err, "Unable to seek to end of value log: %q", last.path)
}
type logOffset struct {
fid uint32
offset uint32
}
func (lo logOffset) Less(logOff logOffset) bool {
if lo.fid == logOff.fid {
return lo.offset < logOff.offset
}
return lo.fid < logOff.fid
}
func (lo logOffset) Encode() []byte {
buf := make([]byte, 8)
binary.LittleEndian.PutUint32(buf, lo.fid)
binary.LittleEndian.PutUint32(buf[4:], lo.offset)
return buf
}
func (lo *logOffset) Decode(buf []byte) {
lo.fid = binary.LittleEndian.Uint32(buf)
lo.offset = binary.LittleEndian.Uint32(buf[4:])
}
type request struct {
// Input values
Entries []*Entry
Wg sync.WaitGroup
Err error
}
func (req *request) Wait() error {
req.Wg.Wait()
req.Entries = nil
err := req.Err
requestPool.Put(req)
return err
}
func (vlog *valueLog) getMaxPtr() uint64 {
return atomic.LoadUint64(&vlog.maxPtr)
}
func (vlog *valueLog) maxFid() uint32 {
return uint32(atomic.LoadUint64(&vlog.maxPtr) >> 32)
}
func (vlog *valueLog) writableOffset() uint32 {
return uint32(atomic.LoadUint64(&vlog.maxPtr))
}
func (vlog *valueLog) flush() error {
curlf := vlog.currentLogFile()
if vlog.pendingLen == 0 {
return nil
}
err := vlog.curWriter.Flush()
if err != nil {
return errors.Wrapf(err, "Unable to write to value log file: %q", curlf.path)
}
vlog.metrics.NumWrites.Inc()
vlog.metrics.NumVLogBytesWritten.Add(float64(vlog.pendingLen))
atomic.AddUint64(&vlog.maxPtr, uint64(vlog.pendingLen))
vlog.pendingLen = 0
if vlog.writableOffset() > uint32(vlog.opt.ValueLogFileSize) ||
vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries {
var err error
if err = curlf.doneWriting(vlog.writableOffset()); err != nil {
return err
}
err = vlog.createVlogFile(vlog.maxFid() + 1)
if err != nil {
return err
}
}
return nil
}
// write is thread-unsafe by design and should not be called concurrently.
func (vlog *valueLog) write(reqs []*request) error {
for i := range reqs {
b := reqs[i]
for j := range b.Entries {
e := b.Entries[j]
plen, err := encodeEntry(e, &vlog.buf) // Now encode the entry into buffer.
if err != nil {
return err
}
vlog.curWriter.Append(vlog.buf.Bytes())
vlog.buf.Reset()
vlog.pendingLen += plen
e.logOffset.fid = vlog.currentLogFile().fid
// Use the offset including buffer length so far.
e.logOffset.offset = vlog.writableOffset() + uint32(vlog.pendingLen)
}
vlog.numEntriesWritten += uint32(len(b.Entries))
// We write to disk here so that all entries that are part of the same transaction are
// written to the same vlog file.
writeNow :=
vlog.writableOffset()+uint32(vlog.pendingLen) > uint32(vlog.opt.ValueLogFileSize) ||
vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries)
if writeNow {
if err := vlog.flush(); err != nil {
return err
}
}
}
return vlog.flush()
// Acquire mutex locks around this manipulation, so that the reads don't try to use
// an invalid file descriptor.
}
// Gets the logFile.
func (vlog *valueLog) getFile(fid uint32) (*logFile, error) {
for i := len(vlog.files) - 1; i >= 0; i-- {
file := vlog.files[i]
if file.fid == fid {
return file, nil
}
}
// log file has gone away, will need to retry the operation.
return nil, ErrRetry
}
| fpath | identifier_name |
utils.go | package appimage
import (
"bytes"
"debug/elf"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/AlecAivazis/survey/v2"
"github.com/adrg/xdg"
au "github.com/srevinsaju/appimage-update"
"github.com/srevinsaju/zap/config"
"github.com/srevinsaju/zap/index"
"github.com/srevinsaju/zap/internal/helpers"
"github.com/srevinsaju/zap/tui"
"github.com/srevinsaju/zap/types"
)
func List(zapConfig config.Store, index bool) ([]string, error) {
var apps []string
err := filepath.Walk(zapConfig.IndexStore, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return err
}
appName := ""
if index {
appName = path
} else {
appName = filepath.Base(path)
appName = strings.TrimSuffix(appName, ".json")
}
apps = append(apps, appName)
return err
})
return apps, err
}
func Install(options types.InstallOptions, config config.Store) error {
var asset types.ZapDlAsset
var err error
sourceIdentifier := ""
sourceSlug := ""
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
// check if the app is already installed
// if it is, do not continue
if helpers.CheckIfFileExists(indexFile) && !options.UpdateInplace {
fmt.Printf("%s is already installed \n", tui.Yellow(options.Executable))
return nil
} else if helpers.CheckIfFileExists(indexFile) {
// has the user requested to update the app in-place?
err := Remove(options.ToRemoveOptions(), config)
if err != nil {
return err
}
}
if options.RemovePreviousVersions {
err := Remove(options.ToRemoveOptions(), config)
if err != nil {
return err
}
}
if options.FromGithub {
asset, err = index.GitHubSurveyUserReleases(options, config)
sourceSlug = options.From
sourceIdentifier = SourceGitHub
if err != nil {
return err
}
} else if options.From == "" {
sourceIdentifier = SourceZapIndex
sourceSlug = options.Name
asset, err = index.ZapSurveyUserReleases(options, config)
if err != nil {
return err
}
} else {
sourceIdentifier = SourceDirectURL
sourceSlug = options.From
// if the from argument is without the file:// protocol, match that
if helpers.CheckIfFileExists(sourceSlug) {
sourceSlug, err = filepath.Abs(sourceSlug)
if err != nil {
return err
}
sourceSlug = fmt.Sprintf("file://%s", sourceSlug)
}
asset = types.ZapDlAsset{
Name: options.Executable,
Download: sourceSlug,
Size: "(unknown)",
}
}
if !options.Silent {
// let the user know what is going to happen next
fmt.Printf("Downloading %s of size %s. \n", tui.Green(asset.Name), tui.Yellow(asset.Size))
confirmDownload := false
confirmDownloadPrompt := &survey.Confirm{
Message: "Proceed?",
}
err = survey.AskOne(confirmDownloadPrompt, &confirmDownload)
if err != nil {
return err
} else if !confirmDownload {
return errors.New("aborting on user request")
}
}
logger.Debugf("Connecting to %s", asset.Download)
targetAppImagePath := path.Join(config.LocalStore, asset.GetBaseName())
targetAppImagePath, err = filepath.Abs(targetAppImagePath)
if err != nil {
return err
}
logger.Debugf("Target file path %s", targetAppImagePath)
if strings.HasPrefix(asset.Download, "file://") {
logger.Debug("file:// protocol detected, copying the file")
sourceFile := strings.Replace(asset.Download, "file://", "", 1)
_, err = helpers.CopyFile(sourceFile, targetAppImagePath)
if err != nil {
return err
}
err := os.Chmod(targetAppImagePath, 0755)
if err != nil {
return err
}
} else {
err = tui.DownloadFileWithProgressBar(asset.Download, targetAppImagePath, options.Executable)
if err != nil {
return err
}
}
app := &AppImage{Filepath: targetAppImagePath, Executable: options.Executable}
if options.Executable == "" {
app.Executable = options.Executable
}
app.Source = Source{
Identifier: sourceIdentifier,
Meta: SourceMetadata{
Slug: sourceSlug,
CrawledOn: time.Now().String(),
},
}
app.ExtractThumbnail(config.IconStore)
app.ProcessDesktopFile(config)
indexBytes, err := json.Marshal(*app)
if err != nil {
return err
}
indexFile = fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Writing JSON index to %s", indexFile)
err = ioutil.WriteFile(indexFile, indexBytes, 0644)
if err != nil {
return err
}
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, options.Executable)
if !helpers.CheckIfDirectoryExists(binDir) {
err = os.MkdirAll(binDir, 0o755)
if err != nil {
return err
}
}
if helpers.CheckIfSymlinkExists(binFile) {
logger.Debugf("%s file exists. Attempting to find path", binFile)
binAbsPath, err := filepath.EvalSymlinks(binFile)
logger.Debugf("%s file is evaluated to %s", binFile, binAbsPath)
if err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {
// this link points to config.LocalStore, where all AppImages are stored
// I guess we need to remove them, no asking and all
// make sure we remove the file first to prevent conflicts in future
logger.Debugf("%s is a previously installed symlink because of zap. Attempting to remove it", binFile)
err := os.Remove(binFile)
if err != nil {
logger.Warn("Failed to remove the symlink. %s", err)
}
} else if err == nil {
// this is some serious app which shares the same name
// as that of the target appimage
// we dont want users to be confused tbh
// so we need to ask them which of them, they would like to keep
logger.Debug("Detected another app which is not installed by zap. Refusing to remove")
// TODO: add a user prompt
logger.Fatalf("%s already exists. ", binFile)
} else {
// the file is probably a symlink, but just doesnt resolve properly
// we can safely remove it
// make sure we remove the file first to prevent conflicts
logger.Debugf("Failed to evaluate target of symlink")
logger.Debugf("Attempting to remove the symlink regardless")
err := os.Remove(binFile)
if err != nil {
logger.Debugf("Failed to remove symlink: %s", err)
}
}
}
if !strings.Contains(os.Getenv("PATH"), binDir) {
logger.Warnf("The app %s are installed in '%s' which is not on PATH.", options.Executable, binDir)
logger.Warnf("Consider adding this directory to PATH. " +
"See https://linuxize.com/post/how-to-add-directory-to-path-in-linux/")
}
logger.Debugf("Creating symlink to %s", binFile)
err = os.Symlink(targetAppImagePath, binFile)
if err != nil {
return err
}
// <- finished
logger.Debug("Completed all tasks")
fmt.Printf("%s installed successfully ✨\n", app.Executable)
return nil
}
// Upgrade method helps to update multiple apps without asking users for manual input
func Upgrade(config config.Store, silent bool) ([]string, error) {
apps, err := List(config, false)
var updatedApps []string
if err != nil {
return updatedApps, err
}
for i := range apps {
appsFormatted := fmt.Sprintf("[%s]", apps[i])
fmt.Printf("%s%s Checking for updates\n", tui.Blue("[update]"), tui.Yellow(appsFormatted))
options := types.Options{
Name: apps[i],
Executable: apps[i],
Silent: silent,
}
_, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s%s AppImage is up to date.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
} else {
fmt.Printf("%s%s failed to update, %s\n", tui.Blue("[update]"),
tui.Red(appsFormatted), tui.Yellow(err))
}
} else {
fmt.Printf("%s%s Updated.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
updatedApps = append(updatedApps, apps[i])
}
}
fmt.Println("🚀 Done.")
return updatedApps, nil
}
// Update method is a safe wrapper script which exposes update to the Command Line interface
// also handles those appimages which are up to date
func Update(options types.Options, config config.Store) error {
app, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s already up to date.\n", tui.Blue("[update]"))
return nil
} else {
return err
}
}
fmt.Printf("⚡️ AppImage saved as %s \n", tui.Green(app.Filepath))
fmt.Println("🚀 Done.")
return nil
}
// RemoveAndInstall helps to remove the AppImage first and then reinstall the appimage.
// this is particularly used in updating the AppImages from GitHub and Zap Index when
// the update information is missing
func RemoveAndInstall(options types.InstallOptions, config config.Store, app *AppImage) (*AppImage, error) {
// for github releases, we have to force the removal of the old
// appimage before continuing, because there is no verification
// of the method which can be used to check if the appimage is up to date
// or not.
err := Remove(types.RemoveOptions{Executable: app.Executable}, config)
if err != nil {
return nil, err
}
err = Install(options, config)
if err != nil {
return nil, err
}
// after installing, we need to resolve the name of the new app
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, app.Executable)
app.Filepath, err = filepath.EvalSymlinks(binFile)
if err != nil {
logger.Fatalf("Failed to resolve symlink to %s. E: %s", binDir, err)
return nil, err
}
return app, err
}
func update(options types.Options, config config.Store) (*AppImage, error) {
logger.Debugf("Bootstrapping updater for %s", options.Name)
app := &AppImage{}
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
if !helpers.CheckIfFileExists(indexFile) {
fmt.Printf("%s is not installed \n", tui.Yellow(options.Executable))
return app, nil
}
logger.Debugf("Unmarshalling JSON from %s", indexFile)
indexBytes, err := ioutil.ReadFile(indexFile)
if err != nil {
return app, err
}
err = json.Unmarshal(indexBytes, app)
if err != nil {
return app, err
}
if !options.UseAppImageUpdate || !checkIfUpdateInformationExists(app.Filepath) {
logger.Debug("This app has no update information embedded")
// the appimage does nofalset contain update information
// we need to fetch the metadata from the index
if app.Source.Identifier == SourceGitHub {
logger.Debug("Fallback to GitHub API call from installation method")
installOptions := types.InstallOptions{
Name: app.Executable,
From: app.Source.Meta.Slug,
Executable: strings.Trim(app.Executable, " "),
FromGithub: true,
Silent: options.Silent,
}
return RemoveAndInstall(installOptions, config, app)
} else if app.Source.Identifier == SourceZapIndex {
logger.Debug("Fallback to zap index from appimage.github.io")
installOptions := types.InstallOptions{
Name: app.Executable,
From: "",
Executable: strings.Trim(app.Executable, " "),
FromGithub: false,
Silent: options.Silent,
}
return RemoveAndInstall(installOptions, config, app)
} else {
if options.Silent {
logger.Warn("%s has no update information. " +
"Please ask the AppImage author to include updateinformation for the best experience. " +
"Skipping.")
return nil, nil
} else {
return nil, errors.New("appimage has no update information")
}
}
}
logger.Debugf("Creating new updater instance from %s", app.Filepath)
updater, err := au.NewUpdaterFor(app.Filepath)
if err != nil {
return app, err
}
| return app, err
}
if !hasUpdates {
return app, errors.New("up-to-date")
}
logger.Debugf("Downloading updates for %s", app.Executable)
newFileName, err := updater.Download()
fmt.Print("\n")
app.Filepath = newFileName
_ = os.Remove(app.IconPath)
_ = os.Remove(app.DesktopFile)
app.ExtractThumbnail(config.IconStore)
app.ProcessDesktopFile(config)
if err != nil {
return app, err
}
logger.Debug("Saving new index as JSON")
newIdxBytes, err := json.Marshal(*app)
if err != nil {
return app, err
}
logger.Debugf("Writing to %s", indexFile)
err = ioutil.WriteFile(indexFile, newIdxBytes, 0644)
if err != nil {
return app, err
}
return app, nil
}
// checkIfUpdateInformationExists checks if the appimage contains Update Information
// adapted directly from https://github.com/AppImageCrafters/appimage-update
func checkIfUpdateInformationExists(f string) bool {
elfFile, err := elf.Open(f)
if err != nil {
panic("Unable to open target: \"" + f + "\"." + err.Error())
}
updInfo := elfFile.Section(".upd_info")
sectionData, err := updInfo.Data()
if err != nil {
return false
}
strEnd := bytes.Index(sectionData, []byte("\000"))
return updInfo != nil && strEnd != -1 && strEnd != 0
}
// Remove function helps to remove an appimage, given its executable name
// with which it was registered
func Remove(options types.RemoveOptions, config config.Store) error {
app := &AppImage{}
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
if !helpers.CheckIfFileExists(indexFile) {
fmt.Printf("%s is not installed \n", tui.Yellow(options.Executable))
return nil
}
bar := tui.NewProgressBar(7, "r")
logger.Debugf("Unmarshalling JSON from %s", indexFile)
indexBytes, err := ioutil.ReadFile(indexFile)
if err != nil {
return err
}
bar.Add(1)
err = json.Unmarshal(indexBytes, app)
if err != nil {
return err
}
if app.IconPath != "" {
logger.Debugf("Removing thumbnail, %s", app.IconPath)
os.Remove(app.IconPath)
}
bar.Add(1)
if app.IconPathHicolor != "" {
logger.Debugf("Removing symlink to hicolor theme, %s", app.IconPathHicolor)
os.Remove(app.IconPathHicolor)
}
bar.Add(1)
if app.DesktopFile != "" {
logger.Debugf("Removing desktop file, %s", app.DesktopFile)
os.Remove(app.DesktopFile)
}
bar.Add(1)
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, options.Executable)
if helpers.CheckIfFileExists(binFile) {
binAbsPath, err := filepath.EvalSymlinks(binFile)
if err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {
// this link points to config.LocalStore, where all AppImages are stored
// I guess we need to remove them, no asking and all
// make sure we remove the file first to prevent conflicts in future
_ = os.Remove(binFile)
}
}
bar.Add(1)
logger.Debugf("Removing appimage, %s", app.Filepath)
_ = os.Remove(app.Filepath)
bar.Add(1)
logger.Debugf("Removing index file, %s", indexFile)
_ = os.Remove(indexFile)
bar.Add(1)
bar.Finish()
fmt.Printf("\n")
fmt.Printf("✅ %s removed successfully\n", app.Executable)
logger.Debugf("Removing all files completed successfully")
return bar.Finish()
} | logger.Debugf("Checking for updates")
hasUpdates, err := updater.Lookup()
if err != nil { | random_line_split |
utils.go | package appimage
import (
"bytes"
"debug/elf"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/AlecAivazis/survey/v2"
"github.com/adrg/xdg"
au "github.com/srevinsaju/appimage-update"
"github.com/srevinsaju/zap/config"
"github.com/srevinsaju/zap/index"
"github.com/srevinsaju/zap/internal/helpers"
"github.com/srevinsaju/zap/tui"
"github.com/srevinsaju/zap/types"
)
func List(zapConfig config.Store, index bool) ([]string, error) |
func Install(options types.InstallOptions, config config.Store) error {
var asset types.ZapDlAsset
var err error
sourceIdentifier := ""
sourceSlug := ""
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
// check if the app is already installed
// if it is, do not continue
if helpers.CheckIfFileExists(indexFile) && !options.UpdateInplace {
fmt.Printf("%s is already installed \n", tui.Yellow(options.Executable))
return nil
} else if helpers.CheckIfFileExists(indexFile) {
// has the user requested to update the app in-place?
err := Remove(options.ToRemoveOptions(), config)
if err != nil {
return err
}
}
if options.RemovePreviousVersions {
err := Remove(options.ToRemoveOptions(), config)
if err != nil {
return err
}
}
if options.FromGithub {
asset, err = index.GitHubSurveyUserReleases(options, config)
sourceSlug = options.From
sourceIdentifier = SourceGitHub
if err != nil {
return err
}
} else if options.From == "" {
sourceIdentifier = SourceZapIndex
sourceSlug = options.Name
asset, err = index.ZapSurveyUserReleases(options, config)
if err != nil {
return err
}
} else {
sourceIdentifier = SourceDirectURL
sourceSlug = options.From
// if the from argument is without the file:// protocol, match that
if helpers.CheckIfFileExists(sourceSlug) {
sourceSlug, err = filepath.Abs(sourceSlug)
if err != nil {
return err
}
sourceSlug = fmt.Sprintf("file://%s", sourceSlug)
}
asset = types.ZapDlAsset{
Name: options.Executable,
Download: sourceSlug,
Size: "(unknown)",
}
}
if !options.Silent {
// let the user know what is going to happen next
fmt.Printf("Downloading %s of size %s. \n", tui.Green(asset.Name), tui.Yellow(asset.Size))
confirmDownload := false
confirmDownloadPrompt := &survey.Confirm{
Message: "Proceed?",
}
err = survey.AskOne(confirmDownloadPrompt, &confirmDownload)
if err != nil {
return err
} else if !confirmDownload {
return errors.New("aborting on user request")
}
}
logger.Debugf("Connecting to %s", asset.Download)
targetAppImagePath := path.Join(config.LocalStore, asset.GetBaseName())
targetAppImagePath, err = filepath.Abs(targetAppImagePath)
if err != nil {
return err
}
logger.Debugf("Target file path %s", targetAppImagePath)
if strings.HasPrefix(asset.Download, "file://") {
logger.Debug("file:// protocol detected, copying the file")
sourceFile := strings.Replace(asset.Download, "file://", "", 1)
_, err = helpers.CopyFile(sourceFile, targetAppImagePath)
if err != nil {
return err
}
err := os.Chmod(targetAppImagePath, 0755)
if err != nil {
return err
}
} else {
err = tui.DownloadFileWithProgressBar(asset.Download, targetAppImagePath, options.Executable)
if err != nil {
return err
}
}
app := &AppImage{Filepath: targetAppImagePath, Executable: options.Executable}
if options.Executable == "" {
app.Executable = options.Executable
}
app.Source = Source{
Identifier: sourceIdentifier,
Meta: SourceMetadata{
Slug: sourceSlug,
CrawledOn: time.Now().String(),
},
}
app.ExtractThumbnail(config.IconStore)
app.ProcessDesktopFile(config)
indexBytes, err := json.Marshal(*app)
if err != nil {
return err
}
indexFile = fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Writing JSON index to %s", indexFile)
err = ioutil.WriteFile(indexFile, indexBytes, 0644)
if err != nil {
return err
}
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, options.Executable)
if !helpers.CheckIfDirectoryExists(binDir) {
err = os.MkdirAll(binDir, 0o755)
if err != nil {
return err
}
}
if helpers.CheckIfSymlinkExists(binFile) {
logger.Debugf("%s file exists. Attempting to find path", binFile)
binAbsPath, err := filepath.EvalSymlinks(binFile)
logger.Debugf("%s file is evaluated to %s", binFile, binAbsPath)
if err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {
// this link points to config.LocalStore, where all AppImages are stored
// I guess we need to remove them, no asking and all
// make sure we remove the file first to prevent conflicts in future
logger.Debugf("%s is a previously installed symlink because of zap. Attempting to remove it", binFile)
err := os.Remove(binFile)
if err != nil {
logger.Warn("Failed to remove the symlink. %s", err)
}
} else if err == nil {
// this is some serious app which shares the same name
// as that of the target appimage
// we dont want users to be confused tbh
// so we need to ask them which of them, they would like to keep
logger.Debug("Detected another app which is not installed by zap. Refusing to remove")
// TODO: add a user prompt
logger.Fatalf("%s already exists. ", binFile)
} else {
// the file is probably a symlink, but just doesnt resolve properly
// we can safely remove it
// make sure we remove the file first to prevent conflicts
logger.Debugf("Failed to evaluate target of symlink")
logger.Debugf("Attempting to remove the symlink regardless")
err := os.Remove(binFile)
if err != nil {
logger.Debugf("Failed to remove symlink: %s", err)
}
}
}
if !strings.Contains(os.Getenv("PATH"), binDir) {
logger.Warnf("The app %s are installed in '%s' which is not on PATH.", options.Executable, binDir)
logger.Warnf("Consider adding this directory to PATH. " +
"See https://linuxize.com/post/how-to-add-directory-to-path-in-linux/")
}
logger.Debugf("Creating symlink to %s", binFile)
err = os.Symlink(targetAppImagePath, binFile)
if err != nil {
return err
}
// <- finished
logger.Debug("Completed all tasks")
fmt.Printf("%s installed successfully ✨\n", app.Executable)
return nil
}
// Upgrade method helps to update multiple apps without asking users for manual input
func Upgrade(config config.Store, silent bool) ([]string, error) {
apps, err := List(config, false)
var updatedApps []string
if err != nil {
return updatedApps, err
}
for i := range apps {
appsFormatted := fmt.Sprintf("[%s]", apps[i])
fmt.Printf("%s%s Checking for updates\n", tui.Blue("[update]"), tui.Yellow(appsFormatted))
options := types.Options{
Name: apps[i],
Executable: apps[i],
Silent: silent,
}
_, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s%s AppImage is up to date.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
} else {
fmt.Printf("%s%s failed to update, %s\n", tui.Blue("[update]"),
tui.Red(appsFormatted), tui.Yellow(err))
}
} else {
fmt.Printf("%s%s Updated.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
updatedApps = append(updatedApps, apps[i])
}
}
fmt.Println("🚀 Done.")
return updatedApps, nil
}
// Update method is a safe wrapper script which exposes update to the Command Line interface
// also handles those appimages which are up to date
func Update(options types.Options, config config.Store) error {
app, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s already up to date.\n", tui.Blue("[update]"))
return nil
} else {
return err
}
}
fmt.Printf("⚡️ AppImage saved as %s \n", tui.Green(app.Filepath))
fmt.Println("🚀 Done.")
return nil
}
// RemoveAndInstall helps to remove the AppImage first and then reinstall the appimage.
// this is particularly used in updating the AppImages from GitHub and Zap Index when
// the update information is missing
func RemoveAndInstall(options types.InstallOptions, config config.Store, app *AppImage) (*AppImage, error) {
// for github releases, we have to force the removal of the old
// appimage before continuing, because there is no verification
// of the method which can be used to check if the appimage is up to date
// or not.
err := Remove(types.RemoveOptions{Executable: app.Executable}, config)
if err != nil {
return nil, err
}
err = Install(options, config)
if err != nil {
return nil, err
}
// after installing, we need to resolve the name of the new app
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, app.Executable)
app.Filepath, err = filepath.EvalSymlinks(binFile)
if err != nil {
logger.Fatalf("Failed to resolve symlink to %s. E: %s", binDir, err)
return nil, err
}
return app, err
}
func update(options types.Options, config config.Store) (*AppImage, error) {
logger.Debugf("Bootstrapping updater for %s", options.Name)
app := &AppImage{}
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
if !helpers.CheckIfFileExists(indexFile) {
fmt.Printf("%s is not installed \n", tui.Yellow(options.Executable))
return app, nil
}
logger.Debugf("Unmarshalling JSON from %s", indexFile)
indexBytes, err := ioutil.ReadFile(indexFile)
if err != nil {
return app, err
}
err = json.Unmarshal(indexBytes, app)
if err != nil {
return app, err
}
if !options.UseAppImageUpdate || !checkIfUpdateInformationExists(app.Filepath) {
logger.Debug("This app has no update information embedded")
// the appimage does nofalset contain update information
// we need to fetch the metadata from the index
if app.Source.Identifier == SourceGitHub {
logger.Debug("Fallback to GitHub API call from installation method")
installOptions := types.InstallOptions{
Name: app.Executable,
From: app.Source.Meta.Slug,
Executable: strings.Trim(app.Executable, " "),
FromGithub: true,
Silent: options.Silent,
}
return RemoveAndInstall(installOptions, config, app)
} else if app.Source.Identifier == SourceZapIndex {
logger.Debug("Fallback to zap index from appimage.github.io")
installOptions := types.InstallOptions{
Name: app.Executable,
From: "",
Executable: strings.Trim(app.Executable, " "),
FromGithub: false,
Silent: options.Silent,
}
return RemoveAndInstall(installOptions, config, app)
} else {
if options.Silent {
logger.Warn("%s has no update information. " +
"Please ask the AppImage author to include updateinformation for the best experience. " +
"Skipping.")
return nil, nil
} else {
return nil, errors.New("appimage has no update information")
}
}
}
logger.Debugf("Creating new updater instance from %s", app.Filepath)
updater, err := au.NewUpdaterFor(app.Filepath)
if err != nil {
return app, err
}
logger.Debugf("Checking for updates")
hasUpdates, err := updater.Lookup()
if err != nil {
return app, err
}
if !hasUpdates {
return app, errors.New("up-to-date")
}
logger.Debugf("Downloading updates for %s", app.Executable)
newFileName, err := updater.Download()
fmt.Print("\n")
app.Filepath = newFileName
_ = os.Remove(app.IconPath)
_ = os.Remove(app.DesktopFile)
app.ExtractThumbnail(config.IconStore)
app.ProcessDesktopFile(config)
if err != nil {
return app, err
}
logger.Debug("Saving new index as JSON")
newIdxBytes, err := json.Marshal(*app)
if err != nil {
return app, err
}
logger.Debugf("Writing to %s", indexFile)
err = ioutil.WriteFile(indexFile, newIdxBytes, 0644)
if err != nil {
return app, err
}
return app, nil
}
// checkIfUpdateInformationExists checks if the appimage contains Update Information
// adapted directly from https://github.com/AppImageCrafters/appimage-update
func checkIfUpdateInformationExists(f string) bool {
elfFile, err := elf.Open(f)
if err != nil {
panic("Unable to open target: \"" + f + "\"." + err.Error())
}
updInfo := elfFile.Section(".upd_info")
sectionData, err := updInfo.Data()
if err != nil {
return false
}
strEnd := bytes.Index(sectionData, []byte("\000"))
return updInfo != nil && strEnd != -1 && strEnd != 0
}
// Remove function helps to remove an appimage, given its executable name
// with which it was registered
func Remove(options types.RemoveOptions, config config.Store) error {
app := &AppImage{}
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
if !helpers.CheckIfFileExists(indexFile) {
fmt.Printf("%s is not installed \n", tui.Yellow(options.Executable))
return nil
}
bar := tui.NewProgressBar(7, "r")
logger.Debugf("Unmarshalling JSON from %s", indexFile)
indexBytes, err := ioutil.ReadFile(indexFile)
if err != nil {
return err
}
bar.Add(1)
err = json.Unmarshal(indexBytes, app)
if err != nil {
return err
}
if app.IconPath != "" {
logger.Debugf("Removing thumbnail, %s", app.IconPath)
os.Remove(app.IconPath)
}
bar.Add(1)
if app.IconPathHicolor != "" {
logger.Debugf("Removing symlink to hicolor theme, %s", app.IconPathHicolor)
os.Remove(app.IconPathHicolor)
}
bar.Add(1)
if app.DesktopFile != "" {
logger.Debugf("Removing desktop file, %s", app.DesktopFile)
os.Remove(app.DesktopFile)
}
bar.Add(1)
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, options.Executable)
if helpers.CheckIfFileExists(binFile) {
binAbsPath, err := filepath.EvalSymlinks(binFile)
if err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {
// this link points to config.LocalStore, where all AppImages are stored
// I guess we need to remove them, no asking and all
// make sure we remove the file first to prevent conflicts in future
_ = os.Remove(binFile)
}
}
bar.Add(1)
logger.Debugf("Removing appimage, %s", app.Filepath)
_ = os.Remove(app.Filepath)
bar.Add(1)
logger.Debugf("Removing index file, %s", indexFile)
_ = os.Remove(indexFile)
bar.Add(1)
bar.Finish()
fmt.Printf("\n")
fmt.Printf("✅ %s removed successfully\n", app.Executable)
logger.Debugf("Removing all files completed successfully")
return bar.Finish()
}
| {
var apps []string
err := filepath.Walk(zapConfig.IndexStore, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return err
}
appName := ""
if index {
appName = path
} else {
appName = filepath.Base(path)
appName = strings.TrimSuffix(appName, ".json")
}
apps = append(apps, appName)
return err
})
return apps, err
} | identifier_body |
utils.go | package appimage
import (
"bytes"
"debug/elf"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/AlecAivazis/survey/v2"
"github.com/adrg/xdg"
au "github.com/srevinsaju/appimage-update"
"github.com/srevinsaju/zap/config"
"github.com/srevinsaju/zap/index"
"github.com/srevinsaju/zap/internal/helpers"
"github.com/srevinsaju/zap/tui"
"github.com/srevinsaju/zap/types"
)
func List(zapConfig config.Store, index bool) ([]string, error) {
var apps []string
err := filepath.Walk(zapConfig.IndexStore, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return err
}
appName := ""
if index {
appName = path
} else {
appName = filepath.Base(path)
appName = strings.TrimSuffix(appName, ".json")
}
apps = append(apps, appName)
return err
})
return apps, err
}
func Install(options types.InstallOptions, config config.Store) error {
var asset types.ZapDlAsset
var err error
sourceIdentifier := ""
sourceSlug := ""
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
// check if the app is already installed
// if it is, do not continue
if helpers.CheckIfFileExists(indexFile) && !options.UpdateInplace {
fmt.Printf("%s is already installed \n", tui.Yellow(options.Executable))
return nil
} else if helpers.CheckIfFileExists(indexFile) {
// has the user requested to update the app in-place?
err := Remove(options.ToRemoveOptions(), config)
if err != nil {
return err
}
}
if options.RemovePreviousVersions {
err := Remove(options.ToRemoveOptions(), config)
if err != nil {
return err
}
}
if options.FromGithub {
asset, err = index.GitHubSurveyUserReleases(options, config)
sourceSlug = options.From
sourceIdentifier = SourceGitHub
if err != nil {
return err
}
} else if options.From == "" {
sourceIdentifier = SourceZapIndex
sourceSlug = options.Name
asset, err = index.ZapSurveyUserReleases(options, config)
if err != nil {
return err
}
} else {
sourceIdentifier = SourceDirectURL
sourceSlug = options.From
// if the from argument is without the file:// protocol, match that
if helpers.CheckIfFileExists(sourceSlug) {
sourceSlug, err = filepath.Abs(sourceSlug)
if err != nil {
return err
}
sourceSlug = fmt.Sprintf("file://%s", sourceSlug)
}
asset = types.ZapDlAsset{
Name: options.Executable,
Download: sourceSlug,
Size: "(unknown)",
}
}
if !options.Silent {
// let the user know what is going to happen next
fmt.Printf("Downloading %s of size %s. \n", tui.Green(asset.Name), tui.Yellow(asset.Size))
confirmDownload := false
confirmDownloadPrompt := &survey.Confirm{
Message: "Proceed?",
}
err = survey.AskOne(confirmDownloadPrompt, &confirmDownload)
if err != nil {
return err
} else if !confirmDownload {
return errors.New("aborting on user request")
}
}
logger.Debugf("Connecting to %s", asset.Download)
targetAppImagePath := path.Join(config.LocalStore, asset.GetBaseName())
targetAppImagePath, err = filepath.Abs(targetAppImagePath)
if err != nil {
return err
}
logger.Debugf("Target file path %s", targetAppImagePath)
if strings.HasPrefix(asset.Download, "file://") {
logger.Debug("file:// protocol detected, copying the file")
sourceFile := strings.Replace(asset.Download, "file://", "", 1)
_, err = helpers.CopyFile(sourceFile, targetAppImagePath)
if err != nil {
return err
}
err := os.Chmod(targetAppImagePath, 0755)
if err != nil {
return err
}
} else {
err = tui.DownloadFileWithProgressBar(asset.Download, targetAppImagePath, options.Executable)
if err != nil {
return err
}
}
app := &AppImage{Filepath: targetAppImagePath, Executable: options.Executable}
if options.Executable == "" {
app.Executable = options.Executable
}
app.Source = Source{
Identifier: sourceIdentifier,
Meta: SourceMetadata{
Slug: sourceSlug,
CrawledOn: time.Now().String(),
},
}
app.ExtractThumbnail(config.IconStore)
app.ProcessDesktopFile(config)
indexBytes, err := json.Marshal(*app)
if err != nil {
return err
}
indexFile = fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Writing JSON index to %s", indexFile)
err = ioutil.WriteFile(indexFile, indexBytes, 0644)
if err != nil {
return err
}
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, options.Executable)
if !helpers.CheckIfDirectoryExists(binDir) {
err = os.MkdirAll(binDir, 0o755)
if err != nil {
return err
}
}
if helpers.CheckIfSymlinkExists(binFile) {
logger.Debugf("%s file exists. Attempting to find path", binFile)
binAbsPath, err := filepath.EvalSymlinks(binFile)
logger.Debugf("%s file is evaluated to %s", binFile, binAbsPath)
if err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {
// this link points to config.LocalStore, where all AppImages are stored
// I guess we need to remove them, no asking and all
// make sure we remove the file first to prevent conflicts in future
logger.Debugf("%s is a previously installed symlink because of zap. Attempting to remove it", binFile)
err := os.Remove(binFile)
if err != nil {
logger.Warn("Failed to remove the symlink. %s", err)
}
} else if err == nil {
// this is some serious app which shares the same name
// as that of the target appimage
// we dont want users to be confused tbh
// so we need to ask them which of them, they would like to keep
logger.Debug("Detected another app which is not installed by zap. Refusing to remove")
// TODO: add a user prompt
logger.Fatalf("%s already exists. ", binFile)
} else {
// the file is probably a symlink, but just doesnt resolve properly
// we can safely remove it
// make sure we remove the file first to prevent conflicts
logger.Debugf("Failed to evaluate target of symlink")
logger.Debugf("Attempting to remove the symlink regardless")
err := os.Remove(binFile)
if err != nil {
logger.Debugf("Failed to remove symlink: %s", err)
}
}
}
if !strings.Contains(os.Getenv("PATH"), binDir) {
logger.Warnf("The app %s are installed in '%s' which is not on PATH.", options.Executable, binDir)
logger.Warnf("Consider adding this directory to PATH. " +
"See https://linuxize.com/post/how-to-add-directory-to-path-in-linux/")
}
logger.Debugf("Creating symlink to %s", binFile)
err = os.Symlink(targetAppImagePath, binFile)
if err != nil {
return err
}
// <- finished
logger.Debug("Completed all tasks")
fmt.Printf("%s installed successfully ✨\n", app.Executable)
return nil
}
// Upgrade method helps to update multiple apps without asking users for manual input
func Upgrade(config config.Store, silent bool) ([]string, error) {
apps, err := List(config, false)
var updatedApps []string
if err != nil {
return updatedApps, err
}
for i := range apps {
appsFormatted := fmt.Sprintf("[%s]", apps[i])
fmt.Printf("%s%s Checking for updates\n", tui.Blue("[update]"), tui.Yellow(appsFormatted))
options := types.Options{
Name: apps[i],
Executable: apps[i],
Silent: silent,
}
_, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s%s AppImage is up to date.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
} else {
fmt.Printf("%s%s failed to update, %s\n", tui.Blue("[update]"),
tui.Red(appsFormatted), tui.Yellow(err))
}
} else {
fmt.Printf("%s%s Updated.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
updatedApps = append(updatedApps, apps[i])
}
}
fmt.Println("🚀 Done.")
return updatedApps, nil
}
// Update method is a safe wrapper script which exposes update to the Command Line interface
// also handles those appimages which are up to date
func Update(options types.Options, config config.Store) error {
app, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s already up to date.\n", tui.Blue("[update]"))
return nil
} else {
return err
}
}
fmt.Printf("⚡️ AppImage saved as %s \n", tui.Green(app.Filepath))
fmt.Println("🚀 Done.")
return nil
}
// RemoveAndInstall helps to remove the AppImage first and then reinstall the appimage.
// this is particularly used in updating the AppImages from GitHub and Zap Index when
// the update information is missing
func RemoveAndIns | es.InstallOptions, config config.Store, app *AppImage) (*AppImage, error) {
// for github releases, we have to force the removal of the old
// appimage before continuing, because there is no verification
// of the method which can be used to check if the appimage is up to date
// or not.
err := Remove(types.RemoveOptions{Executable: app.Executable}, config)
if err != nil {
return nil, err
}
err = Install(options, config)
if err != nil {
return nil, err
}
// after installing, we need to resolve the name of the new app
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, app.Executable)
app.Filepath, err = filepath.EvalSymlinks(binFile)
if err != nil {
logger.Fatalf("Failed to resolve symlink to %s. E: %s", binDir, err)
return nil, err
}
return app, err
}
func update(options types.Options, config config.Store) (*AppImage, error) {
logger.Debugf("Bootstrapping updater for %s", options.Name)
app := &AppImage{}
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
if !helpers.CheckIfFileExists(indexFile) {
fmt.Printf("%s is not installed \n", tui.Yellow(options.Executable))
return app, nil
}
logger.Debugf("Unmarshalling JSON from %s", indexFile)
indexBytes, err := ioutil.ReadFile(indexFile)
if err != nil {
return app, err
}
err = json.Unmarshal(indexBytes, app)
if err != nil {
return app, err
}
if !options.UseAppImageUpdate || !checkIfUpdateInformationExists(app.Filepath) {
logger.Debug("This app has no update information embedded")
// the appimage does nofalset contain update information
// we need to fetch the metadata from the index
if app.Source.Identifier == SourceGitHub {
logger.Debug("Fallback to GitHub API call from installation method")
installOptions := types.InstallOptions{
Name: app.Executable,
From: app.Source.Meta.Slug,
Executable: strings.Trim(app.Executable, " "),
FromGithub: true,
Silent: options.Silent,
}
return RemoveAndInstall(installOptions, config, app)
} else if app.Source.Identifier == SourceZapIndex {
logger.Debug("Fallback to zap index from appimage.github.io")
installOptions := types.InstallOptions{
Name: app.Executable,
From: "",
Executable: strings.Trim(app.Executable, " "),
FromGithub: false,
Silent: options.Silent,
}
return RemoveAndInstall(installOptions, config, app)
} else {
if options.Silent {
logger.Warn("%s has no update information. " +
"Please ask the AppImage author to include updateinformation for the best experience. " +
"Skipping.")
return nil, nil
} else {
return nil, errors.New("appimage has no update information")
}
}
}
logger.Debugf("Creating new updater instance from %s", app.Filepath)
updater, err := au.NewUpdaterFor(app.Filepath)
if err != nil {
return app, err
}
logger.Debugf("Checking for updates")
hasUpdates, err := updater.Lookup()
if err != nil {
return app, err
}
if !hasUpdates {
return app, errors.New("up-to-date")
}
logger.Debugf("Downloading updates for %s", app.Executable)
newFileName, err := updater.Download()
fmt.Print("\n")
app.Filepath = newFileName
_ = os.Remove(app.IconPath)
_ = os.Remove(app.DesktopFile)
app.ExtractThumbnail(config.IconStore)
app.ProcessDesktopFile(config)
if err != nil {
return app, err
}
logger.Debug("Saving new index as JSON")
newIdxBytes, err := json.Marshal(*app)
if err != nil {
return app, err
}
logger.Debugf("Writing to %s", indexFile)
err = ioutil.WriteFile(indexFile, newIdxBytes, 0644)
if err != nil {
return app, err
}
return app, nil
}
// checkIfUpdateInformationExists checks if the appimage contains Update Information
// adapted directly from https://github.com/AppImageCrafters/appimage-update
func checkIfUpdateInformationExists(f string) bool {
elfFile, err := elf.Open(f)
if err != nil {
panic("Unable to open target: \"" + f + "\"." + err.Error())
}
updInfo := elfFile.Section(".upd_info")
sectionData, err := updInfo.Data()
if err != nil {
return false
}
strEnd := bytes.Index(sectionData, []byte("\000"))
return updInfo != nil && strEnd != -1 && strEnd != 0
}
// Remove function helps to remove an appimage, given its executable name
// with which it was registered
func Remove(options types.RemoveOptions, config config.Store) error {
app := &AppImage{}
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
if !helpers.CheckIfFileExists(indexFile) {
fmt.Printf("%s is not installed \n", tui.Yellow(options.Executable))
return nil
}
bar := tui.NewProgressBar(7, "r")
logger.Debugf("Unmarshalling JSON from %s", indexFile)
indexBytes, err := ioutil.ReadFile(indexFile)
if err != nil {
return err
}
bar.Add(1)
err = json.Unmarshal(indexBytes, app)
if err != nil {
return err
}
if app.IconPath != "" {
logger.Debugf("Removing thumbnail, %s", app.IconPath)
os.Remove(app.IconPath)
}
bar.Add(1)
if app.IconPathHicolor != "" {
logger.Debugf("Removing symlink to hicolor theme, %s", app.IconPathHicolor)
os.Remove(app.IconPathHicolor)
}
bar.Add(1)
if app.DesktopFile != "" {
logger.Debugf("Removing desktop file, %s", app.DesktopFile)
os.Remove(app.DesktopFile)
}
bar.Add(1)
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, options.Executable)
if helpers.CheckIfFileExists(binFile) {
binAbsPath, err := filepath.EvalSymlinks(binFile)
if err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {
// this link points to config.LocalStore, where all AppImages are stored
// I guess we need to remove them, no asking and all
// make sure we remove the file first to prevent conflicts in future
_ = os.Remove(binFile)
}
}
bar.Add(1)
logger.Debugf("Removing appimage, %s", app.Filepath)
_ = os.Remove(app.Filepath)
bar.Add(1)
logger.Debugf("Removing index file, %s", indexFile)
_ = os.Remove(indexFile)
bar.Add(1)
bar.Finish()
fmt.Printf("\n")
fmt.Printf("✅ %s removed successfully\n", app.Executable)
logger.Debugf("Removing all files completed successfully")
return bar.Finish()
}
| tall(options typ | identifier_name |
utils.go | package appimage
import (
"bytes"
"debug/elf"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/AlecAivazis/survey/v2"
"github.com/adrg/xdg"
au "github.com/srevinsaju/appimage-update"
"github.com/srevinsaju/zap/config"
"github.com/srevinsaju/zap/index"
"github.com/srevinsaju/zap/internal/helpers"
"github.com/srevinsaju/zap/tui"
"github.com/srevinsaju/zap/types"
)
func List(zapConfig config.Store, index bool) ([]string, error) {
var apps []string
err := filepath.Walk(zapConfig.IndexStore, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return err
}
appName := ""
if index {
appName = path
} else {
appName = filepath.Base(path)
appName = strings.TrimSuffix(appName, ".json")
}
apps = append(apps, appName)
return err
})
return apps, err
}
func Install(options types.InstallOptions, config config.Store) error {
var asset types.ZapDlAsset
var err error
sourceIdentifier := ""
sourceSlug := ""
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
// check if the app is already installed
// if it is, do not continue
if helpers.CheckIfFileExists(indexFile) && !options.UpdateInplace {
fmt.Printf("%s is already installed \n", tui.Yellow(options.Executable))
return nil
} else if helpers.CheckIfFileExists(indexFile) {
// has the user requested to update the app in-place?
err := Remove(options.ToRemoveOptions(), config)
if err != nil {
return err
}
}
if options.RemovePreviousVersions {
err := Remove(options.ToRemoveOptions(), config)
if err != nil {
return err
}
}
if options.FromGithub {
asset, err = index.GitHubSurveyUserReleases(options, config)
sourceSlug = options.From
sourceIdentifier = SourceGitHub
if err != nil {
return err
}
} else if options.From == "" {
sourceIdentifier = SourceZapIndex
sourceSlug = options.Name
asset, err = index.ZapSurveyUserReleases(options, config)
if err != nil {
return err
}
} else {
sourceIdentifier = SourceDirectURL
sourceSlug = options.From
// if the from argument is without the file:// protocol, match that
if helpers.CheckIfFileExists(sourceSlug) {
sourceSlug, err = filepath.Abs(sourceSlug)
if err != nil {
return err
}
sourceSlug = fmt.Sprintf("file://%s", sourceSlug)
}
asset = types.ZapDlAsset{
Name: options.Executable,
Download: sourceSlug,
Size: "(unknown)",
}
}
if !options.Silent {
// let the user know what is going to happen next
fmt.Printf("Downloading %s of size %s. \n", tui.Green(asset.Name), tui.Yellow(asset.Size))
confirmDownload := false
confirmDownloadPrompt := &survey.Confirm{
Message: "Proceed?",
}
err = survey.AskOne(confirmDownloadPrompt, &confirmDownload)
if err != nil {
return err
} else if !confirmDownload {
return errors.New("aborting on user request")
}
}
logger.Debugf("Connecting to %s", asset.Download)
targetAppImagePath := path.Join(config.LocalStore, asset.GetBaseName())
targetAppImagePath, err = filepath.Abs(targetAppImagePath)
if err != nil {
return err
}
logger.Debugf("Target file path %s", targetAppImagePath)
if strings.HasPrefix(asset.Download, "file://") {
logger.Debug("file:// protocol detected, copying the file")
sourceFile := strings.Replace(asset.Download, "file://", "", 1)
_, err = helpers.CopyFile(sourceFile, targetAppImagePath)
if err != nil {
return err
}
err := os.Chmod(targetAppImagePath, 0755)
if err != nil {
return err
}
} else {
err = tui.DownloadFileWithProgressBar(asset.Download, targetAppImagePath, options.Executable)
if err != nil {
return err
}
}
app := &AppImage{Filepath: targetAppImagePath, Executable: options.Executable}
if options.Executable == "" {
app.Executable = options.Executable
}
app.Source = Source{
Identifier: sourceIdentifier,
Meta: SourceMetadata{
Slug: sourceSlug,
CrawledOn: time.Now().String(),
},
}
app.ExtractThumbnail(config.IconStore)
app.ProcessDesktopFile(config)
indexBytes, err := json.Marshal(*app)
if err != nil {
return err
}
indexFile = fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Writing JSON index to %s", indexFile)
err = ioutil.WriteFile(indexFile, indexBytes, 0644)
if err != nil {
return err
}
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, options.Executable)
if !helpers.CheckIfDirectoryExists(binDir) {
err = os.MkdirAll(binDir, 0o755)
if err != nil {
return err
}
}
if helpers.CheckIfSymlinkExists(binFile) {
logger.Debugf("%s file exists. Attempting to find path", binFile)
binAbsPath, err := filepath.EvalSymlinks(binFile)
logger.Debugf("%s file is evaluated to %s", binFile, binAbsPath)
if err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {
// this link points to config.LocalStore, where all AppImages are stored
// I guess we need to remove them, no asking and all
// make sure we remove the file first to prevent conflicts in future
logger.Debugf("%s is a previously installed symlink because of zap. Attempting to remove it", binFile)
err := os.Remove(binFile)
if err != nil {
logger.Warn("Failed to remove the symlink. %s", err)
}
} else if err == nil | else {
// the file is probably a symlink, but just doesnt resolve properly
// we can safely remove it
// make sure we remove the file first to prevent conflicts
logger.Debugf("Failed to evaluate target of symlink")
logger.Debugf("Attempting to remove the symlink regardless")
err := os.Remove(binFile)
if err != nil {
logger.Debugf("Failed to remove symlink: %s", err)
}
}
}
if !strings.Contains(os.Getenv("PATH"), binDir) {
logger.Warnf("The app %s are installed in '%s' which is not on PATH.", options.Executable, binDir)
logger.Warnf("Consider adding this directory to PATH. " +
"See https://linuxize.com/post/how-to-add-directory-to-path-in-linux/")
}
logger.Debugf("Creating symlink to %s", binFile)
err = os.Symlink(targetAppImagePath, binFile)
if err != nil {
return err
}
// <- finished
logger.Debug("Completed all tasks")
fmt.Printf("%s installed successfully ✨\n", app.Executable)
return nil
}
// Upgrade method helps to update multiple apps without asking users for manual input
func Upgrade(config config.Store, silent bool) ([]string, error) {
apps, err := List(config, false)
var updatedApps []string
if err != nil {
return updatedApps, err
}
for i := range apps {
appsFormatted := fmt.Sprintf("[%s]", apps[i])
fmt.Printf("%s%s Checking for updates\n", tui.Blue("[update]"), tui.Yellow(appsFormatted))
options := types.Options{
Name: apps[i],
Executable: apps[i],
Silent: silent,
}
_, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s%s AppImage is up to date.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
} else {
fmt.Printf("%s%s failed to update, %s\n", tui.Blue("[update]"),
tui.Red(appsFormatted), tui.Yellow(err))
}
} else {
fmt.Printf("%s%s Updated.\n", tui.Blue("[update]"), tui.Green(appsFormatted))
updatedApps = append(updatedApps, apps[i])
}
}
fmt.Println("🚀 Done.")
return updatedApps, nil
}
// Update method is a safe wrapper script which exposes update to the Command Line interface
// also handles those appimages which are up to date
func Update(options types.Options, config config.Store) error {
app, err := update(options, config)
if err != nil {
if err.Error() == "up-to-date" {
fmt.Printf("%s already up to date.\n", tui.Blue("[update]"))
return nil
} else {
return err
}
}
fmt.Printf("⚡️ AppImage saved as %s \n", tui.Green(app.Filepath))
fmt.Println("🚀 Done.")
return nil
}
// RemoveAndInstall helps to remove the AppImage first and then reinstall the appimage.
// this is particularly used in updating the AppImages from GitHub and Zap Index when
// the update information is missing
func RemoveAndInstall(options types.InstallOptions, config config.Store, app *AppImage) (*AppImage, error) {
// for github releases, we have to force the removal of the old
// appimage before continuing, because there is no verification
// of the method which can be used to check if the appimage is up to date
// or not.
err := Remove(types.RemoveOptions{Executable: app.Executable}, config)
if err != nil {
return nil, err
}
err = Install(options, config)
if err != nil {
return nil, err
}
// after installing, we need to resolve the name of the new app
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, app.Executable)
app.Filepath, err = filepath.EvalSymlinks(binFile)
if err != nil {
logger.Fatalf("Failed to resolve symlink to %s. E: %s", binDir, err)
return nil, err
}
return app, err
}
func update(options types.Options, config config.Store) (*AppImage, error) {
logger.Debugf("Bootstrapping updater for %s", options.Name)
app := &AppImage{}
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
if !helpers.CheckIfFileExists(indexFile) {
fmt.Printf("%s is not installed \n", tui.Yellow(options.Executable))
return app, nil
}
logger.Debugf("Unmarshalling JSON from %s", indexFile)
indexBytes, err := ioutil.ReadFile(indexFile)
if err != nil {
return app, err
}
err = json.Unmarshal(indexBytes, app)
if err != nil {
return app, err
}
if !options.UseAppImageUpdate || !checkIfUpdateInformationExists(app.Filepath) {
logger.Debug("This app has no update information embedded")
// the appimage does nofalset contain update information
// we need to fetch the metadata from the index
if app.Source.Identifier == SourceGitHub {
logger.Debug("Fallback to GitHub API call from installation method")
installOptions := types.InstallOptions{
Name: app.Executable,
From: app.Source.Meta.Slug,
Executable: strings.Trim(app.Executable, " "),
FromGithub: true,
Silent: options.Silent,
}
return RemoveAndInstall(installOptions, config, app)
} else if app.Source.Identifier == SourceZapIndex {
logger.Debug("Fallback to zap index from appimage.github.io")
installOptions := types.InstallOptions{
Name: app.Executable,
From: "",
Executable: strings.Trim(app.Executable, " "),
FromGithub: false,
Silent: options.Silent,
}
return RemoveAndInstall(installOptions, config, app)
} else {
if options.Silent {
logger.Warn("%s has no update information. " +
"Please ask the AppImage author to include updateinformation for the best experience. " +
"Skipping.")
return nil, nil
} else {
return nil, errors.New("appimage has no update information")
}
}
}
logger.Debugf("Creating new updater instance from %s", app.Filepath)
updater, err := au.NewUpdaterFor(app.Filepath)
if err != nil {
return app, err
}
logger.Debugf("Checking for updates")
hasUpdates, err := updater.Lookup()
if err != nil {
return app, err
}
if !hasUpdates {
return app, errors.New("up-to-date")
}
logger.Debugf("Downloading updates for %s", app.Executable)
newFileName, err := updater.Download()
fmt.Print("\n")
app.Filepath = newFileName
_ = os.Remove(app.IconPath)
_ = os.Remove(app.DesktopFile)
app.ExtractThumbnail(config.IconStore)
app.ProcessDesktopFile(config)
if err != nil {
return app, err
}
logger.Debug("Saving new index as JSON")
newIdxBytes, err := json.Marshal(*app)
if err != nil {
return app, err
}
logger.Debugf("Writing to %s", indexFile)
err = ioutil.WriteFile(indexFile, newIdxBytes, 0644)
if err != nil {
return app, err
}
return app, nil
}
// checkIfUpdateInformationExists checks if the appimage contains Update Information
// adapted directly from https://github.com/AppImageCrafters/appimage-update
func checkIfUpdateInformationExists(f string) bool {
elfFile, err := elf.Open(f)
if err != nil {
panic("Unable to open target: \"" + f + "\"." + err.Error())
}
updInfo := elfFile.Section(".upd_info")
sectionData, err := updInfo.Data()
if err != nil {
return false
}
strEnd := bytes.Index(sectionData, []byte("\000"))
return updInfo != nil && strEnd != -1 && strEnd != 0
}
// Remove function helps to remove an appimage, given its executable name
// with which it was registered
func Remove(options types.RemoveOptions, config config.Store) error {
app := &AppImage{}
indexFile := fmt.Sprintf("%s.json", path.Join(config.IndexStore, options.Executable))
logger.Debugf("Checking if %s exists", indexFile)
if !helpers.CheckIfFileExists(indexFile) {
fmt.Printf("%s is not installed \n", tui.Yellow(options.Executable))
return nil
}
bar := tui.NewProgressBar(7, "r")
logger.Debugf("Unmarshalling JSON from %s", indexFile)
indexBytes, err := ioutil.ReadFile(indexFile)
if err != nil {
return err
}
bar.Add(1)
err = json.Unmarshal(indexBytes, app)
if err != nil {
return err
}
if app.IconPath != "" {
logger.Debugf("Removing thumbnail, %s", app.IconPath)
os.Remove(app.IconPath)
}
bar.Add(1)
if app.IconPathHicolor != "" {
logger.Debugf("Removing symlink to hicolor theme, %s", app.IconPathHicolor)
os.Remove(app.IconPathHicolor)
}
bar.Add(1)
if app.DesktopFile != "" {
logger.Debugf("Removing desktop file, %s", app.DesktopFile)
os.Remove(app.DesktopFile)
}
bar.Add(1)
binDir := path.Join(xdg.Home, ".local", "bin")
binFile := path.Join(binDir, options.Executable)
if helpers.CheckIfFileExists(binFile) {
binAbsPath, err := filepath.EvalSymlinks(binFile)
if err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {
// this link points to config.LocalStore, where all AppImages are stored
// I guess we need to remove them, no asking and all
// make sure we remove the file first to prevent conflicts in future
_ = os.Remove(binFile)
}
}
bar.Add(1)
logger.Debugf("Removing appimage, %s", app.Filepath)
_ = os.Remove(app.Filepath)
bar.Add(1)
logger.Debugf("Removing index file, %s", indexFile)
_ = os.Remove(indexFile)
bar.Add(1)
bar.Finish()
fmt.Printf("\n")
fmt.Printf("✅ %s removed successfully\n", app.Executable)
logger.Debugf("Removing all files completed successfully")
return bar.Finish()
}
| {
// this is some serious app which shares the same name
// as that of the target appimage
// we dont want users to be confused tbh
// so we need to ask them which of them, they would like to keep
logger.Debug("Detected another app which is not installed by zap. Refusing to remove")
// TODO: add a user prompt
logger.Fatalf("%s already exists. ", binFile)
} | conditional_block |
student-layout.component.ts | import {Component, ElementRef, OnInit, ViewChild} from '@angular/core';
import {animate, AUTO_STYLE, state, style, transition, trigger} from '@angular/animations';
import {studentMenuItems} from '../../shared/menu-items/student-menu';
import { DataserviceService } from 'src/app/dataservice/dataservice.service';
import { MyaccountService } from 'src/app/dataservice/myaccount.service';
import { ToastrService } from 'ngx-toastr';
import * as moment from 'moment';
@Component({
selector: 'app-student-layout',
templateUrl: './student-layout.component.html',
styleUrls: ['./student-layout.component.css'],
animations: [
trigger('mobileMenuTop', [
state('no-block, void',
style({
overflow: 'hidden',
height: '0px',
})
),
state('yes-block',
style({
height: AUTO_STYLE,
})
),
transition('no-block <=> yes-block', [
animate('400ms ease-in-out')
])
]),
trigger('slideInOut', [
state('in', style({
transform: 'translate3d(0, 0, 0)'
})),
state('out', style({
transform: 'translate3d(100%, 0, 0)'
})),
transition('in => out', animate('400ms ease-in-out')),
transition('out => in', animate('400ms ease-in-out'))
]),
trigger('slideOnOff', [
state('on', style({
transform: 'translate3d(0, 0, 0)'
})),
state('off', style({
transform: 'translate3d(100%, 0, 0)'
})),
transition('on => off', animate('400ms ease-in-out')),
transition('off => on', animate('400ms ease-in-out'))
]),
trigger('fadeInOutTranslate', [
transition(':enter', [
style({opacity: 0}),
animate('400ms ease-in-out', style({opacity: 1}))
]),
transition(':leave', [
style({transform: 'translate(0)'}),
animate('400ms ease-in-out', style({opacity: 0}))
])
])
]
})
export class StudentLayoutComponent implements OnInit {
navType: string; /* st1, st2(default), st3, st4 */
themeLayout: string; /* vertical(default) */
layoutType: string; /* dark, light */
verticalPlacement: string; /* left(default), right */
verticalLayout: string; /* wide(default), box */
deviceType: string; /* desktop(default), tablet, mobile */
verticalNavType: string; /* expanded(default), offcanvas */
verticalEffect: string; /* shrink(default), push, overlay */
vNavigationView: string; /* view1(default) */
pcodedHeaderPosition: string; /* fixed(default), relative*/
pcodedSidebarPosition: string; /* fixed(default), absolute*/
headerTheme: string; /* theme1(default), theme2, theme3, theme4, theme5, theme6 */
logoTheme: string; /* theme1(default), theme2, theme3, theme4, theme5, theme6 */
innerHeight: string;
windowWidth: number;
toggleOn: boolean;
headerFixedMargin: string;
navBarTheme: string; /* theme1, themelight1(default)*/
activeItemTheme: string; /* theme1, theme2, theme3, theme4(default), ..., theme11, theme12 */
isCollapsedMobile: string;
isCollapsedSideBar: string;
chatToggle: string;
chatToggleInverse: string;
chatInnerToggle: string;
chatInnerToggleInverse: string;
menuTitleTheme: string; /* theme1, theme2, theme3, theme4, theme5(default), theme6 */
itemBorder: boolean;
itemBorderStyle: string; /* none(default), solid, dotted, dashed */
subItemBorder: boolean;
subItemIcon: string; /* style1, style2, style3, style4, style5, style6(default) */
dropDownIcon: string; /* style1(default), style2, style3 */
configOpenRightBar: string;
isSidebarChecked: boolean;
isHeaderChecked: boolean;
@ViewChild('searchFriends', /* TODO: add static flag */ {static: false}) search_friends: ElementRef;
public config: any;
logininfo:any;
profiledata = {
address:'',
dateofbirth:'',
email:'',
gender:'',
mobile:'',
name:'',
zip_code:''
}
notificationarray = [];
profile_image_api:any;
role_type:any;
profileimage_name:any;
profileimage_url:any;
constructor(public menuItems: studentMenuItems,private ds:DataserviceService,private toastr:ToastrService,
private myacser:MyaccountService) {
console.log(this.menuItems.getAll());
this.logininfo = JSON.parse(sessionStorage.getItem('login_details'));
this.role_type = this.logininfo['role_type'];
this.profile_image_api = this.myacser.getprofileimageAPI();
this.navType = 'st5';
this.themeLayout = 'vertical';
this.vNavigationView = 'view1';
this.verticalPlacement = 'left';
this.verticalLayout = 'wide';
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
this.pcodedHeaderPosition = 'fixed';
this.pcodedSidebarPosition = 'fixed';
this.headerTheme = 'theme1';
this.logoTheme = 'theme1';
this.toggleOn = true;
this.headerFixedMargin = '80px';
this.navBarTheme = 'themelight1';
this.activeItemTheme = 'theme4';
this.isCollapsedMobile = 'no-block';
this.isCollapsedSideBar = 'no-block';
this.chatToggle = 'out';
this.chatToggleInverse = 'in';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'on';
this.menuTitleTheme = 'theme5';
this.itemBorder = true;
this.itemBorderStyle = 'none';
this.subItemBorder = true;
this.subItemIcon = 'style6';
this.dropDownIcon = 'style1';
this.isSidebarChecked = true;
this.isHeaderChecked = true;
const scrollHeight = window.screen.height - 150;
this.innerHeight = scrollHeight + 'px';
this.windowWidth = window.innerWidth;
this.setMenuAttributes(this.windowWidth);
// dark
/*this.setLayoutType('dark');
this.headerTheme = 'theme5';
this.logoTheme = 'theme5';*/
// light-dark
/*this.setLayoutType('dark');
this.setNavBarTheme('themelight1');
this.navType = 'st2';*/
// dark-light
// this.setNavBarTheme('theme1');
// this.navType = 'st3';
this.myacser.getdata('myaccount/'+this.logininfo['user_id']).then(async res => {
if(res['status'] == 'success'){
console.log(res);
let data = res['data'][0];
var notification = res['notification_data'];
if(notification != undefined && notification != '' && notification.length > 0){
notification.forEach(n => {
console.log(n.created_on);
const dateTimeAgo = moment(n.created_on).fromNow();
console.log(dateTimeAgo); //> 6 minutes ago
this.notificationarray.push({id:n.id,image:this.profile_image_api+''+n.profile_image,name:n.sponsorfname+' '+n.sponsorlname,msg:'Hi '+n.name+' '+n.last_name+' your sponsor has paid ₹ '+n.paid+' sponsored',paid_on:dateTimeAgo})
})
}
this.profiledata.name = data.name+' '+data.last_name;
this.profiledata.gender = data.gender;
this.profiledata.dateofbirth = data.dateofbirth;
this.profiledata.address = data.address;
this.profiledata.email = data.email;
this.profiledata.mobile = data.mobile;
this.profiledata.zip_code = data.zip_code;
var profile_img = await this.urlToObject(data.profile_image);
if(profile_img != null){
this.profileimage_name = data.profile_image;
this.profileimage_url = this.profile_image_api+''+data.profile_image;
}
}
},error => {
console.log(error);
if(error['error']){
this.toastr.error(error['error'].message, 'Error', {
progressBar:true
});
return;
}
})
}
ngOnInit() {
this.setBackgroundPattern('pattern2');
}
// gotonotification(data){
// }
onResize(event) {
this.innerHeight = event.target.innerHeight + 'px';
/* menu responsive */
this.windowWidth = event.target.innerWidth;
let reSizeFlag = true;
if (this.deviceType === 'tablet' && this.windowWidth >= 768 && this.windowWidth <= 1024) {
reSizeFlag = false;
} else if (this.deviceType === 'mobile' && this.windowWidth < 768) {
reSizeFlag = false;
}
/* for check device */
if (reSizeFlag) {
this.setMenuAttributes(this.windowWidth);
}
}
setMenuAttributes(windowWidth) {
if (windowWidth >= 768 && windowWidth <= 1024) {
this.deviceType = 'tablet';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'push';
} else if (windowWidth < 768) {
this.deviceType = 'mobile';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'overlay';
} else {
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
}
}
toggleOpened() {
if (this.windowWidth < 768) {
this.toggleOn = this.verticalNavType === 'offcanvas' ? true : this.toggleOn;
}
this.verticalNavType = this.verticalNavType === 'expanded' ? 'offcanvas' : 'expanded';
}
onClickedOutside(e: Event) {
if (this.windowWidth < 768 && this.toggleOn && this.verticalNavType !== 'offcanvas') {
this.toggleOn = true;
this.verticalNavType = 'offcanvas';
}
}
onMobileMenu() {
this.isCollapsedMobile = this.isCollapsedMobile === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleChat() {
this.chatToggle = this.chatToggle === 'out' ? 'in' : 'out';
this.chatToggleInverse = this.chatToggleInverse === 'out' ? 'in' : 'out';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'off';
}
toggleChatInner() {
this.chatInnerToggle = this.chatInnerToggle === 'off' ? 'on' : 'off';
this.chatInnerToggleInverse = this.chatInnerToggleInverse === 'off' ? 'on' : 'off';
}
searchFriendList(e: Event) {
const search = (this.search_friends.nativeElement.value).toLowerCase();
let search_input: string;
let search_parent: any;
const friendList = document.querySelectorAll('.userlist-box .media-body .chat-header');
Array.prototype.forEach.call(friendList, function(elements, index) {
search_input = (elements.innerHTML).toLowerCase();
search_parent = (elements.parentNode).parentNode;
if (search_input.indexOf(search) !== -1) {
search_parent.classList.add('show');
search_parent.classList.remove('hide');
} else {
search_parent.classList.add('hide');
search_parent.classList.remove('show');
}
});
}
toggleOpenedSidebar() {
this.isCollapsedSideBar = this.isCollapsedSideBar === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleRightbar() {
this.configOpenRightBar = this.configOpenRightBar === 'open' ? '' : 'open';
}
setSidebarPosition() {
this.isSidebarChecked = !this.isSidebarChecked;
this.pcodedSidebarPosition = this.isSidebarChecked === true ? 'fixed' : 'absolute';
}
setHeaderPosition() {
this.isHeaderChecked = !this.isHeaderChecked;
this.pcodedHeaderPosition = this.isHeaderChecked === true ? 'fixed' : 'relative';
this.headerFixedMargin = this.isHeaderChecked === true ? '80px' : '';
}
setBackgroundPattern(pattern) {
document.querySelector('body').setAttribute('themebg-pattern', pattern);
}
se | ype: string) {
this.layoutType = type;
if (type === 'dark') {
this.headerTheme = 'theme6';
this.navBarTheme = 'theme1';
this.logoTheme = 'theme6';
document.querySelector('body').classList.add('dark');
} else {
this.headerTheme = 'theme1';
this.navBarTheme = 'themelight1';
this.logoTheme = 'theme1';
document.querySelector('body').classList.remove('dark');
}
}
setNavBarTheme(theme: string) {
if (theme === 'themelight1') {
this.navBarTheme = 'themelight1';
} else {
this.navBarTheme = 'theme1';
}
}
urlToObject = async (imageName) => {
const response = await fetch((this.profile_image_api + imageName));
if(response.ok) {
const blob = await response.blob();
// console.log(blob)
// const file = new File([blob], imageName, {type: blob.type});
return blob;
} else {
return null;
}
}
logout(){
this.ds.logout('user');
}
}
| tLayoutType(t | identifier_name |
student-layout.component.ts | import {Component, ElementRef, OnInit, ViewChild} from '@angular/core';
import {animate, AUTO_STYLE, state, style, transition, trigger} from '@angular/animations';
import {studentMenuItems} from '../../shared/menu-items/student-menu';
import { DataserviceService } from 'src/app/dataservice/dataservice.service';
import { MyaccountService } from 'src/app/dataservice/myaccount.service';
import { ToastrService } from 'ngx-toastr';
import * as moment from 'moment';
@Component({
selector: 'app-student-layout',
templateUrl: './student-layout.component.html',
styleUrls: ['./student-layout.component.css'],
animations: [
trigger('mobileMenuTop', [
state('no-block, void',
style({
overflow: 'hidden',
height: '0px',
})
),
state('yes-block',
style({
height: AUTO_STYLE,
})
),
transition('no-block <=> yes-block', [
animate('400ms ease-in-out')
])
]),
trigger('slideInOut', [
state('in', style({
transform: 'translate3d(0, 0, 0)'
})),
state('out', style({
transform: 'translate3d(100%, 0, 0)'
})),
transition('in => out', animate('400ms ease-in-out')),
transition('out => in', animate('400ms ease-in-out'))
]),
trigger('slideOnOff', [
state('on', style({
transform: 'translate3d(0, 0, 0)'
})),
state('off', style({
transform: 'translate3d(100%, 0, 0)'
})),
transition('on => off', animate('400ms ease-in-out')),
transition('off => on', animate('400ms ease-in-out'))
]),
trigger('fadeInOutTranslate', [
transition(':enter', [
style({opacity: 0}),
animate('400ms ease-in-out', style({opacity: 1}))
]),
transition(':leave', [
style({transform: 'translate(0)'}),
animate('400ms ease-in-out', style({opacity: 0}))
])
])
]
})
export class StudentLayoutComponent implements OnInit {
navType: string; /* st1, st2(default), st3, st4 */
themeLayout: string; /* vertical(default) */
layoutType: string; /* dark, light */
verticalPlacement: string; /* left(default), right */
verticalLayout: string; /* wide(default), box */
deviceType: string; /* desktop(default), tablet, mobile */
verticalNavType: string; /* expanded(default), offcanvas */
verticalEffect: string; /* shrink(default), push, overlay */
vNavigationView: string; /* view1(default) */
pcodedHeaderPosition: string; /* fixed(default), relative*/
pcodedSidebarPosition: string; /* fixed(default), absolute*/
headerTheme: string; /* theme1(default), theme2, theme3, theme4, theme5, theme6 */
logoTheme: string; /* theme1(default), theme2, theme3, theme4, theme5, theme6 */
innerHeight: string;
windowWidth: number;
toggleOn: boolean;
headerFixedMargin: string;
navBarTheme: string; /* theme1, themelight1(default)*/
activeItemTheme: string; /* theme1, theme2, theme3, theme4(default), ..., theme11, theme12 */
isCollapsedMobile: string;
isCollapsedSideBar: string;
chatToggle: string;
chatToggleInverse: string;
chatInnerToggle: string;
chatInnerToggleInverse: string;
menuTitleTheme: string; /* theme1, theme2, theme3, theme4, theme5(default), theme6 */
itemBorder: boolean;
itemBorderStyle: string; /* none(default), solid, dotted, dashed */
subItemBorder: boolean;
subItemIcon: string; /* style1, style2, style3, style4, style5, style6(default) */
dropDownIcon: string; /* style1(default), style2, style3 */
configOpenRightBar: string;
isSidebarChecked: boolean;
isHeaderChecked: boolean;
@ViewChild('searchFriends', /* TODO: add static flag */ {static: false}) search_friends: ElementRef;
public config: any;
logininfo:any;
profiledata = {
address:'',
dateofbirth:'',
email:'',
gender:'',
mobile:'',
name:'',
zip_code:''
}
notificationarray = [];
profile_image_api:any;
role_type:any;
profileimage_name:any;
profileimage_url:any;
constructor(public menuItems: studentMenuItems,private ds:DataserviceService,private toastr:ToastrService,
private myacser:MyaccountService) {
console.log(this.menuItems.getAll());
this.logininfo = JSON.parse(sessionStorage.getItem('login_details'));
this.role_type = this.logininfo['role_type'];
this.profile_image_api = this.myacser.getprofileimageAPI();
this.navType = 'st5';
this.themeLayout = 'vertical';
this.vNavigationView = 'view1';
this.verticalPlacement = 'left';
this.verticalLayout = 'wide';
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
this.pcodedHeaderPosition = 'fixed';
this.pcodedSidebarPosition = 'fixed';
this.headerTheme = 'theme1';
this.logoTheme = 'theme1';
this.toggleOn = true;
this.headerFixedMargin = '80px';
this.navBarTheme = 'themelight1';
this.activeItemTheme = 'theme4';
this.isCollapsedMobile = 'no-block';
this.isCollapsedSideBar = 'no-block';
this.chatToggle = 'out';
this.chatToggleInverse = 'in';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'on';
this.menuTitleTheme = 'theme5';
this.itemBorder = true;
this.itemBorderStyle = 'none';
this.subItemBorder = true;
this.subItemIcon = 'style6';
this.dropDownIcon = 'style1';
this.isSidebarChecked = true;
this.isHeaderChecked = true;
const scrollHeight = window.screen.height - 150;
this.innerHeight = scrollHeight + 'px';
this.windowWidth = window.innerWidth;
this.setMenuAttributes(this.windowWidth);
// dark
/*this.setLayoutType('dark');
this.headerTheme = 'theme5';
this.logoTheme = 'theme5';*/
// light-dark
/*this.setLayoutType('dark');
this.setNavBarTheme('themelight1');
this.navType = 'st2';*/
// dark-light
// this.setNavBarTheme('theme1');
// this.navType = 'st3';
this.myacser.getdata('myaccount/'+this.logininfo['user_id']).then(async res => {
if(res['status'] == 'success'){
console.log(res);
let data = res['data'][0];
var notification = res['notification_data'];
if(notification != undefined && notification != '' && notification.length > 0){
notification.forEach(n => {
console.log(n.created_on);
const dateTimeAgo = moment(n.created_on).fromNow();
console.log(dateTimeAgo); //> 6 minutes ago
this.notificationarray.push({id:n.id,image:this.profile_image_api+''+n.profile_image,name:n.sponsorfname+' '+n.sponsorlname,msg:'Hi '+n.name+' '+n.last_name+' your sponsor has paid ₹ '+n.paid+' sponsored',paid_on:dateTimeAgo})
})
}
this.profiledata.name = data.name+' '+data.last_name;
this.profiledata.gender = data.gender;
this.profiledata.dateofbirth = data.dateofbirth;
this.profiledata.address = data.address;
this.profiledata.email = data.email;
this.profiledata.mobile = data.mobile;
this.profiledata.zip_code = data.zip_code;
var profile_img = await this.urlToObject(data.profile_image);
if(profile_img != null){
this.profileimage_name = data.profile_image;
this.profileimage_url = this.profile_image_api+''+data.profile_image;
}
}
},error => {
console.log(error);
if(error['error']){
this.toastr.error(error['error'].message, 'Error', {
progressBar:true
});
return;
}
})
}
ngOnInit() {
this.setBackgroundPattern('pattern2');
}
// gotonotification(data){
// }
onResize(event) {
this.innerHeight = event.target.innerHeight + 'px';
/* menu responsive */
this.windowWidth = event.target.innerWidth;
let reSizeFlag = true;
if (this.deviceType === 'tablet' && this.windowWidth >= 768 && this.windowWidth <= 1024) {
reSizeFlag = false;
} else if (this.deviceType === 'mobile' && this.windowWidth < 768) {
reSizeFlag = false;
}
/* for check device */
if (reSizeFlag) {
this.setMenuAttributes(this.windowWidth);
}
}
setMenuAttributes(windowWidth) {
if (windowWidth >= 768 && windowWidth <= 1024) {
this.deviceType = 'tablet';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'push';
} else if (windowWidth < 768) {
this.deviceType = 'mobile';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'overlay';
} else {
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
} | }
this.verticalNavType = this.verticalNavType === 'expanded' ? 'offcanvas' : 'expanded';
}
onClickedOutside(e: Event) {
if (this.windowWidth < 768 && this.toggleOn && this.verticalNavType !== 'offcanvas') {
this.toggleOn = true;
this.verticalNavType = 'offcanvas';
}
}
onMobileMenu() {
this.isCollapsedMobile = this.isCollapsedMobile === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleChat() {
this.chatToggle = this.chatToggle === 'out' ? 'in' : 'out';
this.chatToggleInverse = this.chatToggleInverse === 'out' ? 'in' : 'out';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'off';
}
toggleChatInner() {
this.chatInnerToggle = this.chatInnerToggle === 'off' ? 'on' : 'off';
this.chatInnerToggleInverse = this.chatInnerToggleInverse === 'off' ? 'on' : 'off';
}
searchFriendList(e: Event) {
const search = (this.search_friends.nativeElement.value).toLowerCase();
let search_input: string;
let search_parent: any;
const friendList = document.querySelectorAll('.userlist-box .media-body .chat-header');
Array.prototype.forEach.call(friendList, function(elements, index) {
search_input = (elements.innerHTML).toLowerCase();
search_parent = (elements.parentNode).parentNode;
if (search_input.indexOf(search) !== -1) {
search_parent.classList.add('show');
search_parent.classList.remove('hide');
} else {
search_parent.classList.add('hide');
search_parent.classList.remove('show');
}
});
}
toggleOpenedSidebar() {
this.isCollapsedSideBar = this.isCollapsedSideBar === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleRightbar() {
this.configOpenRightBar = this.configOpenRightBar === 'open' ? '' : 'open';
}
setSidebarPosition() {
this.isSidebarChecked = !this.isSidebarChecked;
this.pcodedSidebarPosition = this.isSidebarChecked === true ? 'fixed' : 'absolute';
}
setHeaderPosition() {
this.isHeaderChecked = !this.isHeaderChecked;
this.pcodedHeaderPosition = this.isHeaderChecked === true ? 'fixed' : 'relative';
this.headerFixedMargin = this.isHeaderChecked === true ? '80px' : '';
}
setBackgroundPattern(pattern) {
document.querySelector('body').setAttribute('themebg-pattern', pattern);
}
setLayoutType(type: string) {
this.layoutType = type;
if (type === 'dark') {
this.headerTheme = 'theme6';
this.navBarTheme = 'theme1';
this.logoTheme = 'theme6';
document.querySelector('body').classList.add('dark');
} else {
this.headerTheme = 'theme1';
this.navBarTheme = 'themelight1';
this.logoTheme = 'theme1';
document.querySelector('body').classList.remove('dark');
}
}
setNavBarTheme(theme: string) {
if (theme === 'themelight1') {
this.navBarTheme = 'themelight1';
} else {
this.navBarTheme = 'theme1';
}
}
urlToObject = async (imageName) => {
const response = await fetch((this.profile_image_api + imageName));
if(response.ok) {
const blob = await response.blob();
// console.log(blob)
// const file = new File([blob], imageName, {type: blob.type});
return blob;
} else {
return null;
}
}
logout(){
this.ds.logout('user');
}
} | }
toggleOpened() {
if (this.windowWidth < 768) {
this.toggleOn = this.verticalNavType === 'offcanvas' ? true : this.toggleOn; | random_line_split |
student-layout.component.ts | import {Component, ElementRef, OnInit, ViewChild} from '@angular/core';
import {animate, AUTO_STYLE, state, style, transition, trigger} from '@angular/animations';
import {studentMenuItems} from '../../shared/menu-items/student-menu';
import { DataserviceService } from 'src/app/dataservice/dataservice.service';
import { MyaccountService } from 'src/app/dataservice/myaccount.service';
import { ToastrService } from 'ngx-toastr';
import * as moment from 'moment';
@Component({
selector: 'app-student-layout',
templateUrl: './student-layout.component.html',
styleUrls: ['./student-layout.component.css'],
animations: [
trigger('mobileMenuTop', [
state('no-block, void',
style({
overflow: 'hidden',
height: '0px',
})
),
state('yes-block',
style({
height: AUTO_STYLE,
})
),
transition('no-block <=> yes-block', [
animate('400ms ease-in-out')
])
]),
trigger('slideInOut', [
state('in', style({
transform: 'translate3d(0, 0, 0)'
})),
state('out', style({
transform: 'translate3d(100%, 0, 0)'
})),
transition('in => out', animate('400ms ease-in-out')),
transition('out => in', animate('400ms ease-in-out'))
]),
trigger('slideOnOff', [
state('on', style({
transform: 'translate3d(0, 0, 0)'
})),
state('off', style({
transform: 'translate3d(100%, 0, 0)'
})),
transition('on => off', animate('400ms ease-in-out')),
transition('off => on', animate('400ms ease-in-out'))
]),
trigger('fadeInOutTranslate', [
transition(':enter', [
style({opacity: 0}),
animate('400ms ease-in-out', style({opacity: 1}))
]),
transition(':leave', [
style({transform: 'translate(0)'}),
animate('400ms ease-in-out', style({opacity: 0}))
])
])
]
})
export class StudentLayoutComponent implements OnInit {
navType: string; /* st1, st2(default), st3, st4 */
themeLayout: string; /* vertical(default) */
layoutType: string; /* dark, light */
verticalPlacement: string; /* left(default), right */
verticalLayout: string; /* wide(default), box */
deviceType: string; /* desktop(default), tablet, mobile */
verticalNavType: string; /* expanded(default), offcanvas */
verticalEffect: string; /* shrink(default), push, overlay */
vNavigationView: string; /* view1(default) */
pcodedHeaderPosition: string; /* fixed(default), relative*/
pcodedSidebarPosition: string; /* fixed(default), absolute*/
headerTheme: string; /* theme1(default), theme2, theme3, theme4, theme5, theme6 */
logoTheme: string; /* theme1(default), theme2, theme3, theme4, theme5, theme6 */
innerHeight: string;
windowWidth: number;
toggleOn: boolean;
headerFixedMargin: string;
navBarTheme: string; /* theme1, themelight1(default)*/
activeItemTheme: string; /* theme1, theme2, theme3, theme4(default), ..., theme11, theme12 */
isCollapsedMobile: string;
isCollapsedSideBar: string;
chatToggle: string;
chatToggleInverse: string;
chatInnerToggle: string;
chatInnerToggleInverse: string;
menuTitleTheme: string; /* theme1, theme2, theme3, theme4, theme5(default), theme6 */
itemBorder: boolean;
itemBorderStyle: string; /* none(default), solid, dotted, dashed */
subItemBorder: boolean;
subItemIcon: string; /* style1, style2, style3, style4, style5, style6(default) */
dropDownIcon: string; /* style1(default), style2, style3 */
configOpenRightBar: string;
isSidebarChecked: boolean;
isHeaderChecked: boolean;
@ViewChild('searchFriends', /* TODO: add static flag */ {static: false}) search_friends: ElementRef;
public config: any;
logininfo:any;
profiledata = {
address:'',
dateofbirth:'',
email:'',
gender:'',
mobile:'',
name:'',
zip_code:''
}
notificationarray = [];
profile_image_api:any;
role_type:any;
profileimage_name:any;
profileimage_url:any;
constructor(public menuItems: studentMenuItems,private ds:DataserviceService,private toastr:ToastrService,
private myacser:MyaccountService) {
console.log(this.menuItems.getAll());
this.logininfo = JSON.parse(sessionStorage.getItem('login_details'));
this.role_type = this.logininfo['role_type'];
this.profile_image_api = this.myacser.getprofileimageAPI();
this.navType = 'st5';
this.themeLayout = 'vertical';
this.vNavigationView = 'view1';
this.verticalPlacement = 'left';
this.verticalLayout = 'wide';
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
this.pcodedHeaderPosition = 'fixed';
this.pcodedSidebarPosition = 'fixed';
this.headerTheme = 'theme1';
this.logoTheme = 'theme1';
this.toggleOn = true;
this.headerFixedMargin = '80px';
this.navBarTheme = 'themelight1';
this.activeItemTheme = 'theme4';
this.isCollapsedMobile = 'no-block';
this.isCollapsedSideBar = 'no-block';
this.chatToggle = 'out';
this.chatToggleInverse = 'in';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'on';
this.menuTitleTheme = 'theme5';
this.itemBorder = true;
this.itemBorderStyle = 'none';
this.subItemBorder = true;
this.subItemIcon = 'style6';
this.dropDownIcon = 'style1';
this.isSidebarChecked = true;
this.isHeaderChecked = true;
const scrollHeight = window.screen.height - 150;
this.innerHeight = scrollHeight + 'px';
this.windowWidth = window.innerWidth;
this.setMenuAttributes(this.windowWidth);
// dark
/*this.setLayoutType('dark');
this.headerTheme = 'theme5';
this.logoTheme = 'theme5';*/
// light-dark
/*this.setLayoutType('dark');
this.setNavBarTheme('themelight1');
this.navType = 'st2';*/
// dark-light
// this.setNavBarTheme('theme1');
// this.navType = 'st3';
this.myacser.getdata('myaccount/'+this.logininfo['user_id']).then(async res => {
if(res['status'] == 'success'){
console.log(res);
let data = res['data'][0];
var notification = res['notification_data'];
if(notification != undefined && notification != '' && notification.length > 0){
notification.forEach(n => {
console.log(n.created_on);
const dateTimeAgo = moment(n.created_on).fromNow();
console.log(dateTimeAgo); //> 6 minutes ago
this.notificationarray.push({id:n.id,image:this.profile_image_api+''+n.profile_image,name:n.sponsorfname+' '+n.sponsorlname,msg:'Hi '+n.name+' '+n.last_name+' your sponsor has paid ₹ '+n.paid+' sponsored',paid_on:dateTimeAgo})
})
}
this.profiledata.name = data.name+' '+data.last_name;
this.profiledata.gender = data.gender;
this.profiledata.dateofbirth = data.dateofbirth;
this.profiledata.address = data.address;
this.profiledata.email = data.email;
this.profiledata.mobile = data.mobile;
this.profiledata.zip_code = data.zip_code;
var profile_img = await this.urlToObject(data.profile_image);
if(profile_img != null){
this.profileimage_name = data.profile_image;
this.profileimage_url = this.profile_image_api+''+data.profile_image;
}
}
},error => {
console.log(error);
if(error['error']){
this.toastr.error(error['error'].message, 'Error', {
progressBar:true
});
return;
}
})
}
ngOnInit() {
this.setBackgroundPattern('pattern2');
}
// gotonotification(data){
// }
onResize(event) {
this.innerHeight = event.target.innerHeight + 'px';
/* menu responsive */
this.windowWidth = event.target.innerWidth;
let reSizeFlag = true;
if (this.deviceType === 'tablet' && this.windowWidth >= 768 && this.windowWidth <= 1024) {
reSizeFlag = false;
} else if (this.deviceType === 'mobile' && this.windowWidth < 768) {
reSizeFlag = false;
}
/* for check device */
if (reSizeFlag) {
this.setMenuAttributes(this.windowWidth);
}
}
setMenuAttributes(windowWidth) {
if (windowWidth >= 768 && windowWidth <= 1024) {
this.deviceType = 'tablet';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'push';
} else if (windowWidth < 768) {
this.deviceType = 'mobile';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'overlay';
} else {
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
}
}
toggleOpened() {
if (this.windowWidth < 768) {
this.toggleOn = this.verticalNavType === 'offcanvas' ? true : this.toggleOn;
}
this.verticalNavType = this.verticalNavType === 'expanded' ? 'offcanvas' : 'expanded';
}
onClickedOutside(e: Event) {
if (this.windowWidth < 768 && this.toggleOn && this.verticalNavType !== 'offcanvas') {
this.toggleOn = true;
this.verticalNavType = 'offcanvas';
}
}
onMobileMenu() {
this.isCollapsedMobile = this.isCollapsedMobile === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleChat() {
this.chatToggle = this.chatToggle === 'out' ? 'in' : 'out';
this.chatToggleInverse = this.chatToggleInverse === 'out' ? 'in' : 'out';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'off';
}
toggleChatInner() {
this.chatInnerToggle = this.chatInnerToggle === 'off' ? 'on' : 'off';
this.chatInnerToggleInverse = this.chatInnerToggleInverse === 'off' ? 'on' : 'off';
}
searchFriendList(e: Event) {
const search = (this.search_friends.nativeElement.value).toLowerCase();
let search_input: string;
let search_parent: any;
const friendList = document.querySelectorAll('.userlist-box .media-body .chat-header');
Array.prototype.forEach.call(friendList, function(elements, index) {
search_input = (elements.innerHTML).toLowerCase();
search_parent = (elements.parentNode).parentNode;
if (search_input.indexOf(search) !== -1) {
search_parent.classList.add('show');
search_parent.classList.remove('hide');
} else {
search_parent.classList.add('hide');
search_parent.classList.remove('show');
}
});
}
toggleOpenedSidebar() {
this.isCollapsedSideBar = this.isCollapsedSideBar === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleRightbar() {
| setSidebarPosition() {
this.isSidebarChecked = !this.isSidebarChecked;
this.pcodedSidebarPosition = this.isSidebarChecked === true ? 'fixed' : 'absolute';
}
setHeaderPosition() {
this.isHeaderChecked = !this.isHeaderChecked;
this.pcodedHeaderPosition = this.isHeaderChecked === true ? 'fixed' : 'relative';
this.headerFixedMargin = this.isHeaderChecked === true ? '80px' : '';
}
setBackgroundPattern(pattern) {
document.querySelector('body').setAttribute('themebg-pattern', pattern);
}
setLayoutType(type: string) {
this.layoutType = type;
if (type === 'dark') {
this.headerTheme = 'theme6';
this.navBarTheme = 'theme1';
this.logoTheme = 'theme6';
document.querySelector('body').classList.add('dark');
} else {
this.headerTheme = 'theme1';
this.navBarTheme = 'themelight1';
this.logoTheme = 'theme1';
document.querySelector('body').classList.remove('dark');
}
}
setNavBarTheme(theme: string) {
if (theme === 'themelight1') {
this.navBarTheme = 'themelight1';
} else {
this.navBarTheme = 'theme1';
}
}
urlToObject = async (imageName) => {
const response = await fetch((this.profile_image_api + imageName));
if(response.ok) {
const blob = await response.blob();
// console.log(blob)
// const file = new File([blob], imageName, {type: blob.type});
return blob;
} else {
return null;
}
}
logout(){
this.ds.logout('user');
}
}
| this.configOpenRightBar = this.configOpenRightBar === 'open' ? '' : 'open';
}
| identifier_body |
student-layout.component.ts | import {Component, ElementRef, OnInit, ViewChild} from '@angular/core';
import {animate, AUTO_STYLE, state, style, transition, trigger} from '@angular/animations';
import {studentMenuItems} from '../../shared/menu-items/student-menu';
import { DataserviceService } from 'src/app/dataservice/dataservice.service';
import { MyaccountService } from 'src/app/dataservice/myaccount.service';
import { ToastrService } from 'ngx-toastr';
import * as moment from 'moment';
@Component({
selector: 'app-student-layout',
templateUrl: './student-layout.component.html',
styleUrls: ['./student-layout.component.css'],
animations: [
trigger('mobileMenuTop', [
state('no-block, void',
style({
overflow: 'hidden',
height: '0px',
})
),
state('yes-block',
style({
height: AUTO_STYLE,
})
),
transition('no-block <=> yes-block', [
animate('400ms ease-in-out')
])
]),
trigger('slideInOut', [
state('in', style({
transform: 'translate3d(0, 0, 0)'
})),
state('out', style({
transform: 'translate3d(100%, 0, 0)'
})),
transition('in => out', animate('400ms ease-in-out')),
transition('out => in', animate('400ms ease-in-out'))
]),
trigger('slideOnOff', [
state('on', style({
transform: 'translate3d(0, 0, 0)'
})),
state('off', style({
transform: 'translate3d(100%, 0, 0)'
})),
transition('on => off', animate('400ms ease-in-out')),
transition('off => on', animate('400ms ease-in-out'))
]),
trigger('fadeInOutTranslate', [
transition(':enter', [
style({opacity: 0}),
animate('400ms ease-in-out', style({opacity: 1}))
]),
transition(':leave', [
style({transform: 'translate(0)'}),
animate('400ms ease-in-out', style({opacity: 0}))
])
])
]
})
export class StudentLayoutComponent implements OnInit {
navType: string; /* st1, st2(default), st3, st4 */
themeLayout: string; /* vertical(default) */
layoutType: string; /* dark, light */
verticalPlacement: string; /* left(default), right */
verticalLayout: string; /* wide(default), box */
deviceType: string; /* desktop(default), tablet, mobile */
verticalNavType: string; /* expanded(default), offcanvas */
verticalEffect: string; /* shrink(default), push, overlay */
vNavigationView: string; /* view1(default) */
pcodedHeaderPosition: string; /* fixed(default), relative*/
pcodedSidebarPosition: string; /* fixed(default), absolute*/
headerTheme: string; /* theme1(default), theme2, theme3, theme4, theme5, theme6 */
logoTheme: string; /* theme1(default), theme2, theme3, theme4, theme5, theme6 */
innerHeight: string;
windowWidth: number;
toggleOn: boolean;
headerFixedMargin: string;
navBarTheme: string; /* theme1, themelight1(default)*/
activeItemTheme: string; /* theme1, theme2, theme3, theme4(default), ..., theme11, theme12 */
isCollapsedMobile: string;
isCollapsedSideBar: string;
chatToggle: string;
chatToggleInverse: string;
chatInnerToggle: string;
chatInnerToggleInverse: string;
menuTitleTheme: string; /* theme1, theme2, theme3, theme4, theme5(default), theme6 */
itemBorder: boolean;
itemBorderStyle: string; /* none(default), solid, dotted, dashed */
subItemBorder: boolean;
subItemIcon: string; /* style1, style2, style3, style4, style5, style6(default) */
dropDownIcon: string; /* style1(default), style2, style3 */
configOpenRightBar: string;
isSidebarChecked: boolean;
isHeaderChecked: boolean;
@ViewChild('searchFriends', /* TODO: add static flag */ {static: false}) search_friends: ElementRef;
public config: any;
logininfo:any;
profiledata = {
address:'',
dateofbirth:'',
email:'',
gender:'',
mobile:'',
name:'',
zip_code:''
}
notificationarray = [];
profile_image_api:any;
role_type:any;
profileimage_name:any;
profileimage_url:any;
constructor(public menuItems: studentMenuItems,private ds:DataserviceService,private toastr:ToastrService,
private myacser:MyaccountService) {
console.log(this.menuItems.getAll());
this.logininfo = JSON.parse(sessionStorage.getItem('login_details'));
this.role_type = this.logininfo['role_type'];
this.profile_image_api = this.myacser.getprofileimageAPI();
this.navType = 'st5';
this.themeLayout = 'vertical';
this.vNavigationView = 'view1';
this.verticalPlacement = 'left';
this.verticalLayout = 'wide';
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
this.pcodedHeaderPosition = 'fixed';
this.pcodedSidebarPosition = 'fixed';
this.headerTheme = 'theme1';
this.logoTheme = 'theme1';
this.toggleOn = true;
this.headerFixedMargin = '80px';
this.navBarTheme = 'themelight1';
this.activeItemTheme = 'theme4';
this.isCollapsedMobile = 'no-block';
this.isCollapsedSideBar = 'no-block';
this.chatToggle = 'out';
this.chatToggleInverse = 'in';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'on';
this.menuTitleTheme = 'theme5';
this.itemBorder = true;
this.itemBorderStyle = 'none';
this.subItemBorder = true;
this.subItemIcon = 'style6';
this.dropDownIcon = 'style1';
this.isSidebarChecked = true;
this.isHeaderChecked = true;
const scrollHeight = window.screen.height - 150;
this.innerHeight = scrollHeight + 'px';
this.windowWidth = window.innerWidth;
this.setMenuAttributes(this.windowWidth);
// dark
/*this.setLayoutType('dark');
this.headerTheme = 'theme5';
this.logoTheme = 'theme5';*/
// light-dark
/*this.setLayoutType('dark');
this.setNavBarTheme('themelight1');
this.navType = 'st2';*/
// dark-light
// this.setNavBarTheme('theme1');
// this.navType = 'st3';
this.myacser.getdata('myaccount/'+this.logininfo['user_id']).then(async res => {
if(res['status'] == 'success'){
console.log(res);
let data = res['data'][0];
var notification = res['notification_data'];
if(notification != undefined && notification != '' && notification.length > 0){
notification.forEach(n => {
console.log(n.created_on);
const dateTimeAgo = moment(n.created_on).fromNow();
console.log(dateTimeAgo); //> 6 minutes ago
this.notificationarray.push({id:n.id,image:this.profile_image_api+''+n.profile_image,name:n.sponsorfname+' '+n.sponsorlname,msg:'Hi '+n.name+' '+n.last_name+' your sponsor has paid ₹ '+n.paid+' sponsored',paid_on:dateTimeAgo})
})
}
this.profiledata.name = data.name+' '+data.last_name;
this.profiledata.gender = data.gender;
this.profiledata.dateofbirth = data.dateofbirth;
this.profiledata.address = data.address;
this.profiledata.email = data.email;
this.profiledata.mobile = data.mobile;
this.profiledata.zip_code = data.zip_code;
var profile_img = await this.urlToObject(data.profile_image);
if(profile_img != null){
this.profileimage_name = data.profile_image;
this.profileimage_url = this.profile_image_api+''+data.profile_image;
}
}
},error => {
console.log(error);
if(error['error']){
this.toastr.error(error['error'].message, 'Error', {
progressBar:true
});
return;
}
})
}
ngOnInit() {
this.setBackgroundPattern('pattern2');
}
// gotonotification(data){
// }
onResize(event) {
this.innerHeight = event.target.innerHeight + 'px';
/* menu responsive */
this.windowWidth = event.target.innerWidth;
let reSizeFlag = true;
if (this.deviceType === 'tablet' && this.windowWidth >= 768 && this.windowWidth <= 1024) {
reSizeFlag = false;
} else if (this.deviceType === 'mobile' && this.windowWidth < 768) {
reSizeFlag = false;
}
/* for check device */
if (reSizeFlag) {
this.setMenuAttributes(this.windowWidth);
}
}
setMenuAttributes(windowWidth) {
if (windowWidth >= 768 && windowWidth <= 1024) {
this.deviceType = 'tablet';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'push';
} else if (windowWidth < 768) {
this.deviceType = 'mobile';
this.verticalNavType = 'offcanvas';
this.verticalEffect = 'overlay';
} else {
this.deviceType = 'desktop';
this.verticalNavType = 'expanded';
this.verticalEffect = 'shrink';
}
}
toggleOpened() {
if (this.windowWidth < 768) {
this.toggleOn = this.verticalNavType === 'offcanvas' ? true : this.toggleOn;
}
this.verticalNavType = this.verticalNavType === 'expanded' ? 'offcanvas' : 'expanded';
}
onClickedOutside(e: Event) {
if (this.windowWidth < 768 && this.toggleOn && this.verticalNavType !== 'offcanvas') {
this.toggleOn = true;
this.verticalNavType = 'offcanvas';
}
}
onMobileMenu() {
this.isCollapsedMobile = this.isCollapsedMobile === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleChat() {
this.chatToggle = this.chatToggle === 'out' ? 'in' : 'out';
this.chatToggleInverse = this.chatToggleInverse === 'out' ? 'in' : 'out';
this.chatInnerToggle = 'off';
this.chatInnerToggleInverse = 'off';
}
toggleChatInner() {
this.chatInnerToggle = this.chatInnerToggle === 'off' ? 'on' : 'off';
this.chatInnerToggleInverse = this.chatInnerToggleInverse === 'off' ? 'on' : 'off';
}
searchFriendList(e: Event) {
const search = (this.search_friends.nativeElement.value).toLowerCase();
let search_input: string;
let search_parent: any;
const friendList = document.querySelectorAll('.userlist-box .media-body .chat-header');
Array.prototype.forEach.call(friendList, function(elements, index) {
search_input = (elements.innerHTML).toLowerCase();
search_parent = (elements.parentNode).parentNode;
if (search_input.indexOf(search) !== -1) {
search_parent.classList.add('show');
search_parent.classList.remove('hide');
} else {
search_parent.classList.add('hide');
search_parent.classList.remove('show');
}
});
}
toggleOpenedSidebar() {
this.isCollapsedSideBar = this.isCollapsedSideBar === 'yes-block' ? 'no-block' : 'yes-block';
}
toggleRightbar() {
this.configOpenRightBar = this.configOpenRightBar === 'open' ? '' : 'open';
}
setSidebarPosition() {
this.isSidebarChecked = !this.isSidebarChecked;
this.pcodedSidebarPosition = this.isSidebarChecked === true ? 'fixed' : 'absolute';
}
setHeaderPosition() {
this.isHeaderChecked = !this.isHeaderChecked;
this.pcodedHeaderPosition = this.isHeaderChecked === true ? 'fixed' : 'relative';
this.headerFixedMargin = this.isHeaderChecked === true ? '80px' : '';
}
setBackgroundPattern(pattern) {
document.querySelector('body').setAttribute('themebg-pattern', pattern);
}
setLayoutType(type: string) {
this.layoutType = type;
if (type === 'dark') {
this.headerTheme = 'theme6';
this.navBarTheme = 'theme1';
this.logoTheme = 'theme6';
document.querySelector('body').classList.add('dark');
} else {
this.headerTheme = 'theme1';
this.navBarTheme = 'themelight1';
this.logoTheme = 'theme1';
document.querySelector('body').classList.remove('dark');
}
}
setNavBarTheme(theme: string) {
if (theme === 'themelight1') {
| lse {
this.navBarTheme = 'theme1';
}
}
urlToObject = async (imageName) => {
const response = await fetch((this.profile_image_api + imageName));
if(response.ok) {
const blob = await response.blob();
// console.log(blob)
// const file = new File([blob], imageName, {type: blob.type});
return blob;
} else {
return null;
}
}
logout(){
this.ds.logout('user');
}
}
| this.navBarTheme = 'themelight1';
} e | conditional_block |
queue.go | package memberlist
import (
"math"
"sync"
"github.com/google/btree"
)
// TransmitLimitedQueue is used to queue messages to broadcast to
// the cluster (via gossip) but limits the number of transmits per
// message. It also prioritizes messages with lower transmit counts
// (hence newer messages).
type TransmitLimitedQueue struct {
// NumNodes returns the number of nodes in the cluster. This is
// used to determine the retransmit count, which is calculated
// based on the log of this.
NumNodes func() int
// RetransmitMult is the multiplier used to determine the maximum
// number of retransmissions attempted.
RetransmitMult int
mu sync.Mutex
tq *btree.BTree // stores *limitedBroadcast as btree.Item
tm map[string]*limitedBroadcast
idGen int64
}
type limitedBroadcast struct {
transmits int // btree-key[0]: Number of transmissions attempted.
msgLen int64 // btree-key[1]: copied from len(b.Message())
id int64 // btree-key[2]: unique incrementing id stamped at submission time
b Broadcast
name string // set if Broadcast is a NamedBroadcast
}
// Less tests whether the current item is less than the given argument.
//
// This must provide a strict weak ordering.
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
//
// default ordering is
// - [transmits=0, ..., transmits=inf]
// - [transmits=0:len=999, ..., transmits=0:len=2, ...]
// - [transmits=0:len=999,id=999, ..., transmits=0:len=999:id=1, ...]
func (b *limitedBroadcast) Less(than btree.Item) bool {
o := than.(*limitedBroadcast)
if b.transmits < o.transmits {
return true
} else if b.transmits > o.transmits {
return false
}
if b.msgLen > o.msgLen {
return true
} else if b.msgLen < o.msgLen {
return false
}
return b.id > o.id
}
// for testing; emits in transmit order if reverse=false
func (q *TransmitLimitedQueue) orderedView(reverse bool) []*limitedBroadcast {
q.mu.Lock()
defer q.mu.Unlock()
out := make([]*limitedBroadcast, 0, q.lenLocked())
q.walkReadOnlyLocked(reverse, func(cur *limitedBroadcast) bool {
out = append(out, cur)
return true
})
return out
}
// walkReadOnlyLocked calls f for each item in the queue traversing it in
// natural order (by Less) when reverse=false and the opposite when true. You
// must hold the mutex.
//
// This method panics if you attempt to mutate the item during traversal. The
// underlying btree should also not be mutated during traversal.
func (q *TransmitLimitedQueue) walkReadOnlyLocked(reverse bool, f func(*limitedBroadcast) bool) {
if q.lenLocked() == 0 {
return
}
iter := func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
prevTransmits := cur.transmits
prevMsgLen := cur.msgLen
prevID := cur.id
keepGoing := f(cur)
if prevTransmits != cur.transmits || prevMsgLen != cur.msgLen || prevID != cur.id {
panic("edited queue while walking read only")
}
return keepGoing
}
if reverse {
q.tq.Descend(iter) // end with transmit 0
} else {
q.tq.Ascend(iter) // start with transmit 0
}
}
// Broadcast is something that can be broadcasted via gossip to
// the memberlist cluster.
type Broadcast interface {
// Invalidates checks if enqueuing the current broadcast
// invalidates a previous broadcast
Invalidates(b Broadcast) bool
// Returns a byte form of the message
Message() []byte
// Finished is invoked when the message will no longer
// be broadcast, either due to invalidation or to the
// transmit limit being reached
Finished()
}
// NamedBroadcast is an optional extension of the Broadcast interface that
// gives each message a unique string name, and that is used to optimize
//
// You shoud ensure that Invalidates() checks the same uniqueness as the
// example below:
//
// func (b *foo) Invalidates(other Broadcast) bool {
// nb, ok := other.(NamedBroadcast)
// if !ok {
// return false
// }
// return b.Name() == nb.Name()
// }
//
// Invalidates() isn't currently used for NamedBroadcasts, but that may change
// in the future.
type NamedBroadcast interface {
Broadcast
// The unique identity of this broadcast message.
Name() string
}
// UniqueBroadcast is an optional interface that indicates that each message is
// intrinsically unique and there is no need to scan the broadcast queue for
// duplicates.
//
// You should ensure that Invalidates() always returns false if implementing
// this interface. Invalidates() isn't currently used for UniqueBroadcasts, but
// that may change in the future.
type UniqueBroadcast interface {
Broadcast
// UniqueBroadcast is just a marker method for this interface.
UniqueBroadcast()
}
// QueueBroadcast is used to enqueue a broadcast
func (q *TransmitLimitedQueue) QueueBroadcast(b Broadcast) {
q.queueBroadcast(b, 0)
}
// lazyInit initializes internal data structures the first time they are
// needed. You must already hold the mutex.
func (q *TransmitLimitedQueue) lazyInit() {
if q.tq == nil {
q.tq = btree.New(32)
}
if q.tm == nil {
q.tm = make(map[string]*limitedBroadcast)
}
}
// queueBroadcast is like QueueBroadcast but you can use a nonzero value for
// the initial transmit tier assigned to the message. This is meant to be used
// for unit testing.
func (q *TransmitLimitedQueue) queueBroadcast(b Broadcast, initialTransmits int) {
q.mu.Lock()
defer q.mu.Unlock()
q.lazyInit()
if q.idGen == math.MaxInt64 {
// it's super duper unlikely to wrap around within the retransmit limit
q.idGen = 1
} else {
q.idGen++
}
id := q.idGen
lb := &limitedBroadcast{
transmits: initialTransmits,
msgLen: int64(len(b.Message())),
id: id,
b: b,
}
unique := false
if nb, ok := b.(NamedBroadcast); ok {
lb.name = nb.Name()
} else if _, ok := b.(UniqueBroadcast); ok {
unique = true
}
// Check if this message invalidates another.
if lb.name != "" | else if !unique {
// Slow path, hopefully nothing hot hits this.
var remove []*limitedBroadcast
q.tq.Ascend(func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
// Special Broadcasts can only invalidate each other.
switch cur.b.(type) {
case NamedBroadcast:
// noop
case UniqueBroadcast:
// noop
default:
if b.Invalidates(cur.b) {
cur.b.Finished()
remove = append(remove, cur)
}
}
return true
})
for _, cur := range remove {
q.deleteItem(cur)
}
}
// Append to the relevant queue.
q.addItem(lb)
}
// deleteItem removes the given item from the overall datastructure. You
// must already hold the mutex.
func (q *TransmitLimitedQueue) deleteItem(cur *limitedBroadcast) {
_ = q.tq.Delete(cur)
if cur.name != "" {
delete(q.tm, cur.name)
}
if q.tq.Len() == 0 {
// At idle there's no reason to let the id generator keep going
// indefinitely.
q.idGen = 0
}
}
// addItem adds the given item into the overall datastructure. You must already
// hold the mutex.
func (q *TransmitLimitedQueue) addItem(cur *limitedBroadcast) {
_ = q.tq.ReplaceOrInsert(cur)
if cur.name != "" {
q.tm[cur.name] = cur
}
}
// getTransmitRange returns a pair of min/max values for transmit values
// represented by the current queue contents. Both values represent actual
// transmit values on the interval [0, len). You must already hold the mutex.
func (q *TransmitLimitedQueue) getTransmitRange() (minTransmit, maxTransmit int) {
if q.lenLocked() == 0 {
return 0, 0
}
minItem, maxItem := q.tq.Min(), q.tq.Max()
if minItem == nil || maxItem == nil {
return 0, 0
}
min := minItem.(*limitedBroadcast).transmits
max := maxItem.(*limitedBroadcast).transmits
return min, max
}
// GetBroadcasts is used to get a number of broadcasts, up to a byte limit
// and applying a per-message overhead as provided.
func (q *TransmitLimitedQueue) GetBroadcasts(overhead, limit int) [][]byte {
q.mu.Lock()
defer q.mu.Unlock()
// Fast path the default case
if q.lenLocked() == 0 {
return nil
}
transmitLimit := retransmitLimit(q.RetransmitMult, q.NumNodes())
var (
bytesUsed int
toSend [][]byte
reinsert []*limitedBroadcast
)
// Visit fresher items first, but only look at stuff that will fit.
// We'll go tier by tier, grabbing the largest items first.
minTr, maxTr := q.getTransmitRange()
for transmits := minTr; transmits <= maxTr; /*do not advance automatically*/ {
free := int64(limit - bytesUsed - overhead)
if free <= 0 {
break // bail out early
}
// Search for the least element on a given tier (by transmit count) as
// defined in the limitedBroadcast.Less function that will fit into our
// remaining space.
greaterOrEqual := &limitedBroadcast{
transmits: transmits,
msgLen: free,
id: math.MaxInt64,
}
lessThan := &limitedBroadcast{
transmits: transmits + 1,
msgLen: math.MaxInt64,
id: math.MaxInt64,
}
var keep *limitedBroadcast
q.tq.AscendRange(greaterOrEqual, lessThan, func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
// Check if this is within our limits
if int64(len(cur.b.Message())) > free {
// If this happens it's a bug in the datastructure or
// surrounding use doing something like having len(Message())
// change over time. There's enough going on here that it's
// probably sane to just skip it and move on for now.
return true
}
keep = cur
return false
})
if keep == nil {
// No more items of an appropriate size in the tier.
transmits++
continue
}
msg := keep.b.Message()
// Add to slice to send
bytesUsed += overhead + len(msg)
toSend = append(toSend, msg)
// Check if we should stop transmission
q.deleteItem(keep)
if keep.transmits+1 >= transmitLimit {
keep.b.Finished()
} else {
// We need to bump this item down to another transmit tier, but
// because it would be in the same direction that we're walking the
// tiers, we will have to delay the reinsertion until we are
// finished our search. Otherwise we'll possibly re-add the message
// when we ascend to the next tier.
keep.transmits++
reinsert = append(reinsert, keep)
}
}
for _, cur := range reinsert {
q.addItem(cur)
}
return toSend
}
// NumQueued returns the number of queued messages
func (q *TransmitLimitedQueue) NumQueued() int {
q.mu.Lock()
defer q.mu.Unlock()
return q.lenLocked()
}
// lenLocked returns the length of the overall queue datastructure. You must
// hold the mutex.
func (q *TransmitLimitedQueue) lenLocked() int {
if q.tq == nil {
return 0
}
return q.tq.Len()
}
// Reset clears all the queued messages. Should only be used for tests.
func (q *TransmitLimitedQueue) Reset() {
q.mu.Lock()
defer q.mu.Unlock()
q.walkReadOnlyLocked(false, func(cur *limitedBroadcast) bool {
cur.b.Finished()
return true
})
q.tq = nil
q.tm = nil
q.idGen = 0
}
// Prune will retain the maxRetain latest messages, and the rest
// will be discarded. This can be used to prevent unbounded queue sizes
func (q *TransmitLimitedQueue) Prune(maxRetain int) {
q.mu.Lock()
defer q.mu.Unlock()
// Do nothing if queue size is less than the limit
for q.tq.Len() > maxRetain {
item := q.tq.Max()
if item == nil {
break
}
cur := item.(*limitedBroadcast)
cur.b.Finished()
q.deleteItem(cur)
}
}
| {
if old, ok := q.tm[lb.name]; ok {
old.b.Finished()
q.deleteItem(old)
}
} | conditional_block |
queue.go | package memberlist
import (
"math"
"sync"
"github.com/google/btree"
)
// TransmitLimitedQueue is used to queue messages to broadcast to
// the cluster (via gossip) but limits the number of transmits per
// message. It also prioritizes messages with lower transmit counts
// (hence newer messages).
type TransmitLimitedQueue struct {
// NumNodes returns the number of nodes in the cluster. This is
// used to determine the retransmit count, which is calculated
// based on the log of this.
NumNodes func() int
// RetransmitMult is the multiplier used to determine the maximum
// number of retransmissions attempted.
RetransmitMult int
mu sync.Mutex
tq *btree.BTree // stores *limitedBroadcast as btree.Item
tm map[string]*limitedBroadcast
idGen int64
}
type limitedBroadcast struct {
transmits int // btree-key[0]: Number of transmissions attempted.
msgLen int64 // btree-key[1]: copied from len(b.Message())
id int64 // btree-key[2]: unique incrementing id stamped at submission time
b Broadcast
name string // set if Broadcast is a NamedBroadcast
}
// Less tests whether the current item is less than the given argument.
//
// This must provide a strict weak ordering.
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
//
// default ordering is
// - [transmits=0, ..., transmits=inf]
// - [transmits=0:len=999, ..., transmits=0:len=2, ...]
// - [transmits=0:len=999,id=999, ..., transmits=0:len=999:id=1, ...]
func (b *limitedBroadcast) Less(than btree.Item) bool {
o := than.(*limitedBroadcast)
if b.transmits < o.transmits {
return true
} else if b.transmits > o.transmits {
return false
}
if b.msgLen > o.msgLen {
return true
} else if b.msgLen < o.msgLen {
return false
}
return b.id > o.id
}
// for testing; emits in transmit order if reverse=false
func (q *TransmitLimitedQueue) orderedView(reverse bool) []*limitedBroadcast {
q.mu.Lock()
defer q.mu.Unlock()
out := make([]*limitedBroadcast, 0, q.lenLocked())
q.walkReadOnlyLocked(reverse, func(cur *limitedBroadcast) bool {
out = append(out, cur)
return true
})
return out
}
// walkReadOnlyLocked calls f for each item in the queue traversing it in
// natural order (by Less) when reverse=false and the opposite when true. You
// must hold the mutex.
//
// This method panics if you attempt to mutate the item during traversal. The
// underlying btree should also not be mutated during traversal.
func (q *TransmitLimitedQueue) walkReadOnlyLocked(reverse bool, f func(*limitedBroadcast) bool) {
if q.lenLocked() == 0 {
return
}
iter := func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
prevTransmits := cur.transmits
prevMsgLen := cur.msgLen
prevID := cur.id
keepGoing := f(cur)
if prevTransmits != cur.transmits || prevMsgLen != cur.msgLen || prevID != cur.id {
panic("edited queue while walking read only")
}
return keepGoing
}
if reverse {
q.tq.Descend(iter) // end with transmit 0
} else {
q.tq.Ascend(iter) // start with transmit 0
}
}
// Broadcast is something that can be broadcasted via gossip to
// the memberlist cluster.
type Broadcast interface {
// Invalidates checks if enqueuing the current broadcast
// invalidates a previous broadcast
Invalidates(b Broadcast) bool
// Returns a byte form of the message
Message() []byte
// Finished is invoked when the message will no longer
// be broadcast, either due to invalidation or to the
// transmit limit being reached
Finished()
}
// NamedBroadcast is an optional extension of the Broadcast interface that
// gives each message a unique string name, and that is used to optimize
//
// You shoud ensure that Invalidates() checks the same uniqueness as the
// example below:
//
// func (b *foo) Invalidates(other Broadcast) bool {
// nb, ok := other.(NamedBroadcast)
// if !ok {
// return false
// }
// return b.Name() == nb.Name()
// }
//
// Invalidates() isn't currently used for NamedBroadcasts, but that may change
// in the future.
type NamedBroadcast interface {
Broadcast
// The unique identity of this broadcast message.
Name() string
}
// UniqueBroadcast is an optional interface that indicates that each message is
// intrinsically unique and there is no need to scan the broadcast queue for
// duplicates.
//
// You should ensure that Invalidates() always returns false if implementing
// this interface. Invalidates() isn't currently used for UniqueBroadcasts, but
// that may change in the future.
type UniqueBroadcast interface {
Broadcast
// UniqueBroadcast is just a marker method for this interface.
UniqueBroadcast()
}
// QueueBroadcast is used to enqueue a broadcast
func (q *TransmitLimitedQueue) QueueBroadcast(b Broadcast) {
q.queueBroadcast(b, 0)
}
// lazyInit initializes internal data structures the first time they are
// needed. You must already hold the mutex.
func (q *TransmitLimitedQueue) lazyInit() {
if q.tq == nil {
q.tq = btree.New(32)
}
if q.tm == nil {
q.tm = make(map[string]*limitedBroadcast)
}
}
// queueBroadcast is like QueueBroadcast but you can use a nonzero value for
// the initial transmit tier assigned to the message. This is meant to be used
// for unit testing.
func (q *TransmitLimitedQueue) queueBroadcast(b Broadcast, initialTransmits int) {
q.mu.Lock()
defer q.mu.Unlock()
q.lazyInit()
if q.idGen == math.MaxInt64 {
// it's super duper unlikely to wrap around within the retransmit limit
q.idGen = 1
} else {
q.idGen++
}
id := q.idGen
lb := &limitedBroadcast{
transmits: initialTransmits,
msgLen: int64(len(b.Message())),
id: id,
b: b,
}
unique := false
if nb, ok := b.(NamedBroadcast); ok {
lb.name = nb.Name()
} else if _, ok := b.(UniqueBroadcast); ok {
unique = true
}
// Check if this message invalidates another.
if lb.name != "" {
if old, ok := q.tm[lb.name]; ok {
old.b.Finished()
q.deleteItem(old)
}
} else if !unique {
// Slow path, hopefully nothing hot hits this.
var remove []*limitedBroadcast
q.tq.Ascend(func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
// Special Broadcasts can only invalidate each other.
switch cur.b.(type) {
case NamedBroadcast:
// noop
case UniqueBroadcast:
// noop
default:
if b.Invalidates(cur.b) {
cur.b.Finished()
remove = append(remove, cur)
}
}
return true
})
for _, cur := range remove {
q.deleteItem(cur)
}
}
// Append to the relevant queue.
q.addItem(lb)
}
// deleteItem removes the given item from the overall datastructure. You
// must already hold the mutex.
func (q *TransmitLimitedQueue) deleteItem(cur *limitedBroadcast) {
_ = q.tq.Delete(cur)
if cur.name != "" {
delete(q.tm, cur.name)
}
if q.tq.Len() == 0 {
// At idle there's no reason to let the id generator keep going
// indefinitely.
q.idGen = 0
}
}
// addItem adds the given item into the overall datastructure. You must already
// hold the mutex.
func (q *TransmitLimitedQueue) addItem(cur *limitedBroadcast) {
_ = q.tq.ReplaceOrInsert(cur)
if cur.name != "" {
q.tm[cur.name] = cur
}
}
// getTransmitRange returns a pair of min/max values for transmit values
// represented by the current queue contents. Both values represent actual
// transmit values on the interval [0, len). You must already hold the mutex.
func (q *TransmitLimitedQueue) | () (minTransmit, maxTransmit int) {
if q.lenLocked() == 0 {
return 0, 0
}
minItem, maxItem := q.tq.Min(), q.tq.Max()
if minItem == nil || maxItem == nil {
return 0, 0
}
min := minItem.(*limitedBroadcast).transmits
max := maxItem.(*limitedBroadcast).transmits
return min, max
}
// GetBroadcasts is used to get a number of broadcasts, up to a byte limit
// and applying a per-message overhead as provided.
func (q *TransmitLimitedQueue) GetBroadcasts(overhead, limit int) [][]byte {
q.mu.Lock()
defer q.mu.Unlock()
// Fast path the default case
if q.lenLocked() == 0 {
return nil
}
transmitLimit := retransmitLimit(q.RetransmitMult, q.NumNodes())
var (
bytesUsed int
toSend [][]byte
reinsert []*limitedBroadcast
)
// Visit fresher items first, but only look at stuff that will fit.
// We'll go tier by tier, grabbing the largest items first.
minTr, maxTr := q.getTransmitRange()
for transmits := minTr; transmits <= maxTr; /*do not advance automatically*/ {
free := int64(limit - bytesUsed - overhead)
if free <= 0 {
break // bail out early
}
// Search for the least element on a given tier (by transmit count) as
// defined in the limitedBroadcast.Less function that will fit into our
// remaining space.
greaterOrEqual := &limitedBroadcast{
transmits: transmits,
msgLen: free,
id: math.MaxInt64,
}
lessThan := &limitedBroadcast{
transmits: transmits + 1,
msgLen: math.MaxInt64,
id: math.MaxInt64,
}
var keep *limitedBroadcast
q.tq.AscendRange(greaterOrEqual, lessThan, func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
// Check if this is within our limits
if int64(len(cur.b.Message())) > free {
// If this happens it's a bug in the datastructure or
// surrounding use doing something like having len(Message())
// change over time. There's enough going on here that it's
// probably sane to just skip it and move on for now.
return true
}
keep = cur
return false
})
if keep == nil {
// No more items of an appropriate size in the tier.
transmits++
continue
}
msg := keep.b.Message()
// Add to slice to send
bytesUsed += overhead + len(msg)
toSend = append(toSend, msg)
// Check if we should stop transmission
q.deleteItem(keep)
if keep.transmits+1 >= transmitLimit {
keep.b.Finished()
} else {
// We need to bump this item down to another transmit tier, but
// because it would be in the same direction that we're walking the
// tiers, we will have to delay the reinsertion until we are
// finished our search. Otherwise we'll possibly re-add the message
// when we ascend to the next tier.
keep.transmits++
reinsert = append(reinsert, keep)
}
}
for _, cur := range reinsert {
q.addItem(cur)
}
return toSend
}
// NumQueued returns the number of queued messages
func (q *TransmitLimitedQueue) NumQueued() int {
q.mu.Lock()
defer q.mu.Unlock()
return q.lenLocked()
}
// lenLocked returns the length of the overall queue datastructure. You must
// hold the mutex.
func (q *TransmitLimitedQueue) lenLocked() int {
if q.tq == nil {
return 0
}
return q.tq.Len()
}
// Reset clears all the queued messages. Should only be used for tests.
func (q *TransmitLimitedQueue) Reset() {
q.mu.Lock()
defer q.mu.Unlock()
q.walkReadOnlyLocked(false, func(cur *limitedBroadcast) bool {
cur.b.Finished()
return true
})
q.tq = nil
q.tm = nil
q.idGen = 0
}
// Prune will retain the maxRetain latest messages, and the rest
// will be discarded. This can be used to prevent unbounded queue sizes
func (q *TransmitLimitedQueue) Prune(maxRetain int) {
q.mu.Lock()
defer q.mu.Unlock()
// Do nothing if queue size is less than the limit
for q.tq.Len() > maxRetain {
item := q.tq.Max()
if item == nil {
break
}
cur := item.(*limitedBroadcast)
cur.b.Finished()
q.deleteItem(cur)
}
}
| getTransmitRange | identifier_name |
queue.go | package memberlist
import (
"math"
"sync"
"github.com/google/btree"
)
// TransmitLimitedQueue is used to queue messages to broadcast to
// the cluster (via gossip) but limits the number of transmits per
// message. It also prioritizes messages with lower transmit counts
// (hence newer messages).
type TransmitLimitedQueue struct {
// NumNodes returns the number of nodes in the cluster. This is
// used to determine the retransmit count, which is calculated
// based on the log of this.
NumNodes func() int
// RetransmitMult is the multiplier used to determine the maximum
// number of retransmissions attempted.
RetransmitMult int
mu sync.Mutex
tq *btree.BTree // stores *limitedBroadcast as btree.Item
tm map[string]*limitedBroadcast
idGen int64
}
type limitedBroadcast struct {
transmits int // btree-key[0]: Number of transmissions attempted.
msgLen int64 // btree-key[1]: copied from len(b.Message())
id int64 // btree-key[2]: unique incrementing id stamped at submission time
b Broadcast
name string // set if Broadcast is a NamedBroadcast
}
// Less tests whether the current item is less than the given argument.
//
// This must provide a strict weak ordering.
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
//
// default ordering is
// - [transmits=0, ..., transmits=inf]
// - [transmits=0:len=999, ..., transmits=0:len=2, ...]
// - [transmits=0:len=999,id=999, ..., transmits=0:len=999:id=1, ...]
func (b *limitedBroadcast) Less(than btree.Item) bool {
o := than.(*limitedBroadcast)
if b.transmits < o.transmits {
return true
} else if b.transmits > o.transmits {
return false
}
if b.msgLen > o.msgLen {
return true
} else if b.msgLen < o.msgLen {
return false
}
return b.id > o.id
}
// for testing; emits in transmit order if reverse=false
func (q *TransmitLimitedQueue) orderedView(reverse bool) []*limitedBroadcast |
// walkReadOnlyLocked calls f for each item in the queue traversing it in
// natural order (by Less) when reverse=false and the opposite when true. You
// must hold the mutex.
//
// This method panics if you attempt to mutate the item during traversal. The
// underlying btree should also not be mutated during traversal.
func (q *TransmitLimitedQueue) walkReadOnlyLocked(reverse bool, f func(*limitedBroadcast) bool) {
if q.lenLocked() == 0 {
return
}
iter := func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
prevTransmits := cur.transmits
prevMsgLen := cur.msgLen
prevID := cur.id
keepGoing := f(cur)
if prevTransmits != cur.transmits || prevMsgLen != cur.msgLen || prevID != cur.id {
panic("edited queue while walking read only")
}
return keepGoing
}
if reverse {
q.tq.Descend(iter) // end with transmit 0
} else {
q.tq.Ascend(iter) // start with transmit 0
}
}
// Broadcast is something that can be broadcasted via gossip to
// the memberlist cluster.
type Broadcast interface {
// Invalidates checks if enqueuing the current broadcast
// invalidates a previous broadcast
Invalidates(b Broadcast) bool
// Returns a byte form of the message
Message() []byte
// Finished is invoked when the message will no longer
// be broadcast, either due to invalidation or to the
// transmit limit being reached
Finished()
}
// NamedBroadcast is an optional extension of the Broadcast interface that
// gives each message a unique string name, and that is used to optimize
//
// You shoud ensure that Invalidates() checks the same uniqueness as the
// example below:
//
// func (b *foo) Invalidates(other Broadcast) bool {
// nb, ok := other.(NamedBroadcast)
// if !ok {
// return false
// }
// return b.Name() == nb.Name()
// }
//
// Invalidates() isn't currently used for NamedBroadcasts, but that may change
// in the future.
type NamedBroadcast interface {
Broadcast
// The unique identity of this broadcast message.
Name() string
}
// UniqueBroadcast is an optional interface that indicates that each message is
// intrinsically unique and there is no need to scan the broadcast queue for
// duplicates.
//
// You should ensure that Invalidates() always returns false if implementing
// this interface. Invalidates() isn't currently used for UniqueBroadcasts, but
// that may change in the future.
type UniqueBroadcast interface {
Broadcast
// UniqueBroadcast is just a marker method for this interface.
UniqueBroadcast()
}
// QueueBroadcast is used to enqueue a broadcast
func (q *TransmitLimitedQueue) QueueBroadcast(b Broadcast) {
q.queueBroadcast(b, 0)
}
// lazyInit initializes internal data structures the first time they are
// needed. You must already hold the mutex.
func (q *TransmitLimitedQueue) lazyInit() {
if q.tq == nil {
q.tq = btree.New(32)
}
if q.tm == nil {
q.tm = make(map[string]*limitedBroadcast)
}
}
// queueBroadcast is like QueueBroadcast but you can use a nonzero value for
// the initial transmit tier assigned to the message. This is meant to be used
// for unit testing.
func (q *TransmitLimitedQueue) queueBroadcast(b Broadcast, initialTransmits int) {
q.mu.Lock()
defer q.mu.Unlock()
q.lazyInit()
if q.idGen == math.MaxInt64 {
// it's super duper unlikely to wrap around within the retransmit limit
q.idGen = 1
} else {
q.idGen++
}
id := q.idGen
lb := &limitedBroadcast{
transmits: initialTransmits,
msgLen: int64(len(b.Message())),
id: id,
b: b,
}
unique := false
if nb, ok := b.(NamedBroadcast); ok {
lb.name = nb.Name()
} else if _, ok := b.(UniqueBroadcast); ok {
unique = true
}
// Check if this message invalidates another.
if lb.name != "" {
if old, ok := q.tm[lb.name]; ok {
old.b.Finished()
q.deleteItem(old)
}
} else if !unique {
// Slow path, hopefully nothing hot hits this.
var remove []*limitedBroadcast
q.tq.Ascend(func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
// Special Broadcasts can only invalidate each other.
switch cur.b.(type) {
case NamedBroadcast:
// noop
case UniqueBroadcast:
// noop
default:
if b.Invalidates(cur.b) {
cur.b.Finished()
remove = append(remove, cur)
}
}
return true
})
for _, cur := range remove {
q.deleteItem(cur)
}
}
// Append to the relevant queue.
q.addItem(lb)
}
// deleteItem removes the given item from the overall datastructure. You
// must already hold the mutex.
func (q *TransmitLimitedQueue) deleteItem(cur *limitedBroadcast) {
_ = q.tq.Delete(cur)
if cur.name != "" {
delete(q.tm, cur.name)
}
if q.tq.Len() == 0 {
// At idle there's no reason to let the id generator keep going
// indefinitely.
q.idGen = 0
}
}
// addItem adds the given item into the overall datastructure. You must already
// hold the mutex.
func (q *TransmitLimitedQueue) addItem(cur *limitedBroadcast) {
_ = q.tq.ReplaceOrInsert(cur)
if cur.name != "" {
q.tm[cur.name] = cur
}
}
// getTransmitRange returns a pair of min/max values for transmit values
// represented by the current queue contents. Both values represent actual
// transmit values on the interval [0, len). You must already hold the mutex.
func (q *TransmitLimitedQueue) getTransmitRange() (minTransmit, maxTransmit int) {
if q.lenLocked() == 0 {
return 0, 0
}
minItem, maxItem := q.tq.Min(), q.tq.Max()
if minItem == nil || maxItem == nil {
return 0, 0
}
min := minItem.(*limitedBroadcast).transmits
max := maxItem.(*limitedBroadcast).transmits
return min, max
}
// GetBroadcasts is used to get a number of broadcasts, up to a byte limit
// and applying a per-message overhead as provided.
func (q *TransmitLimitedQueue) GetBroadcasts(overhead, limit int) [][]byte {
q.mu.Lock()
defer q.mu.Unlock()
// Fast path the default case
if q.lenLocked() == 0 {
return nil
}
transmitLimit := retransmitLimit(q.RetransmitMult, q.NumNodes())
var (
bytesUsed int
toSend [][]byte
reinsert []*limitedBroadcast
)
// Visit fresher items first, but only look at stuff that will fit.
// We'll go tier by tier, grabbing the largest items first.
minTr, maxTr := q.getTransmitRange()
for transmits := minTr; transmits <= maxTr; /*do not advance automatically*/ {
free := int64(limit - bytesUsed - overhead)
if free <= 0 {
break // bail out early
}
// Search for the least element on a given tier (by transmit count) as
// defined in the limitedBroadcast.Less function that will fit into our
// remaining space.
greaterOrEqual := &limitedBroadcast{
transmits: transmits,
msgLen: free,
id: math.MaxInt64,
}
lessThan := &limitedBroadcast{
transmits: transmits + 1,
msgLen: math.MaxInt64,
id: math.MaxInt64,
}
var keep *limitedBroadcast
q.tq.AscendRange(greaterOrEqual, lessThan, func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
// Check if this is within our limits
if int64(len(cur.b.Message())) > free {
// If this happens it's a bug in the datastructure or
// surrounding use doing something like having len(Message())
// change over time. There's enough going on here that it's
// probably sane to just skip it and move on for now.
return true
}
keep = cur
return false
})
if keep == nil {
// No more items of an appropriate size in the tier.
transmits++
continue
}
msg := keep.b.Message()
// Add to slice to send
bytesUsed += overhead + len(msg)
toSend = append(toSend, msg)
// Check if we should stop transmission
q.deleteItem(keep)
if keep.transmits+1 >= transmitLimit {
keep.b.Finished()
} else {
// We need to bump this item down to another transmit tier, but
// because it would be in the same direction that we're walking the
// tiers, we will have to delay the reinsertion until we are
// finished our search. Otherwise we'll possibly re-add the message
// when we ascend to the next tier.
keep.transmits++
reinsert = append(reinsert, keep)
}
}
for _, cur := range reinsert {
q.addItem(cur)
}
return toSend
}
// NumQueued returns the number of queued messages
func (q *TransmitLimitedQueue) NumQueued() int {
q.mu.Lock()
defer q.mu.Unlock()
return q.lenLocked()
}
// lenLocked returns the length of the overall queue datastructure. You must
// hold the mutex.
func (q *TransmitLimitedQueue) lenLocked() int {
if q.tq == nil {
return 0
}
return q.tq.Len()
}
// Reset clears all the queued messages. Should only be used for tests.
func (q *TransmitLimitedQueue) Reset() {
q.mu.Lock()
defer q.mu.Unlock()
q.walkReadOnlyLocked(false, func(cur *limitedBroadcast) bool {
cur.b.Finished()
return true
})
q.tq = nil
q.tm = nil
q.idGen = 0
}
// Prune will retain the maxRetain latest messages, and the rest
// will be discarded. This can be used to prevent unbounded queue sizes
func (q *TransmitLimitedQueue) Prune(maxRetain int) {
q.mu.Lock()
defer q.mu.Unlock()
// Do nothing if queue size is less than the limit
for q.tq.Len() > maxRetain {
item := q.tq.Max()
if item == nil {
break
}
cur := item.(*limitedBroadcast)
cur.b.Finished()
q.deleteItem(cur)
}
}
| {
q.mu.Lock()
defer q.mu.Unlock()
out := make([]*limitedBroadcast, 0, q.lenLocked())
q.walkReadOnlyLocked(reverse, func(cur *limitedBroadcast) bool {
out = append(out, cur)
return true
})
return out
} | identifier_body |
queue.go | package memberlist
import (
"math"
"sync"
"github.com/google/btree"
)
// TransmitLimitedQueue is used to queue messages to broadcast to
// the cluster (via gossip) but limits the number of transmits per
// message. It also prioritizes messages with lower transmit counts
// (hence newer messages).
type TransmitLimitedQueue struct {
// NumNodes returns the number of nodes in the cluster. This is
// used to determine the retransmit count, which is calculated
// based on the log of this.
NumNodes func() int
// RetransmitMult is the multiplier used to determine the maximum
// number of retransmissions attempted.
RetransmitMult int
mu sync.Mutex
tq *btree.BTree // stores *limitedBroadcast as btree.Item
tm map[string]*limitedBroadcast
idGen int64
}
type limitedBroadcast struct {
transmits int // btree-key[0]: Number of transmissions attempted.
msgLen int64 // btree-key[1]: copied from len(b.Message())
id int64 // btree-key[2]: unique incrementing id stamped at submission time
b Broadcast
name string // set if Broadcast is a NamedBroadcast
}
// Less tests whether the current item is less than the given argument.
//
// This must provide a strict weak ordering.
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
//
// default ordering is
// - [transmits=0, ..., transmits=inf]
// - [transmits=0:len=999, ..., transmits=0:len=2, ...]
// - [transmits=0:len=999,id=999, ..., transmits=0:len=999:id=1, ...]
func (b *limitedBroadcast) Less(than btree.Item) bool {
o := than.(*limitedBroadcast)
if b.transmits < o.transmits {
return true
} else if b.transmits > o.transmits {
return false
}
if b.msgLen > o.msgLen {
return true
} else if b.msgLen < o.msgLen {
return false
}
return b.id > o.id
}
// for testing; emits in transmit order if reverse=false
func (q *TransmitLimitedQueue) orderedView(reverse bool) []*limitedBroadcast {
q.mu.Lock()
defer q.mu.Unlock()
out := make([]*limitedBroadcast, 0, q.lenLocked())
q.walkReadOnlyLocked(reverse, func(cur *limitedBroadcast) bool {
out = append(out, cur)
return true
})
return out
}
// walkReadOnlyLocked calls f for each item in the queue traversing it in
// natural order (by Less) when reverse=false and the opposite when true. You
// must hold the mutex.
//
// This method panics if you attempt to mutate the item during traversal. The
// underlying btree should also not be mutated during traversal.
func (q *TransmitLimitedQueue) walkReadOnlyLocked(reverse bool, f func(*limitedBroadcast) bool) {
if q.lenLocked() == 0 {
return
}
iter := func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
prevTransmits := cur.transmits
prevMsgLen := cur.msgLen
prevID := cur.id
keepGoing := f(cur)
if prevTransmits != cur.transmits || prevMsgLen != cur.msgLen || prevID != cur.id {
panic("edited queue while walking read only")
}
return keepGoing
}
if reverse {
q.tq.Descend(iter) // end with transmit 0
} else {
q.tq.Ascend(iter) // start with transmit 0
}
}
// Broadcast is something that can be broadcasted via gossip to
// the memberlist cluster.
type Broadcast interface {
// Invalidates checks if enqueuing the current broadcast
// invalidates a previous broadcast
Invalidates(b Broadcast) bool
// Returns a byte form of the message
Message() []byte
// Finished is invoked when the message will no longer
// be broadcast, either due to invalidation or to the
// transmit limit being reached
Finished()
}
// NamedBroadcast is an optional extension of the Broadcast interface that
// gives each message a unique string name, and that is used to optimize
//
// You shoud ensure that Invalidates() checks the same uniqueness as the
// example below:
//
// func (b *foo) Invalidates(other Broadcast) bool {
// nb, ok := other.(NamedBroadcast)
// if !ok {
// return false
// }
// return b.Name() == nb.Name()
// }
//
// Invalidates() isn't currently used for NamedBroadcasts, but that may change
// in the future.
type NamedBroadcast interface {
Broadcast
// The unique identity of this broadcast message.
Name() string
}
// UniqueBroadcast is an optional interface that indicates that each message is
// intrinsically unique and there is no need to scan the broadcast queue for
// duplicates.
//
// You should ensure that Invalidates() always returns false if implementing
// this interface. Invalidates() isn't currently used for UniqueBroadcasts, but
// that may change in the future.
type UniqueBroadcast interface {
Broadcast
// UniqueBroadcast is just a marker method for this interface.
UniqueBroadcast()
}
// QueueBroadcast is used to enqueue a broadcast
func (q *TransmitLimitedQueue) QueueBroadcast(b Broadcast) {
q.queueBroadcast(b, 0)
}
// lazyInit initializes internal data structures the first time they are
// needed. You must already hold the mutex.
func (q *TransmitLimitedQueue) lazyInit() {
if q.tq == nil {
q.tq = btree.New(32)
}
if q.tm == nil {
q.tm = make(map[string]*limitedBroadcast)
}
}
// queueBroadcast is like QueueBroadcast but you can use a nonzero value for
// the initial transmit tier assigned to the message. This is meant to be used
// for unit testing.
func (q *TransmitLimitedQueue) queueBroadcast(b Broadcast, initialTransmits int) {
q.mu.Lock()
defer q.mu.Unlock()
q.lazyInit()
if q.idGen == math.MaxInt64 {
// it's super duper unlikely to wrap around within the retransmit limit
q.idGen = 1
} else {
q.idGen++
}
id := q.idGen
lb := &limitedBroadcast{
transmits: initialTransmits,
msgLen: int64(len(b.Message())),
id: id,
b: b,
}
unique := false
if nb, ok := b.(NamedBroadcast); ok {
lb.name = nb.Name()
} else if _, ok := b.(UniqueBroadcast); ok {
unique = true
}
// Check if this message invalidates another.
if lb.name != "" {
if old, ok := q.tm[lb.name]; ok {
old.b.Finished()
q.deleteItem(old)
}
} else if !unique {
// Slow path, hopefully nothing hot hits this.
var remove []*limitedBroadcast
q.tq.Ascend(func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
// Special Broadcasts can only invalidate each other.
switch cur.b.(type) {
case NamedBroadcast:
// noop
case UniqueBroadcast:
// noop
default:
if b.Invalidates(cur.b) {
cur.b.Finished()
remove = append(remove, cur)
}
}
return true
})
for _, cur := range remove {
q.deleteItem(cur)
}
}
// Append to the relevant queue.
q.addItem(lb)
}
// deleteItem removes the given item from the overall datastructure. You
// must already hold the mutex.
func (q *TransmitLimitedQueue) deleteItem(cur *limitedBroadcast) {
_ = q.tq.Delete(cur)
if cur.name != "" {
delete(q.tm, cur.name)
}
if q.tq.Len() == 0 {
// At idle there's no reason to let the id generator keep going
// indefinitely.
q.idGen = 0
}
}
// addItem adds the given item into the overall datastructure. You must already
// hold the mutex.
func (q *TransmitLimitedQueue) addItem(cur *limitedBroadcast) {
_ = q.tq.ReplaceOrInsert(cur)
if cur.name != "" {
q.tm[cur.name] = cur
}
} | // getTransmitRange returns a pair of min/max values for transmit values
// represented by the current queue contents. Both values represent actual
// transmit values on the interval [0, len). You must already hold the mutex.
func (q *TransmitLimitedQueue) getTransmitRange() (minTransmit, maxTransmit int) {
if q.lenLocked() == 0 {
return 0, 0
}
minItem, maxItem := q.tq.Min(), q.tq.Max()
if minItem == nil || maxItem == nil {
return 0, 0
}
min := minItem.(*limitedBroadcast).transmits
max := maxItem.(*limitedBroadcast).transmits
return min, max
}
// GetBroadcasts is used to get a number of broadcasts, up to a byte limit
// and applying a per-message overhead as provided.
func (q *TransmitLimitedQueue) GetBroadcasts(overhead, limit int) [][]byte {
q.mu.Lock()
defer q.mu.Unlock()
// Fast path the default case
if q.lenLocked() == 0 {
return nil
}
transmitLimit := retransmitLimit(q.RetransmitMult, q.NumNodes())
var (
bytesUsed int
toSend [][]byte
reinsert []*limitedBroadcast
)
// Visit fresher items first, but only look at stuff that will fit.
// We'll go tier by tier, grabbing the largest items first.
minTr, maxTr := q.getTransmitRange()
for transmits := minTr; transmits <= maxTr; /*do not advance automatically*/ {
free := int64(limit - bytesUsed - overhead)
if free <= 0 {
break // bail out early
}
// Search for the least element on a given tier (by transmit count) as
// defined in the limitedBroadcast.Less function that will fit into our
// remaining space.
greaterOrEqual := &limitedBroadcast{
transmits: transmits,
msgLen: free,
id: math.MaxInt64,
}
lessThan := &limitedBroadcast{
transmits: transmits + 1,
msgLen: math.MaxInt64,
id: math.MaxInt64,
}
var keep *limitedBroadcast
q.tq.AscendRange(greaterOrEqual, lessThan, func(item btree.Item) bool {
cur := item.(*limitedBroadcast)
// Check if this is within our limits
if int64(len(cur.b.Message())) > free {
// If this happens it's a bug in the datastructure or
// surrounding use doing something like having len(Message())
// change over time. There's enough going on here that it's
// probably sane to just skip it and move on for now.
return true
}
keep = cur
return false
})
if keep == nil {
// No more items of an appropriate size in the tier.
transmits++
continue
}
msg := keep.b.Message()
// Add to slice to send
bytesUsed += overhead + len(msg)
toSend = append(toSend, msg)
// Check if we should stop transmission
q.deleteItem(keep)
if keep.transmits+1 >= transmitLimit {
keep.b.Finished()
} else {
// We need to bump this item down to another transmit tier, but
// because it would be in the same direction that we're walking the
// tiers, we will have to delay the reinsertion until we are
// finished our search. Otherwise we'll possibly re-add the message
// when we ascend to the next tier.
keep.transmits++
reinsert = append(reinsert, keep)
}
}
for _, cur := range reinsert {
q.addItem(cur)
}
return toSend
}
// NumQueued returns the number of queued messages
func (q *TransmitLimitedQueue) NumQueued() int {
q.mu.Lock()
defer q.mu.Unlock()
return q.lenLocked()
}
// lenLocked returns the length of the overall queue datastructure. You must
// hold the mutex.
func (q *TransmitLimitedQueue) lenLocked() int {
if q.tq == nil {
return 0
}
return q.tq.Len()
}
// Reset clears all the queued messages. Should only be used for tests.
func (q *TransmitLimitedQueue) Reset() {
q.mu.Lock()
defer q.mu.Unlock()
q.walkReadOnlyLocked(false, func(cur *limitedBroadcast) bool {
cur.b.Finished()
return true
})
q.tq = nil
q.tm = nil
q.idGen = 0
}
// Prune will retain the maxRetain latest messages, and the rest
// will be discarded. This can be used to prevent unbounded queue sizes
func (q *TransmitLimitedQueue) Prune(maxRetain int) {
q.mu.Lock()
defer q.mu.Unlock()
// Do nothing if queue size is less than the limit
for q.tq.Len() > maxRetain {
item := q.tq.Max()
if item == nil {
break
}
cur := item.(*limitedBroadcast)
cur.b.Finished()
q.deleteItem(cur)
}
} | random_line_split | |
engine.go | package bengine
/////////////////////////////////////////////////////////////////////
// imports
import (
"math/rand"
. "github.com/easychessanimations/gochess/butils"
)
/////////////////////////////////////////////////////////////////////
// package bengine implements board, move generation and position searching
//
// the package can be used as a general library for chess tool writing and
// provides the core functionality for the zurichess chess engine
//
// position (basic.go, position.go) uses:
//
// * bitboards for representation - https://chessprogramming.wikispaces.com/Bitboards
// * magic bitboards for sliding move generation - https://chessprogramming.wikispaces.com/Magic+Bitboards
//
// search (engine.go) features implemented are:
//
// * aspiration window - https://chessprogramming.wikispaces.com/Aspiration+Windows
// * check extension - https://chessprogramming.wikispaces.com/Check+Extensions
// * fail soft - https://chessprogramming.wikispaces.com/Fail-Soft
// * futility Pruning - https://chessprogramming.wikispaces.com/Futility+pruning
// * history leaf pruning - https://chessprogramming.wikispaces.com/History+Leaf+Pruning
// * killer move heuristic - https://chessprogramming.wikispaces.com/Killer+Heuristic
// * late move redution (LMR) - https://chessprogramming.wikispaces.com/Late+Move+Reductions
// * mate distance pruning - https://chessprogramming.wikispaces.com/Mate+Distance+Pruning
// * negamax framework - http://chessprogramming.wikispaces.com/Alpha-Beta#Implementation-Negamax%20Framework
// * null move prunning (NMP) - https://chessprogramming.wikispaces.com/Null+Move+Pruning
// * principal variation search (PVS) - https://chessprogramming.wikispaces.com/Principal+Variation+Search
// * quiescence search - https://chessprogramming.wikispaces.com/Quiescence+Search
// * razoring - https://chessprogramming.wikispaces.com/Razoring
// * static Single Evaluation - https://chessprogramming.wikispaces.com/Static+Exchange+Evaluation
// * zobrist hashing - https://chessprogramming.wikispaces.com/Zobrist+Hashing
//
// move ordering (move_ordering.go) consists of:
//
// * hash move heuristic
// * captures sorted by MVVLVA - https://chessprogramming.wikispaces.com/MVV-LVA
// * killer moves - https://chessprogramming.wikispaces.com/Killer+Move
// * history Heuristic - https://chessprogramming.wikispaces.com/History+Heuristic
// * countermove Heuristic - https://chessprogramming.wikispaces.com/Countermove+Heuristic
//
// evaluation (material.go) consists of
//
// * material and mobility
// * piece square tables
// * king pawn shield - https://chessprogramming.wikispaces.com/King+Safety
// * king safery ala Toga style - https://chessprogramming.wikispaces.com/King+Safety#Attacking%20King%20Zone
// * pawn structure: connected, isolated, double, passed, rammed. Evaluation is cached (see cache.go)
// * attacks on minors and majors
// * rooks on open and semiopenfiles - https://chessprogramming.wikispaces.com/Rook+on+Open+File
// * tapered evaluation - https://chessprogramming.wikispaces.com/Tapered+Eval
/////////////////////////////////////////////////////////////////////
// member functions
// tryMove descends on the search tree; this function
// is called from searchTree after the move is executed
// and it will undo the move
//
// α, β represent lower and upper bounds
// depth is the remaining depth (decreasing)
// lmr is how much to reduce a late move. Implies non-null move
// nullWindow indicates whether to scout first. Implies non-null move
//
// returns the score from the deeper search
func (eng *Engine) tryMove(α, β, depth, lmr int32, nullWindow bool) int32 {
depth--
score := α + 1
if lmr > 0 { // reduce late moves
score = -eng.searchTree(-α-1, -α, depth-lmr)
}
if score > α { // if late move reduction is disabled or has failed
if nullWindow {
score = -eng.searchTree(-α-1, -α, depth)
if α < score && score < β {
score = -eng.searchTree(-β, -α, depth)
}
} else {
score = -eng.searchTree(-β, -α, depth)
}
}
eng.UndoMove()
return score
}
// isIgnoredRootMove returns true if move should be ignored at root
func (eng *Engine) isIgnoredRootMove(move Move) bool {
if eng.ply() != 0 {
return false
}
for _, m := range eng.ignoreRootMoves {
if m == move {
return true
}
}
for _, m := range eng.onlyRootMoves {
if m == move {
return false
}
}
return len(eng.onlyRootMoves) != 0
}
// searchTree implements searchTree framework
//
// searchTree fails soft, i.e. the score returned can be outside the bounds
//
// α, β represent lower and upper bounds
// depth is the search depth (decreasing)
//
// returns the score of the current position up to depth (modulo reductions/extensions)
// the returned score is from current player's POV
//
// invariants:
// if score <= α then the search failed low and the score is an upper bound
// else if score >= β then the search failed high and the score is a lower bound
// else score is exact
//
// assuming this is a maximizing nodes, failing high means that a
// minimizing ancestor node already has a better alternative
func (eng *Engine) searchTree(α, β, depth int32) int32 {
ply := eng.ply()
pvNode := α+1 < β
pos := eng.Position
us := pos.Us()
// update statistics
eng.Stats.Nodes++
if !eng.stopped && eng.Stats.Nodes >= eng.checkpoint {
eng.checkpoint = eng.Stats.Nodes + checkpointStep
if eng.timeControl.Stopped() {
eng.stopped = true
}
}
if eng.stopped {
return α
}
if pvNode && ply > eng.Stats.SelDepth {
eng.Stats.SelDepth = ply
}
// verify that this is not already an endgame
if score, done := eng.endPosition(); done && (ply != 0 || score != 0) {
// at root we ignore draws because some GUIs don't properly detect
// theoretical draws; e.g. cutechess doesn't detect that kings and
// bishops when all bishops are on the same color; if the position
// is a theoretical draw, keep searching for a move
return score
}
// mate pruning: if an ancestor already has a mate in ply moves then
// the search will always fail low so we return the lowest winning score
if MateScore-ply <= α {
return KnownWinScore
}
// stop searching when the maximum search depth is reached
// depth can be < 0 due to aggressive LMR
if depth <= 0 {
return eng.searchQuiescence(α, β)
}
// check the transposition table
// entry will store the cached static evaluation which may be computed later
entry := eng.retrieveHash()
hash := entry.move
if eng.isIgnoredRootMove(hash) {
entry = hashEntry{}
hash = NullMove
}
if score := int32(entry.score); depth <= int32(entry.depth) &&
isInBounds(entry.kind, α, β, score) &&
(ply != 0 || !eng.isIgnoredRootMove(hash)) {
if pvNode {
// update the pv table, otherwise we risk not having a node at root
// if the pv entry was overwritten
eng.pvTable.Put(pos, hash)
}
if score >= β && hash != NullMove {
// if this is a CUT node, update the killer like in the regular move loop
eng.stack.SaveKiller(hash)
}
return score
}
sideIsChecked := pos.IsChecked(us)
// do a null move; if the null move fails high then the current
// position is too good, so opponent will not play it
// verification that we are not in check is done by tryMove
// which bails out if after the null move we are still in check
if !sideIsChecked && // nullmove is illegal when in check
MinorsAndMajors(pos, us) != 0 && // at least one minor/major piece.
KnownLossScore < α && β < KnownWinScore && // disable in lost or won positions
(entry.kind&hasStatic == 0 || int32(entry.static) >= β) {
eng.DoMove(NullMove)
reduction := 1 + depth/3
score := eng.tryMove(β-1, β, depth-reduction, 0, false)
if score >= β && score < KnownWinScore {
return score
}
}
// razoring at very low depth: if QS is under a considerable margin
// we return that score
if depth == 1 &&
!sideIsChecked && // disable in check
!pvNode && // disable in pv nodes
KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
rα := α - futilityMargin
if score := eng.searchQuiescence(rα, rα+1); score <= rα {
return score
}
}
// futility and history pruning at frontier nodes
// based on Deep Futility Pruning http://home.hccnet.nl/h.g.muller/deepfut.html
// based on History Leaf Pruning https://chessprogramming.wikispaces.com/History+Leaf+Pruning
// statically evaluates the position. Use static evaluation from hash if available
static := int32(0)
allowLeafsPruning := false
if depth <= futilityDepthLimit && // enable when close to the frontier
!sideIsChecked && // disable in check
!pvNode && // disable in pv nodes
KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
allowLeafsPruning = true
static = eng.cachedScore(&entry)
}
| // dropped true if not all moves were searched
// mate cannot be declared unless all moves were tested
dropped := false
numMoves := int32(0)
eng.stack.GenerateMoves(Violent|Quiet, hash)
for move := eng.stack.PopMove(); move != NullMove; move = eng.stack.PopMove() {
if ply == 0 {
if eng.isIgnoredRootMove(move) {
continue
}
eng.Log.CurrMove(int(depth), move, int(numMoves+1))
}
givesCheck := pos.GivesCheck(move)
critical := move == hash || eng.stack.IsKiller(move)
history := eng.history.get(move)
newDepth := depth
numMoves++
if allowLeafsPruning && !critical && !givesCheck && localα > KnownLossScore {
// prune moves that do not raise alphas and moves that performed bad historically
// prune bad captures moves that performed bad historically
if isFutile(pos, static, α, depth*futilityMargin, move) ||
history < -10 && move.IsQuiet() ||
see(pos, move) < -futilityMargin {
dropped = true
continue
}
}
// extend good moves that also gives check
// see discussion: http://www.talkchess.com/forum/viewtopic.php?t=56361
// when the move gives check, history pruning and futility pruning are also disabled
if givesCheck && !seeSign(pos, move) {
newDepth += checkDepthExtension
critical = true
}
// late move reduction: search best moves with full depth, reduce remaining moves
lmr := int32(0)
if !sideIsChecked && depth > lmrDepthLimit && !critical {
// reduce quiet moves and bad captures more at high depths and after many quiet moves
// large numMoves means it's likely not a CUT node. Large depth means reductions are less risky
if move.IsQuiet() {
if history <= 0 {
lmr = 2 + min(depth, numMoves)/6
} else {
lmr = 1 + min(depth, numMoves)/6
}
} else if see := see(pos, move); see < -futilityMargin {
lmr = 2 + min(depth, numMoves)/6
} else if see < 0 {
lmr = 1 + min(depth, numMoves)/6
}
}
// skip illegal moves that leave the king in check
eng.DoMove(move)
if pos.IsChecked(us) {
eng.UndoMove()
continue
}
score := eng.tryMove(max(α, localα), β, newDepth, lmr, numMoves > 1)
if score >= β {
// fail high, cut node
eng.history.add(move, 5+5*depth)
eng.stack.SaveKiller(move)
eng.updateHash(failedHigh|(entry.kind&hasStatic), depth, score, move, int32(entry.static))
return score
}
if score > localα {
bestMove, localα = move, score
}
eng.history.add(move, -1)
}
bound := getBound(α, β, localα)
if !dropped && bestMove == NullMove {
// if no move was found then the game is over
bound = exact
if sideIsChecked {
localα = MatedScore + ply
} else {
localα = 0
}
}
eng.updateHash(bound|(entry.kind&hasStatic), depth, localα, bestMove, int32(entry.static))
return localα
}
// search starts the search up to depth depth
// the returned score is from current side to move POV
// estimated is the score from previous depths
func (eng *Engine) search(depth, estimated int32) int32 {
// this method only implements aspiration windows
//
// the gradual widening algorithm is the one used by RobboLito
// and Stockfish and it is explained here:
// http://www.talkchess.com/forum/viewtopic.php?topic_view=threads&p=499768&t=46624
γ, δ := estimated, int32(initialAspirationWindow)
α, β := max(γ-δ, -InfinityScore), min(γ+δ, InfinityScore)
score := estimated
if depth < 4 {
// disable aspiration window for very low search depths
α, β = -InfinityScore, +InfinityScore
}
for !eng.stopped {
// at root a non-null move is required, cannot prune based on null-move
score = eng.searchTree(α, β, depth)
if score <= α {
α = max(α-δ, -InfinityScore)
δ += δ / 2
} else if score >= β {
β = min(β+δ, InfinityScore)
δ += δ / 2
} else {
return score
}
}
return score
}
// searchMultiPV searches eng.options.MultiPV principal variations from current position
// returns score and the moves of the highest scoring pv line (possible empty)
// if a pv is not found (e.g. search is stopped during the first ply), return 0, nil
func (eng *Engine) searchMultiPV(depth, estimated int32) (int32, []Move) {
type pv struct {
score int32
moves []Move
}
multiPV := eng.Options.MultiPV
searchMultiPV := (eng.Options.HandicapLevel+4)/5 + 1
if multiPV < searchMultiPV {
multiPV = searchMultiPV
}
pvs := make([]pv, 0, multiPV)
eng.ignoreRootMoves = eng.ignoreRootMoves[:0]
for p := 0; p < multiPV; p++ {
if eng.UseAB {
// search using naive alphabeta
estimated = eng.searchAB(depth, estimated)
} else {
estimated = eng.search(depth, estimated)
}
if eng.stopped {
break // if eng has been stopped then this is not a legit pv
}
var moves []Move
if eng.UseAB {
// get pev from naive alphabeta's pv table
moves = eng.pvTableAB.Get(eng.Position)
} else {
moves = eng.pvTable.Get(eng.Position)
}
hasPV := len(moves) != 0 && !eng.isIgnoredRootMove(moves[0])
if p == 0 || hasPV { // at depth 0 we might not get a PV
pvs = append(pvs, pv{estimated, moves})
}
if !hasPV {
break
}
// if there is PV ignore the first move for the next PVs
eng.ignoreRootMoves = append(eng.ignoreRootMoves, moves[0])
}
// sort PVs by score
if len(pvs) == 0 {
return 0, nil
}
for i := range pvs {
for j := i; j >= 0; j-- {
if j == 0 || pvs[j-1].score > pvs[i].score {
tmp := pvs[i]
copy(pvs[j+1:i+1], pvs[j:i])
pvs[j] = tmp
break
}
}
}
for i := range pvs {
eng.Log.PrintPV(eng.Stats, i+1, pvs[i].score, pvs[i].moves)
}
// for best play return the PV with highest score
if eng.Options.HandicapLevel == 0 || len(pvs) <= 1 {
return pvs[0].score, pvs[0].moves
}
// PVs are sorted by score. Pick one PV at random
// and if the score is not too far off, return it
s := int32(eng.Options.HandicapLevel)
d := s*s/2 + s*10 + 5
n := rand.Intn(len(pvs))
for pvs[n].score+d < pvs[0].score {
n--
}
return pvs[n].score, pvs[n].moves
}
// Play evaluates current position. See PlayMoves for the returned values
func (eng *Engine) Play(tc *TimeControl) (score int32, moves []Move) {
return eng.PlayMoves(tc, nil)
}
// PlayMoves evaluates current position searching only moves specifid by rootMoves
//
// returns the principal variation, that is
// moves[0] is the best move found and
// moves[1] is the pondering move
//
// if rootMoves is nil searches all root moves
//
// returns a nil pv if no move was found because the game is already finished
// returns empty pv array if it's valid position, but no pv was found (e.g. search depth is 0)
//
// Time control, tc, should already be started
func (eng *Engine) PlayMoves(tc *TimeControl, rootMoves []Move) (score int32, moves []Move) {
if !initialized {
initEngine()
}
eng.Log.BeginSearch()
eng.Stats = Stats{Depth: -1}
eng.rootPly = eng.Position.Ply
eng.timeControl = tc
eng.stopped = false
eng.checkpoint = checkpointStep
eng.stack.Reset(eng.Position)
eng.history.newSearch()
eng.onlyRootMoves = rootMoves
for depth := int32(0); depth < 64; depth++ {
if !tc.NextDepth(depth) {
// stop if tc control says we are done
// search at least one depth, otherwise a move cannot be returned
break
}
eng.Stats.Depth = depth
if s, m := eng.searchMultiPV(depth, score); len(moves) == 0 || len(m) != 0 {
score, moves = s, m
}
}
eng.Log.EndSearch()
if len(moves) == 0 && !eng.Position.HasLegalMoves() {
return 0, nil
} else if moves == nil {
return score, []Move{}
}
return score, moves
}
// ply returns the ply from the beginning of the search
func (eng *Engine) ply() int32 {
return int32(eng.Position.Ply - eng.rootPly)
}
// SetPosition sets current position
// if pos is nil, the starting position is set
func (eng *Engine) SetPosition(pos *Position) {
if pos != nil {
eng.Position = pos
} else {
eng.Position, _ = PositionFromFEN(FENStartPos)
}
}
// DoMove executes a move.
func (eng *Engine) DoMove(move Move) {
eng.Position.DoMove(move)
GlobalHashTable.prefetch(eng.Position)
}
// UndoMove undoes the last move
func (eng *Engine) UndoMove() {
eng.Position.UndoMove()
}
// Score evaluates current position from current player's POV
func (eng *Engine) Score() int32 {
return Evaluate(eng.Position).GetCentipawnsScore() * eng.Position.Us().Multiplier()
}
// cachedScore implements a cache on top of Score
// the cached static evaluation is stored in the hashEntry
func (eng *Engine) cachedScore(e *hashEntry) int32 {
if e.kind&hasStatic == 0 {
e.kind |= hasStatic
e.static = int16(eng.Score())
}
return int32(e.static)
}
// endPosition determines whether the current position is an end game
// returns score and a bool if the game has ended
func (eng *Engine) endPosition() (int32, bool) {
pos := eng.Position // shortcut
// trivial cases when kings are missing
if Kings(pos, White) == 0 {
if Kings(pos, Black) == 0 {
return 0, true // both kings are missing
}
return pos.Us().Multiplier() * (MatedScore + eng.ply()), true
}
if Kings(pos, Black) == 0 {
return pos.Us().Multiplier() * (MateScore - eng.ply()), true
}
// neither side can mate
if pos.InsufficientMaterial() {
return 0, true
}
// fifty full moves without a capture or a pawn move
if pos.FiftyMoveRule() {
return 0, true
}
// repetition is a draw
// at root we need to continue searching even if we saw two repetitions already,
// however we can prune deeper search only at two repetitions
if r := pos.ThreeFoldRepetition(); eng.ply() > 0 && r >= 2 || r >= 3 {
return 0, true
}
return 0, false
}
// retrieveHash gets from GlobalHashTable the current position
func (eng *Engine) retrieveHash() hashEntry {
entry := GlobalHashTable.get(eng.Position)
if entry.kind == 0 || entry.move != NullMove && !eng.Position.IsPseudoLegal(entry.move) {
eng.Stats.CacheMiss++
return hashEntry{}
}
// return mate score relative to root
// the score was adjusted relative to position before the hash table was updated
if entry.score < KnownLossScore {
entry.score += int16(eng.ply())
} else if entry.score > KnownWinScore {
entry.score -= int16(eng.ply())
}
eng.Stats.CacheHit++
return entry
}
// updateHash updates GlobalHashTable with the current position
func (eng *Engine) updateHash(flags hashFlags, depth, score int32, move Move, static int32) {
// if search is stopped then score cannot be trusted
if eng.stopped {
return
}
// update principal variation table in exact nodes
if flags&exact != 0 {
eng.pvTable.Put(eng.Position, move)
}
if eng.ply() == 0 && (len(eng.ignoreRootMoves) != 0 || len(eng.onlyRootMoves) != 0) {
// at root if there are moves to ignore (e.g. because of multipv)
// then this is an incomplete search, so don't update the hash
return
}
// save the mate score relative to the current position
// when retrieving from hash the score will be adjusted relative to root
if score < KnownLossScore {
score -= eng.ply()
} else if score > KnownWinScore {
score += eng.ply()
}
GlobalHashTable.put(eng.Position, hashEntry{
kind: flags,
score: int16(score),
depth: int8(depth),
move: move,
static: int16(static),
})
}
// searchQuiescence evaluates the position after solving all captures
//
// this is a very limited search which considers only some violent moves
// depth is ignored, so hash uses depth 0; search continues until
// stand pat or no capture can improve the score
func (eng *Engine) searchQuiescence(α, β int32) int32 {
eng.Stats.Nodes++
entry := eng.retrieveHash()
if score := int32(entry.score); isInBounds(entry.kind, α, β, score) {
return score
}
static := eng.cachedScore(&entry)
if static >= β {
// stand pat if the static score is already a cut-off
eng.updateHash(failedHigh|hasStatic, 0, static, entry.move, static)
return static
}
pos := eng.Position
us := pos.Us()
inCheck := pos.IsChecked(us)
localα := max(α, static)
bestMove := entry.move
eng.stack.GenerateMoves(Violent, NullMove)
for move := eng.stack.PopMove(); move != NullMove; move = eng.stack.PopMove() {
// prune futile moves that would anyway result in a stand-pat at that next depth
if !inCheck && isFutile(pos, static, α, futilityMargin, move) ||
!inCheck && seeSign(pos, move) {
continue
}
// discard illegal or losing captures
eng.DoMove(move)
if eng.Position.IsChecked(us) {
eng.UndoMove()
continue
}
score := -eng.searchQuiescence(-β, -localα)
eng.UndoMove()
if score >= β {
eng.updateHash(failedHigh|hasStatic, 0, score, move, static)
return score
}
if score > localα {
localα = score
bestMove = move
}
}
eng.updateHash(getBound(α, β, localα)|hasStatic, 0, localα, bestMove, static)
return localα
}
func initEngine() {
var fens = [FigureArraySize]string{
Pawn: "rnbqkbnr/ppp1pppp/8/8/3P4/8/PPP1PPPP/RNBQKBNR w - - 0 1",
Knight: "r1bqkbnr/pppppppp/8/8/3N4/8/PPPPPPPP/R1BQKBNR w - - 0 1",
Bishop: "rn1qkbnr/pppppppp/8/8/3B4/8/PPPPPPPP/RN1QKBNR w - - 0 1",
Rook: "rnbqkbn1/pppppppp/8/8/3R4/8/PPPPPPPP/RNBQKBN1 w - - 0 1",
Queen: "rnb1kbnr/pppppppp/8/8/3Q4/8/PPPPPPPP/RNB1KBNR w - - 0 1",
}
for f, fen := range fens {
if fen != "" {
pos, _ := PositionFromFEN(fen)
futilityFigureBonus[f] = Evaluate(pos).GetCentipawnsScore()
}
}
initialized = true
}
///////////////////////////////////////////////////////////////////// | // principal variation search: search with a null window if there is already a good move
bestMove, localα := NullMove, int32(-InfinityScore) | random_line_split |
engine.go | package bengine
/////////////////////////////////////////////////////////////////////
// imports
import (
"math/rand"
. "github.com/easychessanimations/gochess/butils"
)
/////////////////////////////////////////////////////////////////////
// package bengine implements board, move generation and position searching
//
// the package can be used as a general library for chess tool writing and
// provides the core functionality for the zurichess chess engine
//
// position (basic.go, position.go) uses:
//
// * bitboards for representation - https://chessprogramming.wikispaces.com/Bitboards
// * magic bitboards for sliding move generation - https://chessprogramming.wikispaces.com/Magic+Bitboards
//
// search (engine.go) features implemented are:
//
// * aspiration window - https://chessprogramming.wikispaces.com/Aspiration+Windows
// * check extension - https://chessprogramming.wikispaces.com/Check+Extensions
// * fail soft - https://chessprogramming.wikispaces.com/Fail-Soft
// * futility Pruning - https://chessprogramming.wikispaces.com/Futility+pruning
// * history leaf pruning - https://chessprogramming.wikispaces.com/History+Leaf+Pruning
// * killer move heuristic - https://chessprogramming.wikispaces.com/Killer+Heuristic
// * late move redution (LMR) - https://chessprogramming.wikispaces.com/Late+Move+Reductions
// * mate distance pruning - https://chessprogramming.wikispaces.com/Mate+Distance+Pruning
// * negamax framework - http://chessprogramming.wikispaces.com/Alpha-Beta#Implementation-Negamax%20Framework
// * null move prunning (NMP) - https://chessprogramming.wikispaces.com/Null+Move+Pruning
// * principal variation search (PVS) - https://chessprogramming.wikispaces.com/Principal+Variation+Search
// * quiescence search - https://chessprogramming.wikispaces.com/Quiescence+Search
// * razoring - https://chessprogramming.wikispaces.com/Razoring
// * static Single Evaluation - https://chessprogramming.wikispaces.com/Static+Exchange+Evaluation
// * zobrist hashing - https://chessprogramming.wikispaces.com/Zobrist+Hashing
//
// move ordering (move_ordering.go) consists of:
//
// * hash move heuristic
// * captures sorted by MVVLVA - https://chessprogramming.wikispaces.com/MVV-LVA
// * killer moves - https://chessprogramming.wikispaces.com/Killer+Move
// * history Heuristic - https://chessprogramming.wikispaces.com/History+Heuristic
// * countermove Heuristic - https://chessprogramming.wikispaces.com/Countermove+Heuristic
//
// evaluation (material.go) consists of
//
// * material and mobility
// * piece square tables
// * king pawn shield - https://chessprogramming.wikispaces.com/King+Safety
// * king safery ala Toga style - https://chessprogramming.wikispaces.com/King+Safety#Attacking%20King%20Zone
// * pawn structure: connected, isolated, double, passed, rammed. Evaluation is cached (see cache.go)
// * attacks on minors and majors
// * rooks on open and semiopenfiles - https://chessprogramming.wikispaces.com/Rook+on+Open+File
// * tapered evaluation - https://chessprogramming.wikispaces.com/Tapered+Eval
/////////////////////////////////////////////////////////////////////
// member functions
// tryMove descends on the search tree; this function
// is called from searchTree after the move is executed
// and it will undo the move
//
// α, β represent lower and upper bounds
// depth is the remaining depth (decreasing)
// lmr is how much to reduce a late move. Implies non-null move
// nullWindow indicates whether to scout first. Implies non-null move
//
// returns the score from the deeper search
func (eng *Engine) tryMove(α, β, depth, lmr int32, nullWindow bool) int32 {
depth--
score := α + 1
if lmr > 0 { // reduce late moves
score = -eng.searchTree(-α-1, -α, depth-lmr)
}
if score > α { // if late move reduction is disabled or has failed
if nullWindow {
score = -eng.searchTree(-α-1, -α, depth)
if α < score && score < β {
score = -eng.searchTree(-β, -α, depth)
}
} else {
score = -eng.searchTree(-β, -α, depth)
}
}
eng.UndoMove()
return score
}
// isIgnoredRootMove returns true if move should be ignored at root
func (eng *Engine) isIgnoredRootMove(move Move) bool {
if eng.ply() | implements searchTree framework
//
// searchTree fails soft, i.e. the score returned can be outside the bounds
//
// α, β represent lower and upper bounds
// depth is the search depth (decreasing)
//
// returns the score of the current position up to depth (modulo reductions/extensions)
// the returned score is from current player's POV
//
// invariants:
// if score <= α then the search failed low and the score is an upper bound
// else if score >= β then the search failed high and the score is a lower bound
// else score is exact
//
// assuming this is a maximizing nodes, failing high means that a
// minimizing ancestor node already has a better alternative
func (eng *Engine) searchTree(α, β, depth int32) int32 {
ply := eng.ply()
pvNode := α+1 < β
pos := eng.Position
us := pos.Us()
// update statistics
eng.Stats.Nodes++
if !eng.stopped && eng.Stats.Nodes >= eng.checkpoint {
eng.checkpoint = eng.Stats.Nodes + checkpointStep
if eng.timeControl.Stopped() {
eng.stopped = true
}
}
if eng.stopped {
return α
}
if pvNode && ply > eng.Stats.SelDepth {
eng.Stats.SelDepth = ply
}
// verify that this is not already an endgame
if score, done := eng.endPosition(); done && (ply != 0 || score != 0) {
// at root we ignore draws because some GUIs don't properly detect
// theoretical draws; e.g. cutechess doesn't detect that kings and
// bishops when all bishops are on the same color; if the position
// is a theoretical draw, keep searching for a move
return score
}
// mate pruning: if an ancestor already has a mate in ply moves then
// the search will always fail low so we return the lowest winning score
if MateScore-ply <= α {
return KnownWinScore
}
// stop searching when the maximum search depth is reached
// depth can be < 0 due to aggressive LMR
if depth <= 0 {
return eng.searchQuiescence(α, β)
}
// check the transposition table
// entry will store the cached static evaluation which may be computed later
entry := eng.retrieveHash()
hash := entry.move
if eng.isIgnoredRootMove(hash) {
entry = hashEntry{}
hash = NullMove
}
if score := int32(entry.score); depth <= int32(entry.depth) &&
isInBounds(entry.kind, α, β, score) &&
(ply != 0 || !eng.isIgnoredRootMove(hash)) {
if pvNode {
// update the pv table, otherwise we risk not having a node at root
// if the pv entry was overwritten
eng.pvTable.Put(pos, hash)
}
if score >= β && hash != NullMove {
// if this is a CUT node, update the killer like in the regular move loop
eng.stack.SaveKiller(hash)
}
return score
}
sideIsChecked := pos.IsChecked(us)
// do a null move; if the null move fails high then the current
// position is too good, so opponent will not play it
// verification that we are not in check is done by tryMove
// which bails out if after the null move we are still in check
if !sideIsChecked && // nullmove is illegal when in check
MinorsAndMajors(pos, us) != 0 && // at least one minor/major piece.
KnownLossScore < α && β < KnownWinScore && // disable in lost or won positions
(entry.kind&hasStatic == 0 || int32(entry.static) >= β) {
eng.DoMove(NullMove)
reduction := 1 + depth/3
score := eng.tryMove(β-1, β, depth-reduction, 0, false)
if score >= β && score < KnownWinScore {
return score
}
}
// razoring at very low depth: if QS is under a considerable margin
// we return that score
if depth == 1 &&
!sideIsChecked && // disable in check
!pvNode && // disable in pv nodes
KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
rα := α - futilityMargin
if score := eng.searchQuiescence(rα, rα+1); score <= rα {
return score
}
}
// futility and history pruning at frontier nodes
// based on Deep Futility Pruning http://home.hccnet.nl/h.g.muller/deepfut.html
// based on History Leaf Pruning https://chessprogramming.wikispaces.com/History+Leaf+Pruning
// statically evaluates the position. Use static evaluation from hash if available
static := int32(0)
allowLeafsPruning := false
if depth <= futilityDepthLimit && // enable when close to the frontier
!sideIsChecked && // disable in check
!pvNode && // disable in pv nodes
KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
allowLeafsPruning = true
static = eng.cachedScore(&entry)
}
// principal variation search: search with a null window if there is already a good move
bestMove, localα := NullMove, int32(-InfinityScore)
// dropped true if not all moves were searched
// mate cannot be declared unless all moves were tested
dropped := false
numMoves := int32(0)
eng.stack.GenerateMoves(Violent|Quiet, hash)
for move := eng.stack.PopMove(); move != NullMove; move = eng.stack.PopMove() {
if ply == 0 {
if eng.isIgnoredRootMove(move) {
continue
}
eng.Log.CurrMove(int(depth), move, int(numMoves+1))
}
givesCheck := pos.GivesCheck(move)
critical := move == hash || eng.stack.IsKiller(move)
history := eng.history.get(move)
newDepth := depth
numMoves++
if allowLeafsPruning && !critical && !givesCheck && localα > KnownLossScore {
// prune moves that do not raise alphas and moves that performed bad historically
// prune bad captures moves that performed bad historically
if isFutile(pos, static, α, depth*futilityMargin, move) ||
history < -10 && move.IsQuiet() ||
see(pos, move) < -futilityMargin {
dropped = true
continue
}
}
// extend good moves that also gives check
// see discussion: http://www.talkchess.com/forum/viewtopic.php?t=56361
// when the move gives check, history pruning and futility pruning are also disabled
if givesCheck && !seeSign(pos, move) {
newDepth += checkDepthExtension
critical = true
}
// late move reduction: search best moves with full depth, reduce remaining moves
lmr := int32(0)
if !sideIsChecked && depth > lmrDepthLimit && !critical {
// reduce quiet moves and bad captures more at high depths and after many quiet moves
// large numMoves means it's likely not a CUT node. Large depth means reductions are less risky
if move.IsQuiet() {
if history <= 0 {
lmr = 2 + min(depth, numMoves)/6
} else {
lmr = 1 + min(depth, numMoves)/6
}
} else if see := see(pos, move); see < -futilityMargin {
lmr = 2 + min(depth, numMoves)/6
} else if see < 0 {
lmr = 1 + min(depth, numMoves)/6
}
}
// skip illegal moves that leave the king in check
eng.DoMove(move)
if pos.IsChecked(us) {
eng.UndoMove()
continue
}
score := eng.tryMove(max(α, localα), β, newDepth, lmr, numMoves > 1)
if score >= β {
// fail high, cut node
eng.history.add(move, 5+5*depth)
eng.stack.SaveKiller(move)
eng.updateHash(failedHigh|(entry.kind&hasStatic), depth, score, move, int32(entry.static))
return score
}
if score > localα {
bestMove, localα = move, score
}
eng.history.add(move, -1)
}
bound := getBound(α, β, localα)
if !dropped && bestMove == NullMove {
// if no move was found then the game is over
bound = exact
if sideIsChecked {
localα = MatedScore + ply
} else {
localα = 0
}
}
eng.updateHash(bound|(entry.kind&hasStatic), depth, localα, bestMove, int32(entry.static))
return localα
}
// search starts the search up to depth depth
// the returned score is from current side to move POV
// estimated is the score from previous depths
func (eng *Engine) search(depth, estimated int32) int32 {
// this method only implements aspiration windows
//
// the gradual widening algorithm is the one used by RobboLito
// and Stockfish and it is explained here:
// http://www.talkchess.com/forum/viewtopic.php?topic_view=threads&p=499768&t=46624
γ, δ := estimated, int32(initialAspirationWindow)
α, β := max(γ-δ, -InfinityScore), min(γ+δ, InfinityScore)
score := estimated
if depth < 4 {
// disable aspiration window for very low search depths
α, β = -InfinityScore, +InfinityScore
}
for !eng.stopped {
// at root a non-null move is required, cannot prune based on null-move
score = eng.searchTree(α, β, depth)
if score <= α {
α = max(α-δ, -InfinityScore)
δ += δ / 2
} else if score >= β {
β = min(β+δ, InfinityScore)
δ += δ / 2
} else {
return score
}
}
return score
}
// searchMultiPV searches eng.options.MultiPV principal variations from current position
// returns score and the moves of the highest scoring pv line (possible empty)
// if a pv is not found (e.g. search is stopped during the first ply), return 0, nil
func (eng *Engine) searchMultiPV(depth, estimated int32) (int32, []Move) {
type pv struct {
score int32
moves []Move
}
multiPV := eng.Options.MultiPV
searchMultiPV := (eng.Options.HandicapLevel+4)/5 + 1
if multiPV < searchMultiPV {
multiPV = searchMultiPV
}
pvs := make([]pv, 0, multiPV)
eng.ignoreRootMoves = eng.ignoreRootMoves[:0]
for p := 0; p < multiPV; p++ {
if eng.UseAB {
// search using naive alphabeta
estimated = eng.searchAB(depth, estimated)
} else {
estimated = eng.search(depth, estimated)
}
if eng.stopped {
break // if eng has been stopped then this is not a legit pv
}
var moves []Move
if eng.UseAB {
// get pev from naive alphabeta's pv table
moves = eng.pvTableAB.Get(eng.Position)
} else {
moves = eng.pvTable.Get(eng.Position)
}
hasPV := len(moves) != 0 && !eng.isIgnoredRootMove(moves[0])
if p == 0 || hasPV { // at depth 0 we might not get a PV
pvs = append(pvs, pv{estimated, moves})
}
if !hasPV {
break
}
// if there is PV ignore the first move for the next PVs
eng.ignoreRootMoves = append(eng.ignoreRootMoves, moves[0])
}
// sort PVs by score
if len(pvs) == 0 {
return 0, nil
}
for i := range pvs {
for j := i; j >= 0; j-- {
if j == 0 || pvs[j-1].score > pvs[i].score {
tmp := pvs[i]
copy(pvs[j+1:i+1], pvs[j:i])
pvs[j] = tmp
break
}
}
}
for i := range pvs {
eng.Log.PrintPV(eng.Stats, i+1, pvs[i].score, pvs[i].moves)
}
// for best play return the PV with highest score
if eng.Options.HandicapLevel == 0 || len(pvs) <= 1 {
return pvs[0].score, pvs[0].moves
}
// PVs are sorted by score. Pick one PV at random
// and if the score is not too far off, return it
s := int32(eng.Options.HandicapLevel)
d := s*s/2 + s*10 + 5
n := rand.Intn(len(pvs))
for pvs[n].score+d < pvs[0].score {
n--
}
return pvs[n].score, pvs[n].moves
}
// Play evaluates current position. See PlayMoves for the returned values
func (eng *Engine) Play(tc *TimeControl) (score int32, moves []Move) {
return eng.PlayMoves(tc, nil)
}
// PlayMoves evaluates current position searching only moves specifid by rootMoves
//
// returns the principal variation, that is
// moves[0] is the best move found and
// moves[1] is the pondering move
//
// if rootMoves is nil searches all root moves
//
// returns a nil pv if no move was found because the game is already finished
// returns empty pv array if it's valid position, but no pv was found (e.g. search depth is 0)
//
// Time control, tc, should already be started
func (eng *Engine) PlayMoves(tc *TimeControl, rootMoves []Move) (score int32, moves []Move) {
if !initialized {
initEngine()
}
eng.Log.BeginSearch()
eng.Stats = Stats{Depth: -1}
eng.rootPly = eng.Position.Ply
eng.timeControl = tc
eng.stopped = false
eng.checkpoint = checkpointStep
eng.stack.Reset(eng.Position)
eng.history.newSearch()
eng.onlyRootMoves = rootMoves
for depth := int32(0); depth < 64; depth++ {
if !tc.NextDepth(depth) {
// stop if tc control says we are done
// search at least one depth, otherwise a move cannot be returned
break
}
eng.Stats.Depth = depth
if s, m := eng.searchMultiPV(depth, score); len(moves) == 0 || len(m) != 0 {
score, moves = s, m
}
}
eng.Log.EndSearch()
if len(moves) == 0 && !eng.Position.HasLegalMoves() {
return 0, nil
} else if moves == nil {
return score, []Move{}
}
return score, moves
}
// ply returns the ply from the beginning of the search
func (eng *Engine) ply() int32 {
return int32(eng.Position.Ply - eng.rootPly)
}
// SetPosition sets current position
// if pos is nil, the starting position is set
func (eng *Engine) SetPosition(pos *Position) {
if pos != nil {
eng.Position = pos
} else {
eng.Position, _ = PositionFromFEN(FENStartPos)
}
}
// DoMove executes a move.
func (eng *Engine) DoMove(move Move) {
eng.Position.DoMove(move)
GlobalHashTable.prefetch(eng.Position)
}
// UndoMove undoes the last move
func (eng *Engine) UndoMove() {
eng.Position.UndoMove()
}
// Score evaluates current position from current player's POV
func (eng *Engine) Score() int32 {
return Evaluate(eng.Position).GetCentipawnsScore() * eng.Position.Us().Multiplier()
}
// cachedScore implements a cache on top of Score
// the cached static evaluation is stored in the hashEntry
func (eng *Engine) cachedScore(e *hashEntry) int32 {
if e.kind&hasStatic == 0 {
e.kind |= hasStatic
e.static = int16(eng.Score())
}
return int32(e.static)
}
// endPosition determines whether the current position is an end game
// returns score and a bool if the game has ended
func (eng *Engine) endPosition() (int32, bool) {
pos := eng.Position // shortcut
// trivial cases when kings are missing
if Kings(pos, White) == 0 {
if Kings(pos, Black) == 0 {
return 0, true // both kings are missing
}
return pos.Us().Multiplier() * (MatedScore + eng.ply()), true
}
if Kings(pos, Black) == 0 {
return pos.Us().Multiplier() * (MateScore - eng.ply()), true
}
// neither side can mate
if pos.InsufficientMaterial() {
return 0, true
}
// fifty full moves without a capture or a pawn move
if pos.FiftyMoveRule() {
return 0, true
}
// repetition is a draw
// at root we need to continue searching even if we saw two repetitions already,
// however we can prune deeper search only at two repetitions
if r := pos.ThreeFoldRepetition(); eng.ply() > 0 && r >= 2 || r >= 3 {
return 0, true
}
return 0, false
}
// retrieveHash gets from GlobalHashTable the current position
func (eng *Engine) retrieveHash() hashEntry {
entry := GlobalHashTable.get(eng.Position)
if entry.kind == 0 || entry.move != NullMove && !eng.Position.IsPseudoLegal(entry.move) {
eng.Stats.CacheMiss++
return hashEntry{}
}
// return mate score relative to root
// the score was adjusted relative to position before the hash table was updated
if entry.score < KnownLossScore {
entry.score += int16(eng.ply())
} else if entry.score > KnownWinScore {
entry.score -= int16(eng.ply())
}
eng.Stats.CacheHit++
return entry
}
// updateHash updates GlobalHashTable with the current position
func (eng *Engine) updateHash(flags hashFlags, depth, score int32, move Move, static int32) {
// if search is stopped then score cannot be trusted
if eng.stopped {
return
}
// update principal variation table in exact nodes
if flags&exact != 0 {
eng.pvTable.Put(eng.Position, move)
}
if eng.ply() == 0 && (len(eng.ignoreRootMoves) != 0 || len(eng.onlyRootMoves) != 0) {
// at root if there are moves to ignore (e.g. because of multipv)
// then this is an incomplete search, so don't update the hash
return
}
// save the mate score relative to the current position
// when retrieving from hash the score will be adjusted relative to root
if score < KnownLossScore {
score -= eng.ply()
} else if score > KnownWinScore {
score += eng.ply()
}
GlobalHashTable.put(eng.Position, hashEntry{
kind: flags,
score: int16(score),
depth: int8(depth),
move: move,
static: int16(static),
})
}
// searchQuiescence evaluates the position after solving all captures
//
// this is a very limited search which considers only some violent moves
// depth is ignored, so hash uses depth 0; search continues until
// stand pat or no capture can improve the score
func (eng *Engine) searchQuiescence(α, β int32) int32 {
eng.Stats.Nodes++
entry := eng.retrieveHash()
if score := int32(entry.score); isInBounds(entry.kind, α, β, score) {
return score
}
static := eng.cachedScore(&entry)
if static >= β {
// stand pat if the static score is already a cut-off
eng.updateHash(failedHigh|hasStatic, 0, static, entry.move, static)
return static
}
pos := eng.Position
us := pos.Us()
inCheck := pos.IsChecked(us)
localα := max(α, static)
bestMove := entry.move
eng.stack.GenerateMoves(Violent, NullMove)
for move := eng.stack.PopMove(); move != NullMove; move = eng.stack.PopMove() {
// prune futile moves that would anyway result in a stand-pat at that next depth
if !inCheck && isFutile(pos, static, α, futilityMargin, move) ||
!inCheck && seeSign(pos, move) {
continue
}
// discard illegal or losing captures
eng.DoMove(move)
if eng.Position.IsChecked(us) {
eng.UndoMove()
continue
}
score := -eng.searchQuiescence(-β, -localα)
eng.UndoMove()
if score >= β {
eng.updateHash(failedHigh|hasStatic, 0, score, move, static)
return score
}
if score > localα {
localα = score
bestMove = move
}
}
eng.updateHash(getBound(α, β, localα)|hasStatic, 0, localα, bestMove, static)
return localα
}
func initEngine() {
var fens = [FigureArraySize]string{
Pawn: "rnbqkbnr/ppp1pppp/8/8/3P4/8/PPP1PPPP/RNBQKBNR w - - 0 1",
Knight: "r1bqkbnr/pppppppp/8/8/3N4/8/PPPPPPPP/R1BQKBNR w - - 0 1",
Bishop: "rn1qkbnr/pppppppp/8/8/3B4/8/PPPPPPPP/RN1QKBNR w - - 0 1",
Rook: "rnbqkbn1/pppppppp/8/8/3R4/8/PPPPPPPP/RNBQKBN1 w - - 0 1",
Queen: "rnb1kbnr/pppppppp/8/8/3Q4/8/PPPPPPPP/RNB1KBNR w - - 0 1",
}
for f, fen := range fens {
if fen != "" {
pos, _ := PositionFromFEN(fen)
futilityFigureBonus[f] = Evaluate(pos).GetCentipawnsScore()
}
}
initialized = true
}
/////////////////////////////////////////////////////////////////////
| != 0 {
return false
}
for _, m := range eng.ignoreRootMoves {
if m == move {
return true
}
}
for _, m := range eng.onlyRootMoves {
if m == move {
return false
}
}
return len(eng.onlyRootMoves) != 0
}
// searchTree | identifier_body |
engine.go | package bengine
/////////////////////////////////////////////////////////////////////
// imports
import (
"math/rand"
. "github.com/easychessanimations/gochess/butils"
)
/////////////////////////////////////////////////////////////////////
// package bengine implements board, move generation and position searching
//
// the package can be used as a general library for chess tool writing and
// provides the core functionality for the zurichess chess engine
//
// position (basic.go, position.go) uses:
//
// * bitboards for representation - https://chessprogramming.wikispaces.com/Bitboards
// * magic bitboards for sliding move generation - https://chessprogramming.wikispaces.com/Magic+Bitboards
//
// search (engine.go) features implemented are:
//
// * aspiration window - https://chessprogramming.wikispaces.com/Aspiration+Windows
// * check extension - https://chessprogramming.wikispaces.com/Check+Extensions
// * fail soft - https://chessprogramming.wikispaces.com/Fail-Soft
// * futility Pruning - https://chessprogramming.wikispaces.com/Futility+pruning
// * history leaf pruning - https://chessprogramming.wikispaces.com/History+Leaf+Pruning
// * killer move heuristic - https://chessprogramming.wikispaces.com/Killer+Heuristic
// * late move redution (LMR) - https://chessprogramming.wikispaces.com/Late+Move+Reductions
// * mate distance pruning - https://chessprogramming.wikispaces.com/Mate+Distance+Pruning
// * negamax framework - http://chessprogramming.wikispaces.com/Alpha-Beta#Implementation-Negamax%20Framework
// * null move prunning (NMP) - https://chessprogramming.wikispaces.com/Null+Move+Pruning
// * principal variation search (PVS) - https://chessprogramming.wikispaces.com/Principal+Variation+Search
// * quiescence search - https://chessprogramming.wikispaces.com/Quiescence+Search
// * razoring - https://chessprogramming.wikispaces.com/Razoring
// * static Single Evaluation - https://chessprogramming.wikispaces.com/Static+Exchange+Evaluation
// * zobrist hashing - https://chessprogramming.wikispaces.com/Zobrist+Hashing
//
// move ordering (move_ordering.go) consists of:
//
// * hash move heuristic
// * captures sorted by MVVLVA - https://chessprogramming.wikispaces.com/MVV-LVA
// * killer moves - https://chessprogramming.wikispaces.com/Killer+Move
// * history Heuristic - https://chessprogramming.wikispaces.com/History+Heuristic
// * countermove Heuristic - https://chessprogramming.wikispaces.com/Countermove+Heuristic
//
// evaluation (material.go) consists of
//
// * material and mobility
// * piece square tables
// * king pawn shield - https://chessprogramming.wikispaces.com/King+Safety
// * king safery ala Toga style - https://chessprogramming.wikispaces.com/King+Safety#Attacking%20King%20Zone
// * pawn structure: connected, isolated, double, passed, rammed. Evaluation is cached (see cache.go)
// * attacks on minors and majors
// * rooks on open and semiopenfiles - https://chessprogramming.wikispaces.com/Rook+on+Open+File
// * tapered evaluation - https://chessprogramming.wikispaces.com/Tapered+Eval
/////////////////////////////////////////////////////////////////////
// member functions
// tryMove descends on the search tree; this function
// is called from searchTree after the move is executed
// and it will undo the move
//
// α, β represent lower and upper bounds
// depth is the remaining depth (decreasing)
// lmr is how much to reduce a late move. Implies non-null move
// nullWindow indicates whether to scout first. Implies non-null move
//
// returns the score from the deeper search
// tryMove descends one ply into the search tree; the caller has already
// executed the move on eng.Position and this function always undoes it
// before returning.
//
// α, β are the parent's lower and upper bounds.
// depth is the parent's remaining depth (decremented here).
// lmr > 0 requests a reduced-depth scout first (late move reduction).
// nullWindow requests a PVS null-window scout before any full re-search.
//
// returns the child's score negated into the parent's POV (negamax).
func (eng *Engine) tryMove(α, β, depth, lmr int32, nullWindow bool) int32 {
	depth--
	// seed score above α so the branch below runs when no reduced scout
	// is performed
	score := α + 1
	if lmr > 0 { // reduce late moves
		score = -eng.searchTree(-α-1, -α, depth-lmr)
	}
	if score > α { // if late move reduction is disabled or has failed
		if nullWindow {
			// PVS: scout with a null window; re-search with the full
			// window only when the scout lands strictly inside (α, β)
			score = -eng.searchTree(-α-1, -α, depth)
			if α < score && score < β {
				score = -eng.searchTree(-β, -α, depth)
			}
		} else {
			score = -eng.searchTree(-β, -α, depth)
		}
	}
	eng.UndoMove()
	return score
}
// isIgnoredRootMove returns true if move should be ignored at root
// isIgnoredRootMove reports whether move must be skipped at the root.
// A move is skipped when it is on the ignore list (filled e.g. by the
// multi-PV search) or when a root-move whitelist exists and the move is
// not on it. Away from the root (ply != 0) no move is ever ignored.
func (eng *Engine) isIgnoredRootMove(move Move) bool {
	if eng.ply() != 0 {
		return false
	}
	for _, ignored := range eng.ignoreRootMoves {
		if ignored == move {
			return true
		}
	}
	// an empty whitelist allows everything; a non-empty one allows only
	// its own members
	allowed := len(eng.onlyRootMoves) == 0
	for _, m := range eng.onlyRootMoves {
		if m == move {
			allowed = true
			break
		}
	}
	return !allowed
}
// searchTree implements searchTree framework
//
// searchTree fails soft, i.e. the score returned can be outside the bounds
//
// α, β represent lower and upper bounds
// depth is the search depth (decreasing)
//
// returns the score of the current position up to depth (modulo reductions/extensions)
// the returned score is from current player's POV
//
// invariants:
// if score <= α then the search failed low and the score is an upper bound
// else if score >= β then the search failed high and the score is a lower bound
// else score is exact
//
// assuming this is a maximizing nodes, failing high means that a
// minimizing ancestor node already has a better alternative
// searchTree implements the negamax search framework.
//
// searchTree fails soft, i.e. the score returned can be outside [α, β]:
// score <= α is an upper bound (fail low), score >= β a lower bound
// (fail high), otherwise the score is exact.
func (eng *Engine) searchTree(α, β, depth int32) int32 {
	ply := eng.ply()
	// a node is a PV node only when the window is wider than one point
	pvNode := α+1 < β
	pos := eng.Position
	us := pos.Us()
	// update statistics; poll the time control only every checkpointStep
	// nodes to keep the hot path cheap
	eng.Stats.Nodes++
	if !eng.stopped && eng.Stats.Nodes >= eng.checkpoint {
		eng.checkpoint = eng.Stats.Nodes + checkpointStep
		if eng.timeControl.Stopped() {
			eng.stopped = true
		}
	}
	if eng.stopped {
		// score is untrustworthy once stopped; α is a harmless fail-low
		return α
	}
	if pvNode && ply > eng.Stats.SelDepth {
		eng.Stats.SelDepth = ply
	}
	// verify that this is not already an endgame
	if score, done := eng.endPosition(); done && (ply != 0 || score != 0) {
		// at root we ignore draws because some GUIs don't properly detect
		// theoretical draws; e.g. cutechess doesn't detect that kings and
		// bishops when all bishops are on the same color; if the position
		// is a theoretical draw, keep searching for a move
		return score
	}
	// mate pruning: if an ancestor already has a mate in ply moves then
	// the search will always fail low so we return the lowest winning score
	if MateScore-ply <= α {
		return KnownWinScore
	}
	// stop searching when the maximum search depth is reached
	// depth can be < 0 due to aggressive LMR
	if depth <= 0 {
		return eng.searchQuiescence(α, β)
	}
	// check the transposition table
	// entry will store the cached static evaluation which may be computed later
	entry := eng.retrieveHash()
	hash := entry.move
	if eng.isIgnoredRootMove(hash) {
		// the hashed best move is excluded at root; drop the whole entry
		entry = hashEntry{}
		hash = NullMove
	}
	if score := int32(entry.score); depth <= int32(entry.depth) &&
		isInBounds(entry.kind, α, β, score) &&
		(ply != 0 || !eng.isIgnoredRootMove(hash)) {
		if pvNode {
			// update the pv table, otherwise we risk not having a node at root
			// if the pv entry was overwritten
			eng.pvTable.Put(pos, hash)
		}
		if score >= β && hash != NullMove {
			// if this is a CUT node, update the killer like in the regular move loop
			eng.stack.SaveKiller(hash)
		}
		return score
	}
	sideIsChecked := pos.IsChecked(us)
	// do a null move; if the null move fails high then the current
	// position is too good, so opponent will not play it
	// verification that we are not in check is done by tryMove
	// which bails out if after the null move we are still in check
	if !sideIsChecked && // nullmove is illegal when in check
		MinorsAndMajors(pos, us) != 0 && // at least one minor/major piece.
		KnownLossScore < α && β < KnownWinScore && // disable in lost or won positions
		(entry.kind&hasStatic == 0 || int32(entry.static) >= β) {
		eng.DoMove(NullMove)
		// adaptive reduction: deeper nodes reduce more
		reduction := 1 + depth/3
		score := eng.tryMove(β-1, β, depth-reduction, 0, false)
		if score >= β && score < KnownWinScore {
			return score
		}
	}
	// razoring at very low depth: if QS is under a considerable margin
	// we return that score
	if depth == 1 &&
		!sideIsChecked && // disable in check
		!pvNode && // disable in pv nodes
		KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
		rα := α - futilityMargin
		if score := eng.searchQuiescence(rα, rα+1); score <= rα {
			return score
		}
	}
	// futility and history pruning at frontier nodes
	// based on Deep Futility Pruning http://home.hccnet.nl/h.g.muller/deepfut.html
	// based on History Leaf Pruning https://chessprogramming.wikispaces.com/History+Leaf+Pruning
	// statically evaluates the position. Use static evaluation from hash if available
	static := int32(0)
	allowLeafsPruning := false
	if depth <= futilityDepthLimit && // enable when close to the frontier
		!sideIsChecked && // disable in check
		!pvNode && // disable in pv nodes
		KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
		allowLeafsPruning = true
		static = eng.cachedScore(&entry)
	}
	// principal variation search: search with a null window if there is already a good move
	bestMove, localα := NullMove, int32(-InfinityScore)
	// dropped true if not all moves were searched
	// mate cannot be declared unless all moves were tested
	dropped := false
	numMoves := int32(0)
	eng.stack.GenerateMoves(Violent|Quiet, hash)
	for move := eng.stack.PopMove(); move != NullMove; move = eng.stack.PopMove() {
		if ply == 0 {
			if eng.isIgnoredRootMove(move) {
				continue
			}
			eng.Log.CurrMove(int(depth), move, int(numMoves+1))
		}
		givesCheck := pos.GivesCheck(move)
		// critical moves (hash move, killers) are never reduced or pruned
		critical := move == hash || eng.stack.IsKiller(move)
		history := eng.history.get(move)
		newDepth := depth
		numMoves++
		if allowLeafsPruning && !critical && !givesCheck && localα > KnownLossScore {
			// prune moves that do not raise alphas and moves that performed bad historically
			// prune bad captures moves that performed bad historically
			if isFutile(pos, static, α, depth*futilityMargin, move) ||
				history < -10 && move.IsQuiet() ||
				see(pos, move) < -futilityMargin {
				dropped = true
				continue
			}
		}
		// extend good moves that also gives check
		// see discussion: http://www.talkchess.com/forum/viewtopic.php?t=56361
		// when the move gives check, history pruning and futility pruning are also disabled
		if givesCheck && !seeSign(pos, move) {
			newDepth += checkDepthExtension
			critical = true
		}
		// late move reduction: search best moves with full depth, reduce remaining moves
		lmr := int32(0)
		if !sideIsChecked && depth > lmrDepthLimit && !critical {
			// reduce quiet moves and bad captures more at high depths and after many quiet moves
			// large numMoves means it's likely not a CUT node. Large depth means reductions are less risky
			if move.IsQuiet() {
				if history <= 0 {
					lmr = 2 + min(depth, numMoves)/6
				} else {
					lmr = 1 + min(depth, numMoves)/6
				}
			} else if see := see(pos, move); see < -futilityMargin {
				lmr = 2 + min(depth, numMoves)/6
			} else if see < 0 {
				lmr = 1 + min(depth, numMoves)/6
			}
		}
		// skip illegal moves that leave the king in check
		eng.DoMove(move)
		if pos.IsChecked(us) {
			eng.UndoMove()
			continue
		}
		// null-window scout for every move after the first (PVS)
		score := eng.tryMove(max(α, localα), β, newDepth, lmr, numMoves > 1)
		if score >= β {
			// fail high, cut node
			eng.history.add(move, 5+5*depth)
			eng.stack.SaveKiller(move)
			eng.updateHash(failedHigh|(entry.kind&hasStatic), depth, score, move, int32(entry.static))
			return score
		}
		if score > localα {
			bestMove, localα = move, score
		}
		eng.history.add(move, -1)
	}
	bound := getBound(α, β, localα)
	if !dropped && bestMove == NullMove {
		// if no move was found then the game is over: checkmate or stalemate
		bound = exact
		if sideIsChecked {
			localα = MatedScore + ply
		} else {
			localα = 0
		}
	}
	eng.updateHash(bound|(entry.kind&hasStatic), depth, localα, bestMove, int32(entry.static))
	return localα
}
// search starts the search up to depth depth
// the returned score is from current side to move POV
// estimated is the score from previous depths
// search runs searchTree at the given depth inside a gradually widening
// aspiration window centered on estimated (the score from the previous
// iteration). The widening scheme follows RobboLito/Stockfish:
// http://www.talkchess.com/forum/viewtopic.php?topic_view=threads&p=499768&t=46624
// The returned score is from the side-to-move POV.
func (eng *Engine) search(depth, estimated int32) int32 {
	window := int32(initialAspirationWindow)
	lo := max(estimated-window, -InfinityScore)
	hi := min(estimated+window, InfinityScore)
	if depth < 4 {
		// disable the aspiration window for very shallow searches
		lo, hi = -InfinityScore, +InfinityScore
	}
	score := estimated
	for !eng.stopped {
		// at root a non-null move is required, cannot prune based on null-move
		score = eng.searchTree(lo, hi, depth)
		switch {
		case score <= lo:
			// fail low: push the lower bound down and grow the window
			lo = max(lo-window, -InfinityScore)
			window += window / 2
		case score >= hi:
			// fail high: push the upper bound up and grow the window
			hi = min(hi+window, InfinityScore)
			window += window / 2
		default:
			// score strictly inside the window is exact
			return score
		}
	}
	return score
}
// searchMultiPV searches eng.options.MultiPV principal variations from current position
// returns score and the moves of the highest scoring pv line (possible empty)
// if a pv is not found (e.g. search is stopped during the first ply), return 0, nil
// searchMultiPV searches eng.Options.MultiPV principal variations from
// the current position and returns the score and moves of the line
// chosen for play (possibly empty). If no pv is found at all (e.g. the
// search is stopped during the first ply) it returns 0, nil.
func (eng *Engine) searchMultiPV(depth, estimated int32) (int32, []Move) {
	type pv struct {
		score int32
		moves []Move
	}
	// handicapped engines search extra lines so a weaker one can be picked
	multiPV := eng.Options.MultiPV
	searchMultiPV := (eng.Options.HandicapLevel+4)/5 + 1
	if multiPV < searchMultiPV {
		multiPV = searchMultiPV
	}
	pvs := make([]pv, 0, multiPV)
	eng.ignoreRootMoves = eng.ignoreRootMoves[:0]
	for p := 0; p < multiPV; p++ {
		if eng.UseAB {
			// search using naive alphabeta
			estimated = eng.searchAB(depth, estimated)
		} else {
			estimated = eng.search(depth, estimated)
		}
		if eng.stopped {
			break // if eng has been stopped then this is not a legit pv
		}
		var moves []Move
		if eng.UseAB {
			// get pv from naive alphabeta's pv table
			moves = eng.pvTableAB.Get(eng.Position)
		} else {
			moves = eng.pvTable.Get(eng.Position)
		}
		hasPV := len(moves) != 0 && !eng.isIgnoredRootMove(moves[0])
		if p == 0 || hasPV { // at depth 0 we might not get a PV
			pvs = append(pvs, pv{estimated, moves})
		}
		if !hasPV {
			break
		}
		// if there is PV ignore the first move for the next PVs
		eng.ignoreRootMoves = append(eng.ignoreRootMoves, moves[0])
	}
	// sort PVs by score
	if len(pvs) == 0 {
		return 0, nil
	}
	// insertion-style pass: shift pvs[i] left past all lower-scored lines
	for i := range pvs {
		for j := i; j >= 0; j-- {
			if j == 0 || pvs[j-1].score > pvs[i].score {
				tmp := pvs[i]
				copy(pvs[j+1:i+1], pvs[j:i])
				pvs[j] = tmp
				break
			}
		}
	}
	for i := range pvs {
		eng.Log.PrintPV(eng.Stats, i+1, pvs[i].score, pvs[i].moves)
	}
	// for best play return the PV with highest score
	if eng.Options.HandicapLevel == 0 || len(pvs) <= 1 {
		return pvs[0].score, pvs[0].moves
	}
	// PVs are sorted by score. Pick one PV at random
	// and if the score is not too far off, return it
	s := int32(eng.Options.HandicapLevel)
	d := s*s/2 + s*10 + 5 // allowed score gap grows with the handicap level
	n := rand.Intn(len(pvs))
	for pvs[n].score+d < pvs[0].score {
		n--
	}
	return pvs[n].score, pvs[n].moves
}
// Play evaluates current position. See PlayMoves for the returned values
// Play evaluates the current position searching all root moves; it is a
// convenience wrapper around PlayMoves, which documents the returned values.
func (eng *Engine) Play(tc *TimeControl) (score int32, moves []Move) {
	score, moves = eng.PlayMoves(tc, nil)
	return score, moves
}
// PlayMoves evaluates current position searching only moves specifid by rootMoves
//
// returns the principal variation, that is
// moves[0] is the best move found and
// moves[1] is the pondering move
//
// if rootMoves is nil searches all root moves
//
// returns a nil pv if no move was found because the game is already finished
// returns empty pv array if it's valid position, but no pv was found (e.g. search depth is 0)
//
// Time control, tc, should already be started
// PlayMoves evaluates the current position searching only the moves in
// rootMoves (nil means search all root moves).
//
// Returns the principal variation: moves[0] is the best move found,
// moves[1] the pondering move. Returns a nil pv when the game is already
// finished and an empty non-nil pv when the position is valid but no pv
// was found (e.g. search depth 0).
//
// The time control tc should already be started.
func (eng *Engine) PlayMoves(tc *TimeControl, rootMoves []Move) (score int32, moves []Move) {
	if !initialized {
		// one-time lazy initialization of package tables
		initEngine()
	}
	// reset per-search state
	eng.Log.BeginSearch()
	eng.Stats = Stats{Depth: -1}
	eng.rootPly = eng.Position.Ply
	eng.timeControl = tc
	eng.stopped = false
	eng.checkpoint = checkpointStep
	eng.stack.Reset(eng.Position)
	eng.history.newSearch()
	eng.onlyRootMoves = rootMoves
	// iterative deepening up to 64 plies
	for depth := int32(0); depth < 64; depth++ {
		if !tc.NextDepth(depth) {
			// stop if tc control says we are done
			// search at least one depth, otherwise a move cannot be returned
			break
		}
		eng.Stats.Depth = depth
		// keep the previous iteration's result when a stopped iteration
		// yields no moves
		if s, m := eng.searchMultiPV(depth, score); len(moves) == 0 || len(m) != 0 {
			score, moves = s, m
		}
	}
	eng.Log.EndSearch()
	if len(moves) == 0 && !eng.Position.HasLegalMoves() {
		return 0, nil
	} else if moves == nil {
		return score, []Move{}
	}
	return score, moves
}
// ply returns the ply from the beginning of the search
// ply returns the number of plies between the current position and the
// root of the ongoing search.
func (eng *Engine) ply() int32 {
	current, root := eng.Position.Ply, eng.rootPly
	return int32(current - root)
}
// SetPosition sets current position
// if pos is nil, the starting position is set
// SetPosition sets the current position; passing nil selects the
// standard starting position.
func (eng *Engine) SetPosition(pos *Position) {
	if pos == nil {
		// error ignored: FENStartPos is a fixed, known-good FEN
		eng.Position, _ = PositionFromFEN(FENStartPos)
		return
	}
	eng.Position = pos
}
// DoMove executes a move.
// DoMove executes a move on the current position.
// After the move is made, the hash table slot for the new position is
// prefetched so the upcoming probe is more likely to be a cache hit.
func (eng *Engine) DoMove(move Move) {
	eng.Position.DoMove(move)
	GlobalHashTable.prefetch(eng.Position)
}
// UndoMove undoes the last move
// UndoMove undoes the last move executed on the current position.
func (eng *Engine) UndoMove() {
	eng.Position.UndoMove()
}
// Score evaluates current position from current player's POV
// Score statically evaluates the current position from the current
// player's POV; Multiplier flips the sign of the centipawn score for
// the side to move.
func (eng *Engine) Score() int32 {
	return Evaluate(eng.Position).GetCentipawnsScore() * eng.Position.Us().Multiplier()
}
// cachedScore implements a cache on top of Score
// the cached static evaluation is stored in the hashEntry
// cachedScore returns the static evaluation stored in e, computing it
// with Score and caching it into the entry on first request.
func (eng *Engine) cachedScore(e *hashEntry) int32 {
	if e.kind&hasStatic != 0 {
		// already computed for this entry
		return int32(e.static)
	}
	e.kind |= hasStatic
	e.static = int16(eng.Score())
	return int32(e.static)
}
// endPosition determines whether the current position is an end game
// returns score and a bool if the game has ended
// endPosition determines whether the current position ends the game.
// It returns the terminal score (side-to-move POV) and true when the
// game is over, otherwise 0 and false.
func (eng *Engine) endPosition() (int32, bool) {
	p := eng.Position // shortcut
	// trivial cases when kings are missing
	whiteKings, blackKings := Kings(p, White), Kings(p, Black)
	if whiteKings == 0 && blackKings == 0 {
		return 0, true // both kings are missing
	}
	if whiteKings == 0 {
		return p.Us().Multiplier() * (MatedScore + eng.ply()), true
	}
	if blackKings == 0 {
		return p.Us().Multiplier() * (MateScore - eng.ply()), true
	}
	// draws: neither side can mate, or fifty full moves passed without a
	// capture or a pawn move
	if p.InsufficientMaterial() || p.FiftyMoveRule() {
		return 0, true
	}
	// repetition is a draw; at the root we must keep searching until a
	// full three-fold repetition, deeper in the tree two repetitions are
	// enough to prune
	reps := p.ThreeFoldRepetition()
	if (eng.ply() > 0 && reps >= 2) || reps >= 3 {
		return 0, true
	}
	return 0, false
}
// retrieveHash gets from GlobalHashTable the current position
// retrieveHash probes GlobalHashTable for the current position.
// An empty or stale entry (kind 0, or a stored move that is no longer
// pseudo-legal) counts as a cache miss and yields a zero entry.
func (eng *Engine) retrieveHash() hashEntry {
	entry := GlobalHashTable.get(eng.Position)
	usable := entry.kind != 0 &&
		(entry.move == NullMove || eng.Position.IsPseudoLegal(entry.move))
	if !usable {
		eng.Stats.CacheMiss++
		return hashEntry{}
	}
	// mate scores were stored relative to the entry's own position
	// (see updateHash); shift them back relative to the search root
	switch {
	case entry.score < KnownLossScore:
		entry.score += int16(eng.ply())
	case entry.score > KnownWinScore:
		entry.score -= int16(eng.ply())
	}
	eng.Stats.CacheHit++
	return entry
}
// updateHash updates GlobalHashTable with the current position
// updateHash stores the current position in GlobalHashTable.
// flags, depth, score and move describe the search result; static is the
// cached static evaluation carried along in the entry.
func (eng *Engine) updateHash(flags hashFlags, depth, score int32, move Move, static int32) {
	if eng.stopped {
		// a stopped search produces untrustworthy scores; store nothing
		return
	}
	// exact nodes also refresh the principal variation table
	if flags&exact != 0 {
		eng.pvTable.Put(eng.Position, move)
	}
	if eng.ply() == 0 {
		// at the root a restricted move list (multipv ignore list or an
		// explicit root-move whitelist) means this was an incomplete
		// search, so the result must not be cached
		if len(eng.ignoreRootMoves) != 0 || len(eng.onlyRootMoves) != 0 {
			return
		}
	}
	// store mate scores relative to the current position; retrieveHash
	// re-adjusts them relative to the root when reading them back
	switch {
	case score < KnownLossScore:
		score -= eng.ply()
	case score > KnownWinScore:
		score += eng.ply()
	}
	GlobalHashTable.put(eng.Position, hashEntry{
		kind:   flags,
		score:  int16(score),
		depth:  int8(depth),
		move:   move,
		static: int16(static),
	})
}
// searchQuiescence evaluates the position after solving all captures
//
// this is a very limited search which considers only some violent moves
// depth is ignored, so hash uses depth 0; search continues until
// stand pat or no capture can improve the score
// searchQuiescence evaluates the position after resolving violent moves.
//
// This is a very limited search which considers only some violent moves;
// depth is not tracked, so hash entries use depth 0. Search continues
// until a stand-pat or no capture can improve the score. Fails soft.
func (eng *Engine) searchQuiescence(α, β int32) int32 {
	eng.Stats.Nodes++
	entry := eng.retrieveHash()
	// any cached score within bounds is good enough here (depth ignored)
	if score := int32(entry.score); isInBounds(entry.kind, α, β, score) {
		return score
	}
	static := eng.cachedScore(&entry)
	if static >= β {
		// stand pat if the static score is already a cut-off
		eng.updateHash(failedHigh|hasStatic, 0, static, entry.move, static)
		return static
	}
	pos := eng.Position
	us := pos.Us()
	inCheck := pos.IsChecked(us)
	// the stand-pat score acts as a floor for the search
	localα := max(α, static)
	bestMove := entry.move
	eng.stack.GenerateMoves(Violent, NullMove)
	for move := eng.stack.PopMove(); move != NullMove; move = eng.stack.PopMove() {
		// prune futile moves that would anyway result in a stand-pat at that next depth;
		// NOTE(review): the second clause also skips moves flagged by
		// seeSign — confirm seeSign's convention (losing-exchange test)
		// against its definition in see.go
		if !inCheck && isFutile(pos, static, α, futilityMargin, move) ||
			!inCheck && seeSign(pos, move) {
			continue
		}
		// discard illegal moves that leave our king in check
		eng.DoMove(move)
		if eng.Position.IsChecked(us) {
			eng.UndoMove()
			continue
		}
		score := -eng.searchQuiescence(-β, -localα)
		eng.UndoMove()
		if score >= β {
			// fail high, cut node
			eng.updateHash(failedHigh|hasStatic, 0, score, move, static)
			return score
		}
		if score > localα {
			localα = score
			bestMove = move
		}
	}
	eng.updateHash(getBound(α, β, localα)|hasStatic, 0, localα, bestMove, static)
	return localα
}
// initEngine precomputes futilityFigureBonus: each figure's reference
// position below is evaluated once and its centipawn score becomes that
// figure's bonus; finally the package is marked initialized so the
// caller does not run this again.
func initEngine() {
	// one reference FEN per figure; slots without a FEN keep a zero bonus
	fens := [FigureArraySize]string{
		Pawn:   "rnbqkbnr/ppp1pppp/8/8/3P4/8/PPP1PPPP/RNBQKBNR w - - 0 1",
		Knight: "r1bqkbnr/pppppppp/8/8/3N4/8/PPPPPPPP/R1BQKBNR w - - 0 1",
		Bishop: "rn1qkbnr/pppppppp/8/8/3B4/8/PPPPPPPP/RN1QKBNR w - - 0 1",
		Rook:   "rnbqkbn1/pppppppp/8/8/3R4/8/PPPPPPPP/RNBQKBN1 w - - 0 1",
		Queen:  "rnb1kbnr/pppppppp/8/8/3Q4/8/PPPPPPPP/RNB1KBNR w - - 0 1",
	}
	for figure, fen := range fens {
		if fen == "" {
			continue
		}
		// error ignored: the FENs above are fixed, known-good strings
		pos, _ := PositionFromFEN(fen)
		futilityFigureBonus[figure] = Evaluate(pos).GetCentipawnsScore()
	}
	initialized = true
}
/////////////////////////////////////////////////////////////////////
| ee PlayMov | conditional_block |
engine.go | package bengine
/////////////////////////////////////////////////////////////////////
// imports
import (
"math/rand"
. "github.com/easychessanimations/gochess/butils"
)
/////////////////////////////////////////////////////////////////////
// package bengine implements board, move generation and position searching
//
// the package can be used as a general library for chess tool writing and
// provides the core functionality for the zurichess chess engine
//
// position (basic.go, position.go) uses:
//
// * bitboards for representation - https://chessprogramming.wikispaces.com/Bitboards
// * magic bitboards for sliding move generation - https://chessprogramming.wikispaces.com/Magic+Bitboards
//
// search (engine.go) features implemented are:
//
// * aspiration window - https://chessprogramming.wikispaces.com/Aspiration+Windows
// * check extension - https://chessprogramming.wikispaces.com/Check+Extensions
// * fail soft - https://chessprogramming.wikispaces.com/Fail-Soft
// * futility Pruning - https://chessprogramming.wikispaces.com/Futility+pruning
// * history leaf pruning - https://chessprogramming.wikispaces.com/History+Leaf+Pruning
// * killer move heuristic - https://chessprogramming.wikispaces.com/Killer+Heuristic
// * late move redution (LMR) - https://chessprogramming.wikispaces.com/Late+Move+Reductions
// * mate distance pruning - https://chessprogramming.wikispaces.com/Mate+Distance+Pruning
// * negamax framework - http://chessprogramming.wikispaces.com/Alpha-Beta#Implementation-Negamax%20Framework
// * null move prunning (NMP) - https://chessprogramming.wikispaces.com/Null+Move+Pruning
// * principal variation search (PVS) - https://chessprogramming.wikispaces.com/Principal+Variation+Search
// * quiescence search - https://chessprogramming.wikispaces.com/Quiescence+Search
// * razoring - https://chessprogramming.wikispaces.com/Razoring
// * static Single Evaluation - https://chessprogramming.wikispaces.com/Static+Exchange+Evaluation
// * zobrist hashing - https://chessprogramming.wikispaces.com/Zobrist+Hashing
//
// move ordering (move_ordering.go) consists of:
//
// * hash move heuristic
// * captures sorted by MVVLVA - https://chessprogramming.wikispaces.com/MVV-LVA
// * killer moves - https://chessprogramming.wikispaces.com/Killer+Move
// * history Heuristic - https://chessprogramming.wikispaces.com/History+Heuristic
// * countermove Heuristic - https://chessprogramming.wikispaces.com/Countermove+Heuristic
//
// evaluation (material.go) consists of
//
// * material and mobility
// * piece square tables
// * king pawn shield - https://chessprogramming.wikispaces.com/King+Safety
// * king safery ala Toga style - https://chessprogramming.wikispaces.com/King+Safety#Attacking%20King%20Zone
// * pawn structure: connected, isolated, double, passed, rammed. Evaluation is cached (see cache.go)
// * attacks on minors and majors
// * rooks on open and semiopenfiles - https://chessprogramming.wikispaces.com/Rook+on+Open+File
// * tapered evaluation - https://chessprogramming.wikispaces.com/Tapered+Eval
/////////////////////////////////////////////////////////////////////
// member functions
// tryMove descends on the search tree; this function
// is called from searchTree after the move is executed
// and it will undo the move
//
// α, β represent lower and upper bounds
// depth is the remaining depth (decreasing)
// lmr is how much to reduce a late move. Implies non-null move
// nullWindow indicates whether to scout first. Implies non-null move
//
// returns the score from the deeper search
// tryMove descends one ply into the search tree; the caller has already
// executed the move on eng.Position and this function always undoes it
// before returning. lmr > 0 requests a reduced-depth scout first;
// nullWindow requests a PVS null-window scout before a full re-search.
// Returns the child's score negated into the parent's POV (negamax).
func (eng *Engine) tryMove(α, β, depth, lmr int32, nullWindow bool) int32 {
	depth--
	// seed score above α so the branch below runs when no reduced scout
	// is performed
	score := α + 1
	if lmr > 0 { // reduce late moves
		score = -eng.searchTree(-α-1, -α, depth-lmr)
	}
	if score > α { // if late move reduction is disabled or has failed
		if nullWindow {
			// PVS: scout first; full-window re-search only when the
			// scout lands strictly inside (α, β)
			score = -eng.searchTree(-α-1, -α, depth)
			if α < score && score < β {
				score = -eng.searchTree(-β, -α, depth)
			}
		} else {
			score = -eng.searchTree(-β, -α, depth)
		}
	}
	eng.UndoMove()
	return score
}
// isIgnoredRootMove returns true if move should be ignored at root
// isIgnoredRootMove reports whether move must be skipped at the root:
// either it is on the ignore list, or a non-empty whitelist of root
// moves exists and move is not on it. Away from the root no move is
// ever ignored.
func (eng *Engine) isIgnoredRootMove(move Move) bool {
	if eng.ply() != 0 {
		return false
	}
	// blacklist: explicitly excluded moves (e.g. filled by multi-PV)
	for _, m := range eng.ignoreRootMoves {
		if m == move {
			return true
		}
	}
	// whitelist: if present, only its members are searched
	for _, m := range eng.onlyRootMoves {
		if m == move {
			return false
		}
	}
	return len(eng.onlyRootMoves) != 0
}
// searchTree implements searchTree framework
//
// searchTree fails soft, i.e. the score returned can be outside the bounds
//
// α, β represent lower and upper bounds
// depth is the search depth (decreasing)
//
// returns the score of the current position up to depth (modulo reductions/extensions)
// the returned score is from current player's POV
//
// invariants:
// if score <= α then the search failed low and the score is an upper bound
// else if score >= β then the search failed high and the score is a lower bound
// else score is exact
//
// assuming this is a maximizing nodes, failing high means that a
// minimizing ancestor node already has a better alternative
func (eng *Engine) searchTree(α, β, depth int32) int32 {
ply := eng.ply()
pvNode := α+1 < β
pos := eng.Position
us := pos.Us()
// update statistics
eng.Stats.Nodes++
if !eng.stopped && eng.Stats.Nodes >= eng.checkpoint {
eng.checkpoint = eng.Stats.Nodes + checkpointStep
if eng.timeControl.Stopped() {
eng.stopped = true
}
}
if eng.stopped {
return α
}
if pvNode && ply > eng.Stats.SelDepth {
eng.Stats.SelDepth = ply
}
// verify that this is not already an endgame
if score, done := eng.endPosition(); done && (ply != 0 || score != 0) {
// at root we ignore draws because some GUIs don't properly detect
// theoretical draws; e.g. cutechess doesn't detect that kings and
// bishops when all bishops are on the same color; if the position
// is a theoretical draw, keep searching for a move
return score
}
// mate pruning: if an ancestor already has a mate in ply moves then
// the search will always fail low so we return the lowest winning score
if MateScore-ply <= α {
return KnownWinScore
}
// stop searching when the maximum search depth is reached
// depth can be < 0 due to aggressive LMR
if depth <= 0 {
return eng.searchQuiescence(α, β)
}
// check the transposition table
// entry will store the cached static evaluation which may be computed later
entry := eng.retrieveHash()
hash := entry.move
if eng.isIgnoredRootMove(hash) {
entry = hashEntry{}
hash = NullMove
}
if score := int32(entry.score); depth <= int32(entry.depth) &&
isInBounds(entry.kind, α, β, score) &&
(ply != 0 || !eng.isIgnoredRootMove(hash)) {
if pvNode {
// update the pv table, otherwise we risk not having a node at root
// if the pv entry was overwritten
eng.pvTable.Put(pos, hash)
}
if score >= β && hash != NullMove {
// if this is a CUT node, update the killer like in the regular move loop
eng.stack.SaveKiller(hash)
}
return score
}
sideIsChecked := pos.IsChecked(us)
// do a null move; if the null move fails high then the current
// position is too good, so opponent will not play it
// verification that we are not in check is done by tryMove
// which bails out if after the null move we are still in check
if !sideIsChecked && // nullmove is illegal when in check
MinorsAndMajors(pos, us) != 0 && // at least one minor/major piece.
KnownLossScore < α && β < KnownWinScore && // disable in lost or won positions
(entry.kind&hasStatic == 0 || int32(entry.static) >= β) {
eng.DoMove(NullMove)
reduction := 1 + depth/3
score := eng.tryMove(β-1, β, depth-reduction, 0, false)
if score >= β && score < KnownWinScore {
return score
}
}
// razoring at very low depth: if QS is under a considerable margin
// we return that score
if depth == 1 &&
!sideIsChecked && // disable in check
!pvNode && // disable in pv nodes
KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
rα := α - futilityMargin
if score := eng.searchQuiescence(rα, rα+1); score <= rα {
return score
}
}
// futility and history pruning at frontier nodes
// based on Deep Futility Pruning http://home.hccnet.nl/h.g.muller/deepfut.html
// based on History Leaf Pruning https://chessprogramming.wikispaces.com/History+Leaf+Pruning
// statically evaluates the position. Use static evaluation from hash if available
static := int32(0)
allowLeafsPruning := false
if depth <= futilityDepthLimit && // enable when close to the frontier
!sideIsChecked && // disable in check
!pvNode && // disable in pv nodes
KnownLossScore < α && β < KnownWinScore { // disable when searching for a mate
allowLeafsPruning = true
static = eng.cachedScore(&entry)
}
// principal variation search: search with a null window if there is already a good move
bestMove, localα := NullMove, int32(-InfinityScore)
// dropped true if not all moves were searched
// mate cannot be declared unless all moves were tested
dropped := false
numMoves := int32(0)
eng.stack.GenerateMoves(Violent|Quiet, hash)
for move := eng.stack.PopMove(); move != NullMove; move = eng.stack.PopMove() {
if ply == 0 {
if eng.isIgnoredRootMove(move) {
continue
}
eng.Log.CurrMove(int(depth), move, int(numMoves+1))
}
givesCheck := pos.GivesCheck(move)
critical := move == hash || eng.stack.IsKiller(move)
history := eng.history.get(move)
newDepth := depth
numMoves++
if allowLeafsPruning && !critical && !givesCheck && localα > KnownLossScore {
// prune moves that do not raise alphas and moves that performed bad historically
// prune bad captures moves that performed bad historically
if isFutile(pos, static, α, depth*futilityMargin, move) ||
history < -10 && move.IsQuiet() ||
see(pos, move) < -futilityMargin {
dropped = true
continue
}
}
// extend good moves that also gives check
// see discussion: http://www.talkchess.com/forum/viewtopic.php?t=56361
// when the move gives check, history pruning and futility pruning are also disabled
if givesCheck && !seeSign(pos, move) {
newDepth += checkDepthExtension
critical = true
}
// late move reduction: search best moves with full depth, reduce remaining moves
lmr := int32(0)
if !sideIsChecked && depth > lmrDepthLimit && !critical {
// reduce quiet moves and bad captures more at high depths and after many quiet moves
// large numMoves means it's likely not a CUT node. Large depth means reductions are less risky
if move.IsQuiet() {
if history <= 0 {
lmr = 2 + min(depth, numMoves)/6
} else {
lmr = 1 + min(depth, numMoves)/6
}
} else if see := see(pos, move); see < -futilityMargin {
lmr = 2 + min(depth, numMoves)/6
} else if see < 0 {
lmr = 1 + min(depth, numMoves)/6
}
}
// skip illegal moves that leave the king in check
eng.DoMove(move)
if pos.IsChecked(us) {
eng.UndoMove()
continue
}
score := eng.tryMove(max(α, localα), β, newDepth, lmr, numMoves > 1)
if score >= β {
// fail high, cut node
eng.history.add(move, 5+5*depth)
eng.stack.SaveKiller(move)
eng.updateHash(failedHigh|(entry.kind&hasStatic), depth, score, move, int32(entry.static))
return score
}
if score > localα {
bestMove, localα = move, score
}
eng.history.add(move, -1)
}
bound := getBound(α, β, localα)
if !dropped && bestMove == NullMove {
// if no move was found then the game is over
bound = exact
if sideIsChecked {
localα = MatedScore + ply
} else {
localα = 0
}
}
eng.updateHash(bound|(entry.kind&hasStatic), depth, localα, bestMove, int32(entry.static))
return localα
}
// search starts the search up to depth depth
// the returned score is from current side to move POV
// estimated is the score from previous depths
// search runs searchTree at the given depth inside a gradually widening
// aspiration window centered on estimated; the returned score is from
// the side-to-move POV.
func (eng *Engine) search(depth, estimated int32) int32 {
	// this method only implements aspiration windows
	//
	// the gradual widening algorithm is the one used by RobboLito
	// and Stockfish and it is explained here:
	// http://www.talkchess.com/forum/viewtopic.php?topic_view=threads&p=499768&t=46624
	// γ is the window center, δ its half-width
	γ, δ := estimated, int32(initialAspirationWindow)
	α, β := max(γ-δ, -InfinityScore), min(γ+δ, InfinityScore)
	score := estimated
	if depth < 4 {
		// disable aspiration window for very low search depths
		α, β = -InfinityScore, +InfinityScore
	}
	for !eng.stopped {
		// at root a non-null move is required, cannot prune based on null-move
		score = eng.searchTree(α, β, depth)
		if score <= α {
			// fail low: widen downwards and grow the window
			α = max(α-δ, -InfinityScore)
			δ += δ / 2
		} else if score >= β {
			// fail high: widen upwards and grow the window
			β = min(β+δ, InfinityScore)
			δ += δ / 2
		} else {
			// score inside the window is exact
			return score
		}
	}
	return score
}
// searchMultiPV searches eng.options.MultiPV principal variations from current position
// returns score and the moves of the highest scoring pv line (possible empty)
// if a pv is not found (e.g. search is stopped during the first ply), return 0, nil
func (eng *Engine) searchMultiPV(depth, estimated int32) (int32, []Move) {
type pv struct {
score int32
moves []Move
}
multiPV := eng.Options.MultiPV
searchMultiPV := (eng.Options.HandicapLevel+4)/5 + 1
if multiPV < searchMultiPV {
multiPV = searchMultiPV
}
pvs := make([]pv, 0, multiPV)
eng.ignoreRootMoves = eng.ignoreRootMoves[:0]
for p := 0; p < multiPV; p++ {
if eng.UseAB {
// search using naive alphabeta
estimated = eng.searchAB(depth, estimated)
} else {
estimated = eng.search(depth, estimated)
}
if eng.stopped {
break // if eng has been stopped then this is not a legit pv
}
var moves []Move
if eng.UseAB {
// get pev from naive alphabeta's pv table
moves = eng.pvTableAB.Get(eng.Position)
} else {
moves = eng.pvTable.Get(eng.Position)
}
hasPV := len(moves) != 0 && !eng.isIgnoredRootMove(moves[0])
if p == 0 || hasPV { // at depth 0 we might not get a PV
pvs = append(pvs, pv{estimated, moves})
}
if !hasPV {
break
}
// if there is PV ignore the first move for the next PVs
eng.ignoreRootMoves = append(eng.ignoreRootMoves, moves[0])
}
// sort PVs by score
if len(pvs) == 0 {
return 0, nil
}
for i := range pvs {
for j := i; j >= 0; j-- {
if j == 0 || pvs[j-1].score > pvs[i].score {
tmp := pvs[i]
copy(pvs[j+1:i+1], pvs[j:i])
pvs[j] = tmp
break
}
}
}
for i := range pvs {
eng.Log.PrintPV(eng.Stats, i+1, pvs[i].score, pvs[i].moves)
}
// for best play return the PV with highest score
if eng.Options.HandicapLevel == 0 || len(pvs) <= 1 {
return pvs[0].score, pvs[0].moves
}
// PVs are sorted by score. Pick one PV at random
// and if the score is not too far off, return it
s := int32(eng.Options.HandicapLevel)
d := s*s/2 + s*10 + 5
n := rand.Intn(len(pvs))
for pvs[n].score+d < pvs[0].score {
n--
}
return pvs[n].score, pvs[n].moves
}
// Play evaluates current position. See PlayMoves for the returned values
func (eng *Engine) Play(tc *TimeControl) (score int32, moves []Move) {
return eng.PlayMoves(tc, nil)
}
// PlayMoves evaluates current position searching only moves specifid by rootMoves
//
// returns the principal variation, that is
// moves[0] is the best move found and
// moves[1] is the pondering move
//
// if rootMoves is nil searches all root moves
//
// returns a nil pv if no move was found because the game is already finished
// returns empty pv array if it's valid position, but no pv was found (e.g. search depth is 0)
//
// Time control, tc, should already be started
func (eng *Engine) PlayMoves(tc *TimeControl, rootMoves []Move) (score int32, moves []Move) {
if !initialized {
initEngine()
}
eng.Log.BeginSearch()
eng.Stats = Stats{Depth: -1}
eng.rootPly = eng.Position.Ply
eng.timeControl = tc
eng.stopped = false
eng.checkpoint = checkpointStep
eng.stack.Reset(eng.Position)
eng.history.newSearch()
eng.onlyRootMoves = rootMoves
for depth := int32(0); depth < 64; depth++ {
if !tc.NextDepth(depth) {
// stop if tc control says we are done
// search at least one depth, otherwise a move cannot be returned
break
}
eng.Stats.Depth = depth
if s, m := eng.searchMultiPV(depth, score); len(moves) == 0 || len(m) != 0 {
score, moves = s, m
}
}
eng.Log.EndSearch()
if len(moves) == 0 && !eng.Position.HasLegalMoves() {
return 0, nil
} else if moves == nil {
return score, []Move{}
}
return score, moves
}
// ply returns the ply from the beginning of the search
func (eng *Engine) ply() int32 {
return int32(eng.Position.Ply - eng.rootPly)
}
// SetPosition sets current position
// if pos is nil, the starting position is set
func (eng *Engine) SetPosition(pos *Position) {
if pos != nil {
eng.Position = pos
} else {
eng.Position, _ = PositionFromFEN(FENStartPos)
}
}
// DoMove executes a move.
func (eng *Engine) DoMove(move Move) {
eng.Position.DoMove(move)
GlobalHashTable.prefetch(eng.Position)
}
// UndoMove undoes the last move
func (eng *Engine) UndoMove() {
eng.Position.UndoMove()
}
// Score evaluates current position from current player's POV
func (eng *Engine) Score() int32 {
return Evaluate(eng.Position).GetCentipawnsScore() * eng.Position.Us().Multiplier()
}
// cachedScore implements a cache on top of Score
// the cached static evaluation is stored in the hashEntry
func (eng *Engine) cachedScore(e *hashEntry) int32 {
if e.kind&hasStatic == 0 {
e.kind |= hasStatic
| int16(eng.Score())
}
return int32(e.static)
}
// endPosition determines whether the current position is an end game
// returns score and a bool if the game has ended
func (eng *Engine) endPosition() (int32, bool) {
pos := eng.Position // shortcut
// trivial cases when kings are missing
if Kings(pos, White) == 0 {
if Kings(pos, Black) == 0 {
return 0, true // both kings are missing
}
return pos.Us().Multiplier() * (MatedScore + eng.ply()), true
}
if Kings(pos, Black) == 0 {
return pos.Us().Multiplier() * (MateScore - eng.ply()), true
}
// neither side can mate
if pos.InsufficientMaterial() {
return 0, true
}
// fifty full moves without a capture or a pawn move
if pos.FiftyMoveRule() {
return 0, true
}
// repetition is a draw
// at root we need to continue searching even if we saw two repetitions already,
// however we can prune deeper search only at two repetitions
if r := pos.ThreeFoldRepetition(); eng.ply() > 0 && r >= 2 || r >= 3 {
return 0, true
}
return 0, false
}
// retrieveHash gets from GlobalHashTable the current position
func (eng *Engine) retrieveHash() hashEntry {
entry := GlobalHashTable.get(eng.Position)
if entry.kind == 0 || entry.move != NullMove && !eng.Position.IsPseudoLegal(entry.move) {
eng.Stats.CacheMiss++
return hashEntry{}
}
// return mate score relative to root
// the score was adjusted relative to position before the hash table was updated
if entry.score < KnownLossScore {
entry.score += int16(eng.ply())
} else if entry.score > KnownWinScore {
entry.score -= int16(eng.ply())
}
eng.Stats.CacheHit++
return entry
}
// updateHash updates GlobalHashTable with the current position
func (eng *Engine) updateHash(flags hashFlags, depth, score int32, move Move, static int32) {
// if search is stopped then score cannot be trusted
if eng.stopped {
return
}
// update principal variation table in exact nodes
if flags&exact != 0 {
eng.pvTable.Put(eng.Position, move)
}
if eng.ply() == 0 && (len(eng.ignoreRootMoves) != 0 || len(eng.onlyRootMoves) != 0) {
// at root if there are moves to ignore (e.g. because of multipv)
// then this is an incomplete search, so don't update the hash
return
}
// save the mate score relative to the current position
// when retrieving from hash the score will be adjusted relative to root
if score < KnownLossScore {
score -= eng.ply()
} else if score > KnownWinScore {
score += eng.ply()
}
GlobalHashTable.put(eng.Position, hashEntry{
kind: flags,
score: int16(score),
depth: int8(depth),
move: move,
static: int16(static),
})
}
// searchQuiescence evaluates the position after solving all captures
//
// this is a very limited search which considers only some violent moves
// depth is ignored, so hash uses depth 0; search continues until
// stand pat or no capture can improve the score
func (eng *Engine) searchQuiescence(α, β int32) int32 {
eng.Stats.Nodes++
entry := eng.retrieveHash()
if score := int32(entry.score); isInBounds(entry.kind, α, β, score) {
return score
}
static := eng.cachedScore(&entry)
if static >= β {
// stand pat if the static score is already a cut-off
eng.updateHash(failedHigh|hasStatic, 0, static, entry.move, static)
return static
}
pos := eng.Position
us := pos.Us()
inCheck := pos.IsChecked(us)
localα := max(α, static)
bestMove := entry.move
eng.stack.GenerateMoves(Violent, NullMove)
for move := eng.stack.PopMove(); move != NullMove; move = eng.stack.PopMove() {
// prune futile moves that would anyway result in a stand-pat at that next depth
if !inCheck && isFutile(pos, static, α, futilityMargin, move) ||
!inCheck && seeSign(pos, move) {
continue
}
// discard illegal or losing captures
eng.DoMove(move)
if eng.Position.IsChecked(us) {
eng.UndoMove()
continue
}
score := -eng.searchQuiescence(-β, -localα)
eng.UndoMove()
if score >= β {
eng.updateHash(failedHigh|hasStatic, 0, score, move, static)
return score
}
if score > localα {
localα = score
bestMove = move
}
}
eng.updateHash(getBound(α, β, localα)|hasStatic, 0, localα, bestMove, static)
return localα
}
func initEngine() {
var fens = [FigureArraySize]string{
Pawn: "rnbqkbnr/ppp1pppp/8/8/3P4/8/PPP1PPPP/RNBQKBNR w - - 0 1",
Knight: "r1bqkbnr/pppppppp/8/8/3N4/8/PPPPPPPP/R1BQKBNR w - - 0 1",
Bishop: "rn1qkbnr/pppppppp/8/8/3B4/8/PPPPPPPP/RN1QKBNR w - - 0 1",
Rook: "rnbqkbn1/pppppppp/8/8/3R4/8/PPPPPPPP/RNBQKBN1 w - - 0 1",
Queen: "rnb1kbnr/pppppppp/8/8/3Q4/8/PPPPPPPP/RNB1KBNR w - - 0 1",
}
for f, fen := range fens {
if fen != "" {
pos, _ := PositionFromFEN(fen)
futilityFigureBonus[f] = Evaluate(pos).GetCentipawnsScore()
}
}
initialized = true
}
/////////////////////////////////////////////////////////////////////
| e.static = | identifier_name |
app.component.ts | import { Component, ElementRef, HostListener, Inject, LOCALE_ID, OnInit } from '@angular/core';
import { DOCUMENT } from '@angular/platform-browser';
import { HttpClient, HttpEvent, HttpInterceptor, HttpHandler, HttpRequest, HttpHeaders } from '@angular/common/http';
import { SimpleTimer } from 'ng2-simple-timer';
import { environment } from '../environments/environment';
import { Event, Timeslot } from './model/o365.model';
import { IKeyboardLayout, MD_KEYBOARD_LAYOUTS, MdKeyboardComponent, MdKeyboardRef, MdKeyboardService } from '@ngx-material-keyboard/core';
import * as angular from 'angular';
export class Resource {
id: string;
busy: boolean;
name: string;
o365Name: string;
}
export class TimeIncrement {
id: number;
value: string;
dateTimeValue: Date;
}
const RESOURCE: Resource = {
id: environment.hostname,
name: environment.resource_name,
o365Name: environment.resource_id,
busy: false
}
const hostname = environment.hostname;
const ip = environment.hostIP;
declare var newEvent: Event;
const NOEVENTS_MESSAGES: string[] = ["No Events Today", "Your schedule is clear", "My schedule is wide open"]
const TIMEZONE = environment.timezone;
declare var timeoutID: number;
declare var timeoutTTL: number;
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.css','./keyboard.css']
})
export class AppComponent implements OnInit {
debug = environment.debug;
transitionTimer: SimpleTimer;
controller = this.controller;
allowBookNowFunction = environment.allow_book_now_function;
bookEvent: boolean;
calendarWorkdayEndHour: number;
calendarWorkdayStartHour: number;
cancellation: boolean;
currentEvent: Event;
currentTimeout: any;
currentTimeoutTTL = 0;
eventData: string;
timePeriod: Timeslot;
date: Date;
dayMillis: number;
timeOptions = {
hour: "2-digit", minute: "2-digit"
};
events: Event[] = [];
helpRequested: boolean;
helpInformation: boolean;
helpPressed: boolean;
LOCALE = "en-us";
modalTransitionTimerCounter = 0;
modalTransitionTimerID = "modalTransitionTimer";
modalTimeout = environment.popupWindowTimeout;
newEvent: Event;
//newEventTitle: string;
newEventTitle = "Ad-hoc Meeting";
newEventEndTimeId: string;
newEventEndTimeValue: string;
newEventStartTimeId: string;
newEventStartTimeValue: string;
noEvents: boolean;
noEvents_message = "No Events Today";
numTimeslots: number = 0;
occupied: boolean;
refHours: string[] = [];
resource = RESOURCE;
restartRequested: boolean;
showAgenda: boolean;
showHelpButton = environment.showHelpButton;
showWaitSpinner: boolean;
selectedEvent: Event;
selectedStartValue: number;
timeIncrement = environment.time_slot_size; // minutes to increment select boxes by
timeSlots: Timeslot[] = [];
title = 'Room Scheduler';
schedulingWindow = 5; // minutes after a time window start time when the resource still be scheduled
unoccupied: boolean;
validTimeIncrements: TimeIncrement[] = [];
percentOfDayExpended: number;
private _keyboardRef: MdKeyboardRef<MdKeyboardComponent>;
darkTheme: boolean;
duration: number;
hasAction: boolean;
isDebug: boolean;
defaultLocale: string;
layout: string;
layouts: {
name: string;
layout: IKeyboardLayout;
}[];
get keyboardVisible(): boolean {
return this._keyboardService.isOpened;
}
constructor(private _keyboardService: MdKeyboardService,
@Inject(LOCALE_ID) public locale,
@Inject(MD_KEYBOARD_LAYOUTS) private _layouts,
private http: HttpClient) { }
ngOnInit(): void {
//var that = this;
window.addEventListener('load', function(){
document.addEventListener('touchstart', function(e){
if (timeoutID > 0 && timeoutID != null){
window.clearTimeout(timeoutID);
timeoutID = window.setTimeout(timeoutTTL);
}
e.preventDefault()
}, false)
}, false)
this.noEvents = true;
this.defaultLocale = ` ${this.LOCALE}`.slice(1);
this.utcTime();
this.transitionTimer = new SimpleTimer();
//console.log(this.timeSlots);
this.bookEvent = false;
this.cancellation = false;
this.calendarWorkdayEndHour = 17;
this.calendarWorkdayStartHour = 8;
this.currentEvent = this.events[1];
this.helpRequested = false;
this.helpPressed = false;
this.helpInformation = false;
this.restartRequested = false;
this.showWaitSpinner = false;
this.newEvent = null;
if (this.currentEvent != null) {
this.occupied = true;
}
else {
this.occupied = false;
}
//this.occupied = false;
this.showAgenda = false;
this.selectedEvent = null;
this.selectedStartValue = 0;
this.unoccupied = !(this.occupied);
/*for (var i = this.calendarWorkdayStartHour; i <= this.calendarWorkdayEndHour; i++) {
if (i > 12) {
var iNum = +i;
var nNum = iNum - 12;
this.refHours.push(nNum.toString());
}
else {
this.refHours.push(i.toString());
}
var newDate = new Date()
newDate.setHours(i);
}*/
this.refreshData();
}
calcTimeslots(): void {
this.numTimeslots = ( this.calendarWorkdayEndHour - this.calendarWorkdayStartHour ) * (60 / this.timeIncrement);
this.populateRefHours();
this.populateTimeslots();
}
populateRefHours(): void {
this.refHours = [];
for (var i=this.calendarWorkdayStartHour; i < this.calendarWorkdayEndHour; i++ ){
this.refHours.push(i.toString());
}
}
populateTimeslots(): void {
// Populate valid time scheduling window
var d = new Date();
var tomorrow = new Date();
tomorrow.setDate(d.getDate() + 1);
tomorrow.setTime(0);
var minutes = d.getMinutes();
//var hours = d.getHours();
var m = 0;
if (this.timeIncrement == 15) {
m = (((minutes + 7.5) / 15 | 0) * 15) % 60; // Nearest 15 minute interval, rounded down
}
else {
m = (((minutes + 15) / 30 | 0) * 30) % 60;
//m = (Math.round(minutes/30) * 30) % 60;
}
// var h = ((((minutes/105) + .5) | 0) + hours) % 24; // Not quite right.
d.setMinutes(m);
d.setSeconds(0);
for (var i = 0; i < this.numTimeslots; i++) {
var amPm = "AM";
var mins = d.getMinutes();
var hours = d.getHours();
if (hours > 12) {
amPm = "PM";
hours = hours - 12;
}
if ((new Date).getDay() == d.getDay()) {
this.validTimeIncrements.push({
id: i,
dateTimeValue: d,
value: d.toLocaleTimeString(this.LOCALE, this.timeOptions)
//value: hours.toString() + ":" + mins.toString() + " " + amPm
});
}
d.setMinutes(mins + this.timeIncrement);
}
//Populate timeslots
for (var j = 0; j < 96; j++) {
var tmpTime1 = new Date();
var tmpTime2 = new Date(tmpTime1.valueOf());
var t2 = 0;
var t = new Timeslot();
tmpTime1.setMinutes(j * 15);
t.Start = tmpTime1;
if (j < 96) {
t2 = j + 1;
}
else {
t2 = j;
}
tmpTime2.setMinutes((j + 1) * 15);
t.End = tmpTime2;
this.timeSlots.push(t);
/*var h = t.Start.getHours();
if (t.Start.getHours() > 12) {
h = +(t.Start.getHours()) - 12;
}
if (this.refHours.length <= 0) {
this.refHours.push(h.toPrecision(1).toString());
}
else {
if (this.refHours[-1].valueOf() != h.toPrecision(1).toString()) {
this.refHours.push(h.toPrecision(1).toString());
}
}*/
tmpTime1 = null;
tmpTime2 = null;
}
}
openKeyboard(locale = this.defaultLocale) {
this._keyboardRef = this._keyboardService.open(locale, {
//darkTheme: this.darkTheme,
//darkTheme: true,
darkTheme:false,
duration: this.duration,
hasAction: this.hasAction,
isDebug: this.isDebug
});
}
closeCurrentKeyboard() |
toggleDarkTheme(dark: boolean) {
this.darkTheme = dark;
this._keyboardRef.darkTheme = dark;
}
availabilityClass(e: Event): string {
if (e.Subject.toString() == 'Available') {
return "agenda-view-row-available";
}
else {
return "agenda-view-row-unavailable";
}
}
bookNewEvent(): void {
/*//this.reset();
var d = new Date();
this.bookEvent = true;
this.newEvent = new Event();
var year = d.getFullYear().toString();
var month = d.getMonth().toString();
var day = d.getDay().toString();
var s = "" + year + "-" + month + "-" + day + "T";
var e = "" + year + "-" + month + "-" + day + "T";
var sH = "";
var eH = "";
if (this.newEventStartAmPm === "AM") {
sH = (this.newEventStartHour).toString();
}
else {
var sI = +(this.newEventStartHour);
sH = (sI + 12).toString();
}
if (this.newEventEndAmPm === "AM") {
eH = (this.newEventEndHour).toString();
}
else {
var eI = +(this.newEventEndHour);
eH = (eI + 12).toString();
}
s += sH + ":" + this.newEventStartMinute + ":000";
e += eH + ":" + this.newEventEndMinute + ":000";*/
this.reset();
}
bookNow(): void {
this.reset();
this.startScreenResetTimeout(70);
this.bookEvent = true;
this.calcTimeslots();
}
cancelEvent(event: Event): void {
this.reset();
this.cancellation = true;
}
cancelPage_no(): void {
this.reset();
}
cancelPage_yes(): void {
this.reset();
}
consolidate_events(): void {
//console.log("Consolidating events");
var consolidate = true;
var i = this.events.length - 1;
////console.log(i.toString());
while (consolidate) {
if (i > 0) {
if (this.events[i].Subject === this.events[i - 1].Subject) {
this.events[i - 1].End = new Date(this.events[i].End.getDate());
this.events.pop();
i = this.events.length - 1;
}
else {
i--;
}
if (i == 0) {
consolidate = false;
break;
}
}
else {
break;
}
}
}
currentMeeting() {
var now = new Date();
for (var i = 0; i < this.events.length; i++) {
if ((new Date(this.events[i].Start) <= now) && (new Date(this.events[i].End) >= now)) {
this.currentEvent = this.events[i];
//console.log(this.currentEvent);
return;
}
}
this.currentEvent = null;
//console.log(this.currentEvent);
}
/*getTimePeriod(d:Date): number {
var t = new Date(d.getDate());
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = t.getHours() * 60 * 60;
var minutes: number = t.getMinutes() * 60;
var seconds: number = t.getSeconds();
var ms: number = (hours + minutes + seconds) * 1000;
var t1: number = t.getTime();
t.setHours(0);
t.setMinutes(0);
t.setSeconds(0);
var t2 = t.getTime();
var ret = 0;
ret = Math.floor((t1 - t2) / msIn15Min);
return ret;
}*/
currentTimePeriod(): number { // Return time period (0<x<96) for current time
var now = new Date();
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = now.getHours() * 60 * 60;
var minutes: number = now.getMinutes() * 60;
var seconds: number = now.getSeconds();
var ms: number = (hours + minutes + seconds) * 1000;
var t1: number = now.getTime();
now.setHours(0);
now.setMinutes(0);
now.setSeconds(0);
var t2 = now.getTime();
var ret = 0;
ret = Math.floor((t1 - t2) / msIn15Min);
return ret;
}
deriveVariablesFromHostname(res: Resource): void {
var buildingAndRoom = hostname.split(" ", 2);
var building = buildingAndRoom[0];
var room = buildingAndRoom[1];
res.id = building + "-" + room;
res.busy = false;
res.name = building + " " + room;
res.o365Name = res.id;
}
durationString(selectedEvent): string {
var duration = "";
var Date_Start = new Date(selectedEvent.start);
var Date_End = new Date(selectedEvent.end);
var Difference = Date_End.valueOf() - Date_Start.valueOf();
var diffDays = Math.floor(Difference / 86400000); // days
var diffHrs = Math.floor((Difference % 86400000) / 3600000); // hours
var diffMins = Math.round(((Difference % 86400000) % 3600000) / 60000); // minutes
if (diffMins > 0) {
duration = diffMins.toString() + " Minutes"
}
if (diffHrs > 0) {
duration = diffHrs + " Hours " + duration;
}
return (duration);
}
evalTime(): void {
//this.refreshData();
if (this.currentEvent != null) {
this.occupied = true;
}
else {
this.occupied = false;
}
this.unoccupied = !(this.occupied);
}
getSelectedText(elementId,index): string {
var elem = document.getElementById(elementId).getElementsByTagName( 'option' )[index];
return elem.text;
}
helpClick(): void {
this.helpPressed = true;
this.startScreenResetTimeout(10);
}
helpInformationRequest(): void {
this.helpPressed = false;
this.helpInformation = true;
//this.resetModal();
// show information;
}
helpRequest(): void {
this.helpPressed = false;
this.helpRequested = true;
var resp = this.http.post(environment.slack_webhook_url, "{\"text\":\"Help request from " + this.resource.name + "\"}").subscribe();
////console.log(resp);
this.startScreenResetTimeout(3);
}
modalTimerCallback(): void {
if (this.modalTransitionTimerCounter <= this.modalTimeout) {
this.modalTransitionTimerCounter++;
} else {
this.subscribeHelpTimer();
this.resetModal();
}
}
onSelect(event: Event): void {
this.selectedEvent = event;
}
onStartChange(selectedStartOption): void {
var i = Number(selectedStartOption) + 1;
this.newEventEndTimeId = i.toString();
this.getNewStartTime(selectedStartOption);
this.getNewEndTime(this.newEventEndTimeId);
}
percent(): void {
setInterval(function() {
var secondsInADay = 24 * 60 * 60;
var now = new Date();
var hours = now.getHours() * 60 * 60;
var minutes = now.getMinutes() * 60;
var seconds = now.getSeconds();
var totalSeconds = hours + minutes + seconds;
var percentSeconds = 100 * totalSeconds / secondsInADay;
this.percentOfDayExpended = percentSeconds;
}, 1000);
}
refreshData(): void {
this.populateRefHours();
this.events = [];
this.noEvents = true;
var url = 'http://' + ip + ':5000/v1.0/exchange/calendar/events';
this.http.get(url).subscribe(data => {
angular.forEach(data, function(obj) {
var e = new Event();
e.Subject = obj.subject;
e.Start = obj.start;
e.End = obj.end;
this.events.push(e);
this.noEvents = false;
}, this);
});
/*for (var i = 0; i < this.timeSlots.length; i++) {
var e = new Event();
e.Subject = "Available";
e.Start = this.timeSlots[i].Start;
e.End = this.timeSlots[i].End;
this.events.push(e);
}
this.consolidate_events();*/
this.currentMeeting();
}
reset(): void {
this.refreshData();
this.bookEvent = false;
this.cancellation = false;
this.helpInformation = false;
this.helpPressed = false;
this.helpRequested = false;
this.newEventEndTimeId = null;
this.newEventStartTimeId = null;
this.restartRequested = false;
this.showAgenda = false;
this.showWaitSpinner = false;
}
resetModal(): void {
this.helpPressed = false;
this.helpRequested = false;
var m = document.getElementsByClassName("modalContent");
for (var mChild in m) {
setTimeout(function() {
var m = document.getElementsByClassName("modal")[0];
m.classList.add("hidden");
}, 2000,this);
}
}
resetTransitionTimer(): void {
this.transitionTimer.delTimer('modalTransition');
}
restartBrowser(): void {
this.helpInformation = false;
this.restartRequested = true;
this.startScreenResetTimeout(3);
this.refreshData();
window.location.reload(false);
}
resetTimeouts(): void {
this.startScreenResetTimeout(this.currentTimeoutTTL);
}
scheduleEvent(): void {
this.reset();
this.startScreenResetTimeout(10);
//this.refreshData();
this.showAgenda = true;
}
scrollReferenceEvent(elem): void {
var a = document.getElementById("agenda");
var t = document.getElementById("current-time-bar-wrapper");
a.scrollTop = elem.scrollTop;
t.scrollTop = elem.scrollTop;
}
scrollAgendaEvent(elem): void {
var a = document.getElementById("refHours");
var t = document.getElementById("current-time-bar-wrapper");
a.scrollTop = elem.scrollTop;
t.scrollTop = elem.scrollTop;
}
selectByClass(selector: string): HTMLCollectionOf<Element> {
var elements = document.getElementsByClassName(selector);
return elements;
}
selectById(selector: string): HTMLElement {
var element = document.getElementById(selector);
return element;
}
startScreenResetTimeout(ttl): void { //ttl in s
var t = ttl * 1000; //convert s to ms
this.currentTimeoutTTL = t;
var that = this;
this.stopScreenResetTimeout();
this.currentTimeout = setTimeout(function(){
that.reset();
that.closeCurrentKeyboard();
},t);
}
stopScreenResetTimeout(): void {
if (this.currentTimeout != null) {
clearTimeout(this.currentTimeout);
}
}
getNewEndTime(newTime): void {
this.newEventEndTimeValue = this.getSelectedText("newEventEndTime",newTime);
//console.log("end: " + this.newEventEndTimeValue);
}
getNewStartTime(newTime): void{
this.newEventStartTimeValue = this.getSelectedText("newEventStartTime",newTime);
//console.log("start: " + this.newEventStartTimeValue);
}
submitEventForm(): void {
this.showWaitSpinner=true;
var e = this.newEventEndTimeValue;
var s = this.newEventStartTimeValue;
this.submitEvent("Ad-hoc Meeting", s,e);
}
submitEvent(tmpSubject: string, tmpStartTime: string, tmpEndTime: string): void {
var req = new Event();
var today = new Date();
var M = today.getMonth(); //month is zero-indexed
var d = today.getDate();
var y = today.getFullYear();
var tzoffset = today.getTimezoneOffset();
var sH = 0;
var sM = 0;
var eH = 0;
var eM = 0;
const [starttime, startmodifier] = tmpStartTime.split(' ');
let [starthours, startminutes] = starttime.split(':');
if (starthours === '12') {
starthours = '00';
}
if (startmodifier === 'PM') {
sH = parseInt(starthours, 10) + 12;
}
else {
sH = parseInt(starthours)
}
sM = parseInt(startminutes);
const [endtime, endmodifier] = tmpEndTime.split(' ');
let [endhours, endminutes] = endtime.split(':');
if (endhours === '12') {
endhours = '00';
}
if (endmodifier === 'PM') {
eH = parseInt(endhours, 10) + 12;
}
else{
eH = parseInt(endhours)
}
eM = parseInt(endminutes);
//new Date(year, month, day, hours, minutes, seconds, milliseconds);
var startTime = new Date(y,M,d,sH,sM,0);
var endTime = new Date(y,M,d,eH,eM,0);
req.Subject = tmpSubject;
req.Start = new Date(startTime.getTime() - tzoffset*60000);
req.End = new Date(endTime.getTime() - tzoffset*60000);
/////////
/// SUBMIT
///////
var url = 'http://' + ip + ':5000/v1.0/exchange/calendar/events';
var resp = this.http.post(url,JSON.stringify(req),{headers: new HttpHeaders().set('Content-Type', 'application/json')}).subscribe();
//this.restartRequested = true;
this.startScreenResetTimeout(1);
//this.refreshData();
//window.location.reload(false);
}
subscribeHelpTimer(): void {
if (this.modalTransitionTimerID) {
// Unsubscribe if timer Id is defined
this.transitionTimer.unsubscribe(this.modalTransitionTimerID);
this.modalTransitionTimerCounter = 0;
} else {
// Subscribe if timer Id is undefined
this.modalTransitionTimerID = this.transitionTimer.subscribe('modalTransition', () => this.modalTimerCallback());
}
}
utcTime(): void {
setInterval(() => {
this.date = new Date();
this.timePeriod = this.timeSlots[this.currentTimePeriod()];
this.percent();
this.currentMeeting();
this.evalTime();
}, 1000);
}
wait(): void {
this.showWaitSpinner = true;
}
}
| {
if (this._keyboardRef) {
this._keyboardRef.dismiss();
}
} | identifier_body |
app.component.ts | import { Component, ElementRef, HostListener, Inject, LOCALE_ID, OnInit } from '@angular/core';
import { DOCUMENT } from '@angular/platform-browser';
import { HttpClient, HttpEvent, HttpInterceptor, HttpHandler, HttpRequest, HttpHeaders } from '@angular/common/http';
import { SimpleTimer } from 'ng2-simple-timer';
import { environment } from '../environments/environment';
import { Event, Timeslot } from './model/o365.model';
import { IKeyboardLayout, MD_KEYBOARD_LAYOUTS, MdKeyboardComponent, MdKeyboardRef, MdKeyboardService } from '@ngx-material-keyboard/core';
import * as angular from 'angular';
export class Resource {
id: string;
busy: boolean;
name: string;
o365Name: string;
}
export class TimeIncrement {
id: number;
value: string;
dateTimeValue: Date;
}
const RESOURCE: Resource = {
id: environment.hostname,
name: environment.resource_name,
o365Name: environment.resource_id,
busy: false
}
const hostname = environment.hostname;
const ip = environment.hostIP;
declare var newEvent: Event;
const NOEVENTS_MESSAGES: string[] = ["No Events Today", "Your schedule is clear", "My schedule is wide open"]
const TIMEZONE = environment.timezone;
declare var timeoutID: number;
declare var timeoutTTL: number;
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.css','./keyboard.css']
})
export class AppComponent implements OnInit {
debug = environment.debug;
transitionTimer: SimpleTimer;
controller = this.controller;
allowBookNowFunction = environment.allow_book_now_function;
bookEvent: boolean;
calendarWorkdayEndHour: number;
calendarWorkdayStartHour: number;
cancellation: boolean;
currentEvent: Event;
currentTimeout: any;
currentTimeoutTTL = 0;
eventData: string;
timePeriod: Timeslot;
date: Date;
dayMillis: number;
timeOptions = {
hour: "2-digit", minute: "2-digit"
};
events: Event[] = [];
helpRequested: boolean;
helpInformation: boolean;
helpPressed: boolean;
LOCALE = "en-us";
modalTransitionTimerCounter = 0;
modalTransitionTimerID = "modalTransitionTimer";
modalTimeout = environment.popupWindowTimeout;
newEvent: Event;
//newEventTitle: string;
newEventTitle = "Ad-hoc Meeting";
newEventEndTimeId: string;
newEventEndTimeValue: string;
newEventStartTimeId: string;
newEventStartTimeValue: string;
noEvents: boolean;
noEvents_message = "No Events Today";
numTimeslots: number = 0;
occupied: boolean;
refHours: string[] = [];
resource = RESOURCE;
restartRequested: boolean;
showAgenda: boolean;
showHelpButton = environment.showHelpButton;
showWaitSpinner: boolean;
selectedEvent: Event;
selectedStartValue: number;
timeIncrement = environment.time_slot_size; // minutes to increment select boxes by
timeSlots: Timeslot[] = [];
title = 'Room Scheduler';
schedulingWindow = 5; // minutes after a time window start time when the resource still be scheduled
unoccupied: boolean;
validTimeIncrements: TimeIncrement[] = [];
percentOfDayExpended: number;
private _keyboardRef: MdKeyboardRef<MdKeyboardComponent>;
darkTheme: boolean;
duration: number;
hasAction: boolean;
isDebug: boolean;
defaultLocale: string;
layout: string;
layouts: {
name: string;
layout: IKeyboardLayout;
}[];
get keyboardVisible(): boolean {
return this._keyboardService.isOpened;
}
constructor(private _keyboardService: MdKeyboardService,
@Inject(LOCALE_ID) public locale,
@Inject(MD_KEYBOARD_LAYOUTS) private _layouts,
private http: HttpClient) { }
ngOnInit(): void {
//var that = this;
window.addEventListener('load', function(){
document.addEventListener('touchstart', function(e){
if (timeoutID > 0 && timeoutID != null){
window.clearTimeout(timeoutID);
timeoutID = window.setTimeout(timeoutTTL);
}
e.preventDefault()
}, false)
}, false)
this.noEvents = true;
this.defaultLocale = ` ${this.LOCALE}`.slice(1);
this.utcTime();
this.transitionTimer = new SimpleTimer();
//console.log(this.timeSlots);
this.bookEvent = false;
this.cancellation = false;
this.calendarWorkdayEndHour = 17;
this.calendarWorkdayStartHour = 8;
this.currentEvent = this.events[1];
this.helpRequested = false;
this.helpPressed = false;
this.helpInformation = false;
this.restartRequested = false;
this.showWaitSpinner = false;
this.newEvent = null;
if (this.currentEvent != null) {
this.occupied = true;
}
else {
this.occupied = false;
}
//this.occupied = false;
this.showAgenda = false;
this.selectedEvent = null;
this.selectedStartValue = 0;
this.unoccupied = !(this.occupied);
/*for (var i = this.calendarWorkdayStartHour; i <= this.calendarWorkdayEndHour; i++) {
if (i > 12) {
var iNum = +i;
var nNum = iNum - 12;
this.refHours.push(nNum.toString());
}
else {
this.refHours.push(i.toString());
}
var newDate = new Date()
newDate.setHours(i);
}*/
this.refreshData();
}
calcTimeslots(): void {
this.numTimeslots = ( this.calendarWorkdayEndHour - this.calendarWorkdayStartHour ) * (60 / this.timeIncrement);
this.populateRefHours();
this.populateTimeslots();
}
populateRefHours(): void {
this.refHours = [];
for (var i=this.calendarWorkdayStartHour; i < this.calendarWorkdayEndHour; i++ ){
this.refHours.push(i.toString());
}
}
| (): void {
// Populate valid time scheduling window
var d = new Date();
var tomorrow = new Date();
tomorrow.setDate(d.getDate() + 1);
tomorrow.setTime(0);
var minutes = d.getMinutes();
//var hours = d.getHours();
var m = 0;
if (this.timeIncrement == 15) {
m = (((minutes + 7.5) / 15 | 0) * 15) % 60; // Nearest 15 minute interval, rounded down
}
else {
m = (((minutes + 15) / 30 | 0) * 30) % 60;
//m = (Math.round(minutes/30) * 30) % 60;
}
// var h = ((((minutes/105) + .5) | 0) + hours) % 24; // Not quite right.
d.setMinutes(m);
d.setSeconds(0);
for (var i = 0; i < this.numTimeslots; i++) {
var amPm = "AM";
var mins = d.getMinutes();
var hours = d.getHours();
if (hours > 12) {
amPm = "PM";
hours = hours - 12;
}
if ((new Date).getDay() == d.getDay()) {
this.validTimeIncrements.push({
id: i,
dateTimeValue: d,
value: d.toLocaleTimeString(this.LOCALE, this.timeOptions)
//value: hours.toString() + ":" + mins.toString() + " " + amPm
});
}
d.setMinutes(mins + this.timeIncrement);
}
//Populate timeslots
for (var j = 0; j < 96; j++) {
var tmpTime1 = new Date();
var tmpTime2 = new Date(tmpTime1.valueOf());
var t2 = 0;
var t = new Timeslot();
tmpTime1.setMinutes(j * 15);
t.Start = tmpTime1;
if (j < 96) {
t2 = j + 1;
}
else {
t2 = j;
}
tmpTime2.setMinutes((j + 1) * 15);
t.End = tmpTime2;
this.timeSlots.push(t);
/*var h = t.Start.getHours();
if (t.Start.getHours() > 12) {
h = +(t.Start.getHours()) - 12;
}
if (this.refHours.length <= 0) {
this.refHours.push(h.toPrecision(1).toString());
}
else {
if (this.refHours[-1].valueOf() != h.toPrecision(1).toString()) {
this.refHours.push(h.toPrecision(1).toString());
}
}*/
tmpTime1 = null;
tmpTime2 = null;
}
}
openKeyboard(locale = this.defaultLocale) {
this._keyboardRef = this._keyboardService.open(locale, {
//darkTheme: this.darkTheme,
//darkTheme: true,
darkTheme:false,
duration: this.duration,
hasAction: this.hasAction,
isDebug: this.isDebug
});
}
closeCurrentKeyboard() {
if (this._keyboardRef) {
this._keyboardRef.dismiss();
}
}
toggleDarkTheme(dark: boolean) {
this.darkTheme = dark;
this._keyboardRef.darkTheme = dark;
}
availabilityClass(e: Event): string {
if (e.Subject.toString() == 'Available') {
return "agenda-view-row-available";
}
else {
return "agenda-view-row-unavailable";
}
}
bookNewEvent(): void {
/*//this.reset();
var d = new Date();
this.bookEvent = true;
this.newEvent = new Event();
var year = d.getFullYear().toString();
var month = d.getMonth().toString();
var day = d.getDay().toString();
var s = "" + year + "-" + month + "-" + day + "T";
var e = "" + year + "-" + month + "-" + day + "T";
var sH = "";
var eH = "";
if (this.newEventStartAmPm === "AM") {
sH = (this.newEventStartHour).toString();
}
else {
var sI = +(this.newEventStartHour);
sH = (sI + 12).toString();
}
if (this.newEventEndAmPm === "AM") {
eH = (this.newEventEndHour).toString();
}
else {
var eI = +(this.newEventEndHour);
eH = (eI + 12).toString();
}
s += sH + ":" + this.newEventStartMinute + ":000";
e += eH + ":" + this.newEventEndMinute + ":000";*/
this.reset();
}
bookNow(): void {
this.reset();
this.startScreenResetTimeout(70);
this.bookEvent = true;
this.calcTimeslots();
}
cancelEvent(event: Event): void {
this.reset();
this.cancellation = true;
}
cancelPage_no(): void {
this.reset();
}
cancelPage_yes(): void {
this.reset();
}
consolidate_events(): void {
//console.log("Consolidating events");
var consolidate = true;
var i = this.events.length - 1;
////console.log(i.toString());
while (consolidate) {
if (i > 0) {
if (this.events[i].Subject === this.events[i - 1].Subject) {
this.events[i - 1].End = new Date(this.events[i].End.getDate());
this.events.pop();
i = this.events.length - 1;
}
else {
i--;
}
if (i == 0) {
consolidate = false;
break;
}
}
else {
break;
}
}
}
currentMeeting() {
var now = new Date();
for (var i = 0; i < this.events.length; i++) {
if ((new Date(this.events[i].Start) <= now) && (new Date(this.events[i].End) >= now)) {
this.currentEvent = this.events[i];
//console.log(this.currentEvent);
return;
}
}
this.currentEvent = null;
//console.log(this.currentEvent);
}
/*getTimePeriod(d:Date): number {
var t = new Date(d.getDate());
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = t.getHours() * 60 * 60;
var minutes: number = t.getMinutes() * 60;
var seconds: number = t.getSeconds();
var ms: number = (hours + minutes + seconds) * 1000;
var t1: number = t.getTime();
t.setHours(0);
t.setMinutes(0);
t.setSeconds(0);
var t2 = t.getTime();
var ret = 0;
ret = Math.floor((t1 - t2) / msIn15Min);
return ret;
}*/
currentTimePeriod(): number { // Return time period (0<x<96) for current time
var now = new Date();
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = now.getHours() * 60 * 60;
var minutes: number = now.getMinutes() * 60;
var seconds: number = now.getSeconds();
var ms: number = (hours + minutes + seconds) * 1000;
var t1: number = now.getTime();
now.setHours(0);
now.setMinutes(0);
now.setSeconds(0);
var t2 = now.getTime();
var ret = 0;
ret = Math.floor((t1 - t2) / msIn15Min);
return ret;
}
deriveVariablesFromHostname(res: Resource): void {
var buildingAndRoom = hostname.split(" ", 2);
var building = buildingAndRoom[0];
var room = buildingAndRoom[1];
res.id = building + "-" + room;
res.busy = false;
res.name = building + " " + room;
res.o365Name = res.id;
}
durationString(selectedEvent): string {
var duration = "";
var Date_Start = new Date(selectedEvent.start);
var Date_End = new Date(selectedEvent.end);
var Difference = Date_End.valueOf() - Date_Start.valueOf();
var diffDays = Math.floor(Difference / 86400000); // days
var diffHrs = Math.floor((Difference % 86400000) / 3600000); // hours
var diffMins = Math.round(((Difference % 86400000) % 3600000) / 60000); // minutes
if (diffMins > 0) {
duration = diffMins.toString() + " Minutes"
}
if (diffHrs > 0) {
duration = diffHrs + " Hours " + duration;
}
return (duration);
}
evalTime(): void {
//this.refreshData();
if (this.currentEvent != null) {
this.occupied = true;
}
else {
this.occupied = false;
}
this.unoccupied = !(this.occupied);
}
getSelectedText(elementId,index): string {
var elem = document.getElementById(elementId).getElementsByTagName( 'option' )[index];
return elem.text;
}
helpClick(): void {
this.helpPressed = true;
this.startScreenResetTimeout(10);
}
helpInformationRequest(): void {
this.helpPressed = false;
this.helpInformation = true;
//this.resetModal();
// show information;
}
helpRequest(): void {
this.helpPressed = false;
this.helpRequested = true;
var resp = this.http.post(environment.slack_webhook_url, "{\"text\":\"Help request from " + this.resource.name + "\"}").subscribe();
////console.log(resp);
this.startScreenResetTimeout(3);
}
modalTimerCallback(): void {
if (this.modalTransitionTimerCounter <= this.modalTimeout) {
this.modalTransitionTimerCounter++;
} else {
this.subscribeHelpTimer();
this.resetModal();
}
}
onSelect(event: Event): void {
this.selectedEvent = event;
}
onStartChange(selectedStartOption): void {
var i = Number(selectedStartOption) + 1;
this.newEventEndTimeId = i.toString();
this.getNewStartTime(selectedStartOption);
this.getNewEndTime(this.newEventEndTimeId);
}
percent(): void {
setInterval(function() {
var secondsInADay = 24 * 60 * 60;
var now = new Date();
var hours = now.getHours() * 60 * 60;
var minutes = now.getMinutes() * 60;
var seconds = now.getSeconds();
var totalSeconds = hours + minutes + seconds;
var percentSeconds = 100 * totalSeconds / secondsInADay;
this.percentOfDayExpended = percentSeconds;
}, 1000);
}
refreshData(): void {
this.populateRefHours();
this.events = [];
this.noEvents = true;
var url = 'http://' + ip + ':5000/v1.0/exchange/calendar/events';
this.http.get(url).subscribe(data => {
angular.forEach(data, function(obj) {
var e = new Event();
e.Subject = obj.subject;
e.Start = obj.start;
e.End = obj.end;
this.events.push(e);
this.noEvents = false;
}, this);
});
/*for (var i = 0; i < this.timeSlots.length; i++) {
var e = new Event();
e.Subject = "Available";
e.Start = this.timeSlots[i].Start;
e.End = this.timeSlots[i].End;
this.events.push(e);
}
this.consolidate_events();*/
this.currentMeeting();
}
reset(): void {
this.refreshData();
this.bookEvent = false;
this.cancellation = false;
this.helpInformation = false;
this.helpPressed = false;
this.helpRequested = false;
this.newEventEndTimeId = null;
this.newEventStartTimeId = null;
this.restartRequested = false;
this.showAgenda = false;
this.showWaitSpinner = false;
}
resetModal(): void {
this.helpPressed = false;
this.helpRequested = false;
var m = document.getElementsByClassName("modalContent");
for (var mChild in m) {
setTimeout(function() {
var m = document.getElementsByClassName("modal")[0];
m.classList.add("hidden");
}, 2000,this);
}
}
resetTransitionTimer(): void {
this.transitionTimer.delTimer('modalTransition');
}
restartBrowser(): void {
this.helpInformation = false;
this.restartRequested = true;
this.startScreenResetTimeout(3);
this.refreshData();
window.location.reload(false);
}
resetTimeouts(): void {
this.startScreenResetTimeout(this.currentTimeoutTTL);
}
scheduleEvent(): void {
this.reset();
this.startScreenResetTimeout(10);
//this.refreshData();
this.showAgenda = true;
}
scrollReferenceEvent(elem): void {
var a = document.getElementById("agenda");
var t = document.getElementById("current-time-bar-wrapper");
a.scrollTop = elem.scrollTop;
t.scrollTop = elem.scrollTop;
}
scrollAgendaEvent(elem): void {
var a = document.getElementById("refHours");
var t = document.getElementById("current-time-bar-wrapper");
a.scrollTop = elem.scrollTop;
t.scrollTop = elem.scrollTop;
}
selectByClass(selector: string): HTMLCollectionOf<Element> {
var elements = document.getElementsByClassName(selector);
return elements;
}
selectById(selector: string): HTMLElement {
var element = document.getElementById(selector);
return element;
}
startScreenResetTimeout(ttl): void { //ttl in s
var t = ttl * 1000; //convert s to ms
this.currentTimeoutTTL = t;
var that = this;
this.stopScreenResetTimeout();
this.currentTimeout = setTimeout(function(){
that.reset();
that.closeCurrentKeyboard();
},t);
}
stopScreenResetTimeout(): void {
if (this.currentTimeout != null) {
clearTimeout(this.currentTimeout);
}
}
getNewEndTime(newTime): void {
this.newEventEndTimeValue = this.getSelectedText("newEventEndTime",newTime);
//console.log("end: " + this.newEventEndTimeValue);
}
getNewStartTime(newTime): void{
this.newEventStartTimeValue = this.getSelectedText("newEventStartTime",newTime);
//console.log("start: " + this.newEventStartTimeValue);
}
submitEventForm(): void {
this.showWaitSpinner=true;
var e = this.newEventEndTimeValue;
var s = this.newEventStartTimeValue;
this.submitEvent("Ad-hoc Meeting", s,e);
}
submitEvent(tmpSubject: string, tmpStartTime: string, tmpEndTime: string): void {
var req = new Event();
var today = new Date();
var M = today.getMonth(); //month is zero-indexed
var d = today.getDate();
var y = today.getFullYear();
var tzoffset = today.getTimezoneOffset();
var sH = 0;
var sM = 0;
var eH = 0;
var eM = 0;
const [starttime, startmodifier] = tmpStartTime.split(' ');
let [starthours, startminutes] = starttime.split(':');
if (starthours === '12') {
starthours = '00';
}
if (startmodifier === 'PM') {
sH = parseInt(starthours, 10) + 12;
}
else {
sH = parseInt(starthours)
}
sM = parseInt(startminutes);
const [endtime, endmodifier] = tmpEndTime.split(' ');
let [endhours, endminutes] = endtime.split(':');
if (endhours === '12') {
endhours = '00';
}
if (endmodifier === 'PM') {
eH = parseInt(endhours, 10) + 12;
}
else{
eH = parseInt(endhours)
}
eM = parseInt(endminutes);
//new Date(year, month, day, hours, minutes, seconds, milliseconds);
var startTime = new Date(y,M,d,sH,sM,0);
var endTime = new Date(y,M,d,eH,eM,0);
req.Subject = tmpSubject;
req.Start = new Date(startTime.getTime() - tzoffset*60000);
req.End = new Date(endTime.getTime() - tzoffset*60000);
/////////
/// SUBMIT
///////
var url = 'http://' + ip + ':5000/v1.0/exchange/calendar/events';
var resp = this.http.post(url,JSON.stringify(req),{headers: new HttpHeaders().set('Content-Type', 'application/json')}).subscribe();
//this.restartRequested = true;
this.startScreenResetTimeout(1);
//this.refreshData();
//window.location.reload(false);
}
subscribeHelpTimer(): void {
if (this.modalTransitionTimerID) {
// Unsubscribe if timer Id is defined
this.transitionTimer.unsubscribe(this.modalTransitionTimerID);
this.modalTransitionTimerCounter = 0;
} else {
// Subscribe if timer Id is undefined
this.modalTransitionTimerID = this.transitionTimer.subscribe('modalTransition', () => this.modalTimerCallback());
}
}
utcTime(): void {
setInterval(() => {
this.date = new Date();
this.timePeriod = this.timeSlots[this.currentTimePeriod()];
this.percent();
this.currentMeeting();
this.evalTime();
}, 1000);
}
wait(): void {
this.showWaitSpinner = true;
}
}
| populateTimeslots | identifier_name |
app.component.ts | import { Component, ElementRef, HostListener, Inject, LOCALE_ID, OnInit } from '@angular/core';
import { DOCUMENT } from '@angular/platform-browser';
import { HttpClient, HttpEvent, HttpInterceptor, HttpHandler, HttpRequest, HttpHeaders } from '@angular/common/http';
import { SimpleTimer } from 'ng2-simple-timer';
import { environment } from '../environments/environment';
import { Event, Timeslot } from './model/o365.model';
import { IKeyboardLayout, MD_KEYBOARD_LAYOUTS, MdKeyboardComponent, MdKeyboardRef, MdKeyboardService } from '@ngx-material-keyboard/core';
import * as angular from 'angular';
export class Resource {
id: string;
busy: boolean;
name: string;
o365Name: string;
}
export class TimeIncrement {
id: number;
value: string;
dateTimeValue: Date;
}
const RESOURCE: Resource = {
id: environment.hostname,
name: environment.resource_name,
o365Name: environment.resource_id,
busy: false
}
const hostname = environment.hostname;
const ip = environment.hostIP;
declare var newEvent: Event;
const NOEVENTS_MESSAGES: string[] = ["No Events Today", "Your schedule is clear", "My schedule is wide open"]
const TIMEZONE = environment.timezone;
declare var timeoutID: number;
declare var timeoutTTL: number;
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.css','./keyboard.css']
})
export class AppComponent implements OnInit {
debug = environment.debug;
transitionTimer: SimpleTimer;
controller = this.controller;
allowBookNowFunction = environment.allow_book_now_function;
bookEvent: boolean;
calendarWorkdayEndHour: number;
calendarWorkdayStartHour: number;
cancellation: boolean;
currentEvent: Event;
currentTimeout: any;
currentTimeoutTTL = 0;
eventData: string;
timePeriod: Timeslot;
date: Date;
dayMillis: number;
timeOptions = {
hour: "2-digit", minute: "2-digit"
};
events: Event[] = [];
helpRequested: boolean;
helpInformation: boolean;
helpPressed: boolean;
LOCALE = "en-us";
modalTransitionTimerCounter = 0;
modalTransitionTimerID = "modalTransitionTimer";
modalTimeout = environment.popupWindowTimeout;
newEvent: Event;
//newEventTitle: string;
newEventTitle = "Ad-hoc Meeting";
newEventEndTimeId: string;
newEventEndTimeValue: string;
newEventStartTimeId: string;
newEventStartTimeValue: string;
noEvents: boolean;
noEvents_message = "No Events Today";
numTimeslots: number = 0;
occupied: boolean;
refHours: string[] = [];
resource = RESOURCE;
restartRequested: boolean;
showAgenda: boolean;
showHelpButton = environment.showHelpButton;
showWaitSpinner: boolean;
selectedEvent: Event;
selectedStartValue: number;
timeIncrement = environment.time_slot_size; // minutes to increment select boxes by
timeSlots: Timeslot[] = [];
title = 'Room Scheduler';
schedulingWindow = 5; // minutes after a time window start time when the resource still be scheduled
unoccupied: boolean;
validTimeIncrements: TimeIncrement[] = [];
percentOfDayExpended: number;
private _keyboardRef: MdKeyboardRef<MdKeyboardComponent>;
darkTheme: boolean;
duration: number;
hasAction: boolean;
isDebug: boolean;
defaultLocale: string;
layout: string;
layouts: {
name: string;
layout: IKeyboardLayout;
}[];
get keyboardVisible(): boolean {
return this._keyboardService.isOpened;
}
constructor(private _keyboardService: MdKeyboardService,
@Inject(LOCALE_ID) public locale,
@Inject(MD_KEYBOARD_LAYOUTS) private _layouts,
private http: HttpClient) { }
ngOnInit(): void {
//var that = this;
window.addEventListener('load', function(){
document.addEventListener('touchstart', function(e){
if (timeoutID > 0 && timeoutID != null){
window.clearTimeout(timeoutID);
timeoutID = window.setTimeout(timeoutTTL);
}
e.preventDefault()
}, false)
}, false)
this.noEvents = true;
this.defaultLocale = ` ${this.LOCALE}`.slice(1);
this.utcTime();
this.transitionTimer = new SimpleTimer();
//console.log(this.timeSlots);
this.bookEvent = false;
this.cancellation = false;
this.calendarWorkdayEndHour = 17;
this.calendarWorkdayStartHour = 8;
this.currentEvent = this.events[1];
this.helpRequested = false;
this.helpPressed = false;
this.helpInformation = false;
this.restartRequested = false;
this.showWaitSpinner = false;
this.newEvent = null;
if (this.currentEvent != null) {
this.occupied = true;
}
else {
this.occupied = false;
}
//this.occupied = false;
this.showAgenda = false;
this.selectedEvent = null;
this.selectedStartValue = 0;
this.unoccupied = !(this.occupied);
/*for (var i = this.calendarWorkdayStartHour; i <= this.calendarWorkdayEndHour; i++) {
if (i > 12) {
var iNum = +i;
var nNum = iNum - 12;
this.refHours.push(nNum.toString());
}
else {
this.refHours.push(i.toString());
}
var newDate = new Date()
newDate.setHours(i);
}*/
this.refreshData();
}
calcTimeslots(): void {
this.numTimeslots = ( this.calendarWorkdayEndHour - this.calendarWorkdayStartHour ) * (60 / this.timeIncrement);
this.populateRefHours();
this.populateTimeslots();
}
populateRefHours(): void {
this.refHours = [];
for (var i=this.calendarWorkdayStartHour; i < this.calendarWorkdayEndHour; i++ ){
this.refHours.push(i.toString());
}
}
populateTimeslots(): void {
// Populate valid time scheduling window
var d = new Date();
var tomorrow = new Date();
tomorrow.setDate(d.getDate() + 1);
tomorrow.setTime(0);
var minutes = d.getMinutes();
//var hours = d.getHours();
var m = 0;
if (this.timeIncrement == 15) {
m = (((minutes + 7.5) / 15 | 0) * 15) % 60; // Nearest 15 minute interval, rounded down
}
else {
m = (((minutes + 15) / 30 | 0) * 30) % 60;
//m = (Math.round(minutes/30) * 30) % 60;
}
// var h = ((((minutes/105) + .5) | 0) + hours) % 24; // Not quite right.
d.setMinutes(m);
d.setSeconds(0);
for (var i = 0; i < this.numTimeslots; i++) {
var amPm = "AM";
var mins = d.getMinutes();
var hours = d.getHours();
if (hours > 12) {
amPm = "PM";
hours = hours - 12;
}
if ((new Date).getDay() == d.getDay()) {
this.validTimeIncrements.push({
id: i,
dateTimeValue: d,
value: d.toLocaleTimeString(this.LOCALE, this.timeOptions)
//value: hours.toString() + ":" + mins.toString() + " " + amPm
});
}
d.setMinutes(mins + this.timeIncrement);
}
//Populate timeslots
for (var j = 0; j < 96; j++) {
var tmpTime1 = new Date();
var tmpTime2 = new Date(tmpTime1.valueOf());
var t2 = 0;
var t = new Timeslot();
tmpTime1.setMinutes(j * 15);
t.Start = tmpTime1;
if (j < 96) {
t2 = j + 1;
}
else {
t2 = j;
}
tmpTime2.setMinutes((j + 1) * 15);
t.End = tmpTime2;
this.timeSlots.push(t);
/*var h = t.Start.getHours();
if (t.Start.getHours() > 12) {
h = +(t.Start.getHours()) - 12;
}
if (this.refHours.length <= 0) {
this.refHours.push(h.toPrecision(1).toString());
}
else {
if (this.refHours[-1].valueOf() != h.toPrecision(1).toString()) {
this.refHours.push(h.toPrecision(1).toString());
}
}*/
tmpTime1 = null;
tmpTime2 = null;
}
}
openKeyboard(locale = this.defaultLocale) {
this._keyboardRef = this._keyboardService.open(locale, {
//darkTheme: this.darkTheme,
//darkTheme: true,
darkTheme:false,
duration: this.duration,
hasAction: this.hasAction,
isDebug: this.isDebug
});
}
closeCurrentKeyboard() {
if (this._keyboardRef) {
this._keyboardRef.dismiss();
}
}
toggleDarkTheme(dark: boolean) {
this.darkTheme = dark;
this._keyboardRef.darkTheme = dark;
}
availabilityClass(e: Event): string {
if (e.Subject.toString() == 'Available') {
return "agenda-view-row-available";
}
else {
return "agenda-view-row-unavailable";
}
}
bookNewEvent(): void {
/*//this.reset();
var d = new Date();
this.bookEvent = true;
this.newEvent = new Event();
var year = d.getFullYear().toString();
var month = d.getMonth().toString();
var day = d.getDay().toString();
var s = "" + year + "-" + month + "-" + day + "T";
var e = "" + year + "-" + month + "-" + day + "T";
var sH = "";
var eH = "";
if (this.newEventStartAmPm === "AM") {
sH = (this.newEventStartHour).toString();
}
else {
var sI = +(this.newEventStartHour);
sH = (sI + 12).toString();
}
if (this.newEventEndAmPm === "AM") {
eH = (this.newEventEndHour).toString();
}
else {
var eI = +(this.newEventEndHour);
eH = (eI + 12).toString();
}
s += sH + ":" + this.newEventStartMinute + ":000";
e += eH + ":" + this.newEventEndMinute + ":000";*/
this.reset();
}
bookNow(): void {
this.reset();
this.startScreenResetTimeout(70);
this.bookEvent = true;
this.calcTimeslots();
}
cancelEvent(event: Event): void {
this.reset();
this.cancellation = true;
}
cancelPage_no(): void {
this.reset();
}
cancelPage_yes(): void {
this.reset();
}
consolidate_events(): void {
//console.log("Consolidating events");
var consolidate = true;
var i = this.events.length - 1;
////console.log(i.toString());
while (consolidate) {
if (i > 0) {
if (this.events[i].Subject === this.events[i - 1].Subject) {
this.events[i - 1].End = new Date(this.events[i].End.getDate());
this.events.pop();
i = this.events.length - 1;
}
else {
i--;
}
if (i == 0) {
consolidate = false;
break;
}
}
else {
break;
}
}
}
currentMeeting() {
var now = new Date();
for (var i = 0; i < this.events.length; i++) {
if ((new Date(this.events[i].Start) <= now) && (new Date(this.events[i].End) >= now)) |
}
this.currentEvent = null;
//console.log(this.currentEvent);
}
/*getTimePeriod(d:Date): number {
var t = new Date(d.getDate());
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = t.getHours() * 60 * 60;
var minutes: number = t.getMinutes() * 60;
var seconds: number = t.getSeconds();
var ms: number = (hours + minutes + seconds) * 1000;
var t1: number = t.getTime();
t.setHours(0);
t.setMinutes(0);
t.setSeconds(0);
var t2 = t.getTime();
var ret = 0;
ret = Math.floor((t1 - t2) / msIn15Min);
return ret;
}*/
currentTimePeriod(): number { // Return time period (0<x<96) for current time
var now = new Date();
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = now.getHours() * 60 * 60;
var minutes: number = now.getMinutes() * 60;
var seconds: number = now.getSeconds();
var ms: number = (hours + minutes + seconds) * 1000;
var t1: number = now.getTime();
now.setHours(0);
now.setMinutes(0);
now.setSeconds(0);
var t2 = now.getTime();
var ret = 0;
ret = Math.floor((t1 - t2) / msIn15Min);
return ret;
}
deriveVariablesFromHostname(res: Resource): void {
var buildingAndRoom = hostname.split(" ", 2);
var building = buildingAndRoom[0];
var room = buildingAndRoom[1];
res.id = building + "-" + room;
res.busy = false;
res.name = building + " " + room;
res.o365Name = res.id;
}
durationString(selectedEvent): string {
var duration = "";
var Date_Start = new Date(selectedEvent.start);
var Date_End = new Date(selectedEvent.end);
var Difference = Date_End.valueOf() - Date_Start.valueOf();
var diffDays = Math.floor(Difference / 86400000); // days
var diffHrs = Math.floor((Difference % 86400000) / 3600000); // hours
var diffMins = Math.round(((Difference % 86400000) % 3600000) / 60000); // minutes
if (diffMins > 0) {
duration = diffMins.toString() + " Minutes"
}
if (diffHrs > 0) {
duration = diffHrs + " Hours " + duration;
}
return (duration);
}
evalTime(): void {
//this.refreshData();
if (this.currentEvent != null) {
this.occupied = true;
}
else {
this.occupied = false;
}
this.unoccupied = !(this.occupied);
}
getSelectedText(elementId,index): string {
var elem = document.getElementById(elementId).getElementsByTagName( 'option' )[index];
return elem.text;
}
helpClick(): void {
this.helpPressed = true;
this.startScreenResetTimeout(10);
}
helpInformationRequest(): void {
this.helpPressed = false;
this.helpInformation = true;
//this.resetModal();
// show information;
}
helpRequest(): void {
this.helpPressed = false;
this.helpRequested = true;
var resp = this.http.post(environment.slack_webhook_url, "{\"text\":\"Help request from " + this.resource.name + "\"}").subscribe();
////console.log(resp);
this.startScreenResetTimeout(3);
}
modalTimerCallback(): void {
if (this.modalTransitionTimerCounter <= this.modalTimeout) {
this.modalTransitionTimerCounter++;
} else {
this.subscribeHelpTimer();
this.resetModal();
}
}
onSelect(event: Event): void {
this.selectedEvent = event;
}
onStartChange(selectedStartOption): void {
var i = Number(selectedStartOption) + 1;
this.newEventEndTimeId = i.toString();
this.getNewStartTime(selectedStartOption);
this.getNewEndTime(this.newEventEndTimeId);
}
percent(): void {
setInterval(function() {
var secondsInADay = 24 * 60 * 60;
var now = new Date();
var hours = now.getHours() * 60 * 60;
var minutes = now.getMinutes() * 60;
var seconds = now.getSeconds();
var totalSeconds = hours + minutes + seconds;
var percentSeconds = 100 * totalSeconds / secondsInADay;
this.percentOfDayExpended = percentSeconds;
}, 1000);
}
refreshData(): void {
this.populateRefHours();
this.events = [];
this.noEvents = true;
var url = 'http://' + ip + ':5000/v1.0/exchange/calendar/events';
this.http.get(url).subscribe(data => {
angular.forEach(data, function(obj) {
var e = new Event();
e.Subject = obj.subject;
e.Start = obj.start;
e.End = obj.end;
this.events.push(e);
this.noEvents = false;
}, this);
});
/*for (var i = 0; i < this.timeSlots.length; i++) {
var e = new Event();
e.Subject = "Available";
e.Start = this.timeSlots[i].Start;
e.End = this.timeSlots[i].End;
this.events.push(e);
}
this.consolidate_events();*/
this.currentMeeting();
}
reset(): void {
this.refreshData();
this.bookEvent = false;
this.cancellation = false;
this.helpInformation = false;
this.helpPressed = false;
this.helpRequested = false;
this.newEventEndTimeId = null;
this.newEventStartTimeId = null;
this.restartRequested = false;
this.showAgenda = false;
this.showWaitSpinner = false;
}
resetModal(): void {
this.helpPressed = false;
this.helpRequested = false;
var m = document.getElementsByClassName("modalContent");
for (var mChild in m) {
setTimeout(function() {
var m = document.getElementsByClassName("modal")[0];
m.classList.add("hidden");
}, 2000,this);
}
}
resetTransitionTimer(): void {
this.transitionTimer.delTimer('modalTransition');
}
restartBrowser(): void {
this.helpInformation = false;
this.restartRequested = true;
this.startScreenResetTimeout(3);
this.refreshData();
window.location.reload(false);
}
resetTimeouts(): void {
this.startScreenResetTimeout(this.currentTimeoutTTL);
}
scheduleEvent(): void {
this.reset();
this.startScreenResetTimeout(10);
//this.refreshData();
this.showAgenda = true;
}
scrollReferenceEvent(elem): void {
var a = document.getElementById("agenda");
var t = document.getElementById("current-time-bar-wrapper");
a.scrollTop = elem.scrollTop;
t.scrollTop = elem.scrollTop;
}
scrollAgendaEvent(elem): void {
var a = document.getElementById("refHours");
var t = document.getElementById("current-time-bar-wrapper");
a.scrollTop = elem.scrollTop;
t.scrollTop = elem.scrollTop;
}
selectByClass(selector: string): HTMLCollectionOf<Element> {
var elements = document.getElementsByClassName(selector);
return elements;
}
selectById(selector: string): HTMLElement {
var element = document.getElementById(selector);
return element;
}
startScreenResetTimeout(ttl): void { //ttl in s
var t = ttl * 1000; //convert s to ms
this.currentTimeoutTTL = t;
var that = this;
this.stopScreenResetTimeout();
this.currentTimeout = setTimeout(function(){
that.reset();
that.closeCurrentKeyboard();
},t);
}
stopScreenResetTimeout(): void {
if (this.currentTimeout != null) {
clearTimeout(this.currentTimeout);
}
}
getNewEndTime(newTime): void {
this.newEventEndTimeValue = this.getSelectedText("newEventEndTime",newTime);
//console.log("end: " + this.newEventEndTimeValue);
}
getNewStartTime(newTime): void{
this.newEventStartTimeValue = this.getSelectedText("newEventStartTime",newTime);
//console.log("start: " + this.newEventStartTimeValue);
}
submitEventForm(): void {
this.showWaitSpinner=true;
var e = this.newEventEndTimeValue;
var s = this.newEventStartTimeValue;
this.submitEvent("Ad-hoc Meeting", s,e);
}
submitEvent(tmpSubject: string, tmpStartTime: string, tmpEndTime: string): void {
var req = new Event();
var today = new Date();
var M = today.getMonth(); //month is zero-indexed
var d = today.getDate();
var y = today.getFullYear();
var tzoffset = today.getTimezoneOffset();
var sH = 0;
var sM = 0;
var eH = 0;
var eM = 0;
const [starttime, startmodifier] = tmpStartTime.split(' ');
let [starthours, startminutes] = starttime.split(':');
if (starthours === '12') {
starthours = '00';
}
if (startmodifier === 'PM') {
sH = parseInt(starthours, 10) + 12;
}
else {
sH = parseInt(starthours)
}
sM = parseInt(startminutes);
const [endtime, endmodifier] = tmpEndTime.split(' ');
let [endhours, endminutes] = endtime.split(':');
if (endhours === '12') {
endhours = '00';
}
if (endmodifier === 'PM') {
eH = parseInt(endhours, 10) + 12;
}
else{
eH = parseInt(endhours)
}
eM = parseInt(endminutes);
//new Date(year, month, day, hours, minutes, seconds, milliseconds);
var startTime = new Date(y,M,d,sH,sM,0);
var endTime = new Date(y,M,d,eH,eM,0);
req.Subject = tmpSubject;
req.Start = new Date(startTime.getTime() - tzoffset*60000);
req.End = new Date(endTime.getTime() - tzoffset*60000);
/////////
/// SUBMIT
///////
var url = 'http://' + ip + ':5000/v1.0/exchange/calendar/events';
var resp = this.http.post(url,JSON.stringify(req),{headers: new HttpHeaders().set('Content-Type', 'application/json')}).subscribe();
//this.restartRequested = true;
this.startScreenResetTimeout(1);
//this.refreshData();
//window.location.reload(false);
}
subscribeHelpTimer(): void {
if (this.modalTransitionTimerID) {
// Unsubscribe if timer Id is defined
this.transitionTimer.unsubscribe(this.modalTransitionTimerID);
this.modalTransitionTimerCounter = 0;
} else {
// Subscribe if timer Id is undefined
this.modalTransitionTimerID = this.transitionTimer.subscribe('modalTransition', () => this.modalTimerCallback());
}
}
utcTime(): void {
setInterval(() => {
this.date = new Date();
this.timePeriod = this.timeSlots[this.currentTimePeriod()];
this.percent();
this.currentMeeting();
this.evalTime();
}, 1000);
}
wait(): void {
this.showWaitSpinner = true;
}
}
| {
this.currentEvent = this.events[i];
//console.log(this.currentEvent);
return;
} | conditional_block |
app.component.ts | import { Component, ElementRef, HostListener, Inject, LOCALE_ID, OnInit } from '@angular/core';
import { DOCUMENT } from '@angular/platform-browser';
import { HttpClient, HttpEvent, HttpInterceptor, HttpHandler, HttpRequest, HttpHeaders } from '@angular/common/http';
import { SimpleTimer } from 'ng2-simple-timer';
import { environment } from '../environments/environment';
import { Event, Timeslot } from './model/o365.model';
import { IKeyboardLayout, MD_KEYBOARD_LAYOUTS, MdKeyboardComponent, MdKeyboardRef, MdKeyboardService } from '@ngx-material-keyboard/core';
import * as angular from 'angular';
export class Resource {
id: string;
busy: boolean;
name: string;
o365Name: string;
}
export class TimeIncrement {
id: number;
value: string;
dateTimeValue: Date;
}
const RESOURCE: Resource = {
id: environment.hostname,
name: environment.resource_name,
o365Name: environment.resource_id,
busy: false
}
const hostname = environment.hostname;
const ip = environment.hostIP;
declare var newEvent: Event;
const NOEVENTS_MESSAGES: string[] = ["No Events Today", "Your schedule is clear", "My schedule is wide open"]
const TIMEZONE = environment.timezone;
declare var timeoutID: number;
declare var timeoutTTL: number;
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.css','./keyboard.css']
})
export class AppComponent implements OnInit {
debug = environment.debug;
transitionTimer: SimpleTimer;
controller = this.controller;
allowBookNowFunction = environment.allow_book_now_function;
bookEvent: boolean;
calendarWorkdayEndHour: number;
calendarWorkdayStartHour: number;
cancellation: boolean;
currentEvent: Event;
currentTimeout: any;
currentTimeoutTTL = 0;
eventData: string;
timePeriod: Timeslot;
date: Date;
dayMillis: number;
timeOptions = {
hour: "2-digit", minute: "2-digit"
};
events: Event[] = [];
helpRequested: boolean;
helpInformation: boolean;
helpPressed: boolean;
LOCALE = "en-us";
modalTransitionTimerCounter = 0;
modalTransitionTimerID = "modalTransitionTimer";
modalTimeout = environment.popupWindowTimeout;
newEvent: Event;
//newEventTitle: string;
newEventTitle = "Ad-hoc Meeting";
newEventEndTimeId: string;
newEventEndTimeValue: string;
newEventStartTimeId: string;
newEventStartTimeValue: string;
noEvents: boolean;
noEvents_message = "No Events Today";
numTimeslots: number = 0;
occupied: boolean;
refHours: string[] = [];
resource = RESOURCE;
restartRequested: boolean;
showAgenda: boolean;
showHelpButton = environment.showHelpButton;
showWaitSpinner: boolean;
selectedEvent: Event;
selectedStartValue: number;
timeIncrement = environment.time_slot_size; // minutes to increment select boxes by
timeSlots: Timeslot[] = [];
title = 'Room Scheduler';
schedulingWindow = 5; // minutes after a time window start time when the resource still be scheduled
unoccupied: boolean;
validTimeIncrements: TimeIncrement[] = [];
percentOfDayExpended: number;
private _keyboardRef: MdKeyboardRef<MdKeyboardComponent>;
darkTheme: boolean;
duration: number;
hasAction: boolean;
| layout: string;
layouts: {
name: string;
layout: IKeyboardLayout;
}[];
get keyboardVisible(): boolean {
return this._keyboardService.isOpened;
}
constructor(private _keyboardService: MdKeyboardService,
@Inject(LOCALE_ID) public locale,
@Inject(MD_KEYBOARD_LAYOUTS) private _layouts,
private http: HttpClient) { }
ngOnInit(): void {
//var that = this;
window.addEventListener('load', function(){
document.addEventListener('touchstart', function(e){
if (timeoutID > 0 && timeoutID != null){
window.clearTimeout(timeoutID);
timeoutID = window.setTimeout(timeoutTTL);
}
e.preventDefault()
}, false)
}, false)
this.noEvents = true;
this.defaultLocale = ` ${this.LOCALE}`.slice(1);
this.utcTime();
this.transitionTimer = new SimpleTimer();
//console.log(this.timeSlots);
this.bookEvent = false;
this.cancellation = false;
this.calendarWorkdayEndHour = 17;
this.calendarWorkdayStartHour = 8;
this.currentEvent = this.events[1];
this.helpRequested = false;
this.helpPressed = false;
this.helpInformation = false;
this.restartRequested = false;
this.showWaitSpinner = false;
this.newEvent = null;
if (this.currentEvent != null) {
this.occupied = true;
}
else {
this.occupied = false;
}
//this.occupied = false;
this.showAgenda = false;
this.selectedEvent = null;
this.selectedStartValue = 0;
this.unoccupied = !(this.occupied);
/*for (var i = this.calendarWorkdayStartHour; i <= this.calendarWorkdayEndHour; i++) {
if (i > 12) {
var iNum = +i;
var nNum = iNum - 12;
this.refHours.push(nNum.toString());
}
else {
this.refHours.push(i.toString());
}
var newDate = new Date()
newDate.setHours(i);
}*/
this.refreshData();
}
calcTimeslots(): void {
this.numTimeslots = ( this.calendarWorkdayEndHour - this.calendarWorkdayStartHour ) * (60 / this.timeIncrement);
this.populateRefHours();
this.populateTimeslots();
}
populateRefHours(): void {
this.refHours = [];
for (var i=this.calendarWorkdayStartHour; i < this.calendarWorkdayEndHour; i++ ){
this.refHours.push(i.toString());
}
}
populateTimeslots(): void {
// Populate valid time scheduling window
var d = new Date();
var tomorrow = new Date();
tomorrow.setDate(d.getDate() + 1);
tomorrow.setTime(0);
var minutes = d.getMinutes();
//var hours = d.getHours();
var m = 0;
if (this.timeIncrement == 15) {
m = (((minutes + 7.5) / 15 | 0) * 15) % 60; // Nearest 15 minute interval, rounded down
}
else {
m = (((minutes + 15) / 30 | 0) * 30) % 60;
//m = (Math.round(minutes/30) * 30) % 60;
}
// var h = ((((minutes/105) + .5) | 0) + hours) % 24; // Not quite right.
d.setMinutes(m);
d.setSeconds(0);
for (var i = 0; i < this.numTimeslots; i++) {
var amPm = "AM";
var mins = d.getMinutes();
var hours = d.getHours();
if (hours > 12) {
amPm = "PM";
hours = hours - 12;
}
if ((new Date).getDay() == d.getDay()) {
this.validTimeIncrements.push({
id: i,
dateTimeValue: d,
value: d.toLocaleTimeString(this.LOCALE, this.timeOptions)
//value: hours.toString() + ":" + mins.toString() + " " + amPm
});
}
d.setMinutes(mins + this.timeIncrement);
}
//Populate timeslots
for (var j = 0; j < 96; j++) {
var tmpTime1 = new Date();
var tmpTime2 = new Date(tmpTime1.valueOf());
var t2 = 0;
var t = new Timeslot();
tmpTime1.setMinutes(j * 15);
t.Start = tmpTime1;
if (j < 96) {
t2 = j + 1;
}
else {
t2 = j;
}
tmpTime2.setMinutes((j + 1) * 15);
t.End = tmpTime2;
this.timeSlots.push(t);
/*var h = t.Start.getHours();
if (t.Start.getHours() > 12) {
h = +(t.Start.getHours()) - 12;
}
if (this.refHours.length <= 0) {
this.refHours.push(h.toPrecision(1).toString());
}
else {
if (this.refHours[-1].valueOf() != h.toPrecision(1).toString()) {
this.refHours.push(h.toPrecision(1).toString());
}
}*/
tmpTime1 = null;
tmpTime2 = null;
}
}
openKeyboard(locale = this.defaultLocale) {
this._keyboardRef = this._keyboardService.open(locale, {
//darkTheme: this.darkTheme,
//darkTheme: true,
darkTheme:false,
duration: this.duration,
hasAction: this.hasAction,
isDebug: this.isDebug
});
}
closeCurrentKeyboard() {
if (this._keyboardRef) {
this._keyboardRef.dismiss();
}
}
toggleDarkTheme(dark: boolean) {
this.darkTheme = dark;
this._keyboardRef.darkTheme = dark;
}
availabilityClass(e: Event): string {
if (e.Subject.toString() == 'Available') {
return "agenda-view-row-available";
}
else {
return "agenda-view-row-unavailable";
}
}
bookNewEvent(): void {
/*//this.reset();
var d = new Date();
this.bookEvent = true;
this.newEvent = new Event();
var year = d.getFullYear().toString();
var month = d.getMonth().toString();
var day = d.getDay().toString();
var s = "" + year + "-" + month + "-" + day + "T";
var e = "" + year + "-" + month + "-" + day + "T";
var sH = "";
var eH = "";
if (this.newEventStartAmPm === "AM") {
sH = (this.newEventStartHour).toString();
}
else {
var sI = +(this.newEventStartHour);
sH = (sI + 12).toString();
}
if (this.newEventEndAmPm === "AM") {
eH = (this.newEventEndHour).toString();
}
else {
var eI = +(this.newEventEndHour);
eH = (eI + 12).toString();
}
s += sH + ":" + this.newEventStartMinute + ":000";
e += eH + ":" + this.newEventEndMinute + ":000";*/
this.reset();
}
bookNow(): void {
this.reset();
this.startScreenResetTimeout(70);
this.bookEvent = true;
this.calcTimeslots();
}
cancelEvent(event: Event): void {
this.reset();
this.cancellation = true;
}
cancelPage_no(): void {
this.reset();
}
cancelPage_yes(): void {
this.reset();
}
consolidate_events(): void {
//console.log("Consolidating events");
var consolidate = true;
var i = this.events.length - 1;
////console.log(i.toString());
while (consolidate) {
if (i > 0) {
if (this.events[i].Subject === this.events[i - 1].Subject) {
this.events[i - 1].End = new Date(this.events[i].End.getDate());
this.events.pop();
i = this.events.length - 1;
}
else {
i--;
}
if (i == 0) {
consolidate = false;
break;
}
}
else {
break;
}
}
}
currentMeeting() {
var now = new Date();
for (var i = 0; i < this.events.length; i++) {
if ((new Date(this.events[i].Start) <= now) && (new Date(this.events[i].End) >= now)) {
this.currentEvent = this.events[i];
//console.log(this.currentEvent);
return;
}
}
this.currentEvent = null;
//console.log(this.currentEvent);
}
/*getTimePeriod(d:Date): number {
var t = new Date(d.getDate());
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = t.getHours() * 60 * 60;
var minutes: number = t.getMinutes() * 60;
var seconds: number = t.getSeconds();
var ms: number = (hours + minutes + seconds) * 1000;
var t1: number = t.getTime();
t.setHours(0);
t.setMinutes(0);
t.setSeconds(0);
var t2 = t.getTime();
var ret = 0;
ret = Math.floor((t1 - t2) / msIn15Min);
return ret;
}*/
currentTimePeriod(): number { // Return time period (0<x<96) for current time
var now = new Date();
var msIn15Min: number = 900000;
var secondsInADay: number = 24 * 60 * 60;
var hours: number = now.getHours() * 60 * 60;
var minutes: number = now.getMinutes() * 60;
var seconds: number = now.getSeconds();
var ms: number = (hours + minutes + seconds) * 1000;
var t1: number = now.getTime();
now.setHours(0);
now.setMinutes(0);
now.setSeconds(0);
var t2 = now.getTime();
var ret = 0;
ret = Math.floor((t1 - t2) / msIn15Min);
return ret;
}
deriveVariablesFromHostname(res: Resource): void {
var buildingAndRoom = hostname.split(" ", 2);
var building = buildingAndRoom[0];
var room = buildingAndRoom[1];
res.id = building + "-" + room;
res.busy = false;
res.name = building + " " + room;
res.o365Name = res.id;
}
durationString(selectedEvent): string {
var duration = "";
var Date_Start = new Date(selectedEvent.start);
var Date_End = new Date(selectedEvent.end);
var Difference = Date_End.valueOf() - Date_Start.valueOf();
var diffDays = Math.floor(Difference / 86400000); // days
var diffHrs = Math.floor((Difference % 86400000) / 3600000); // hours
var diffMins = Math.round(((Difference % 86400000) % 3600000) / 60000); // minutes
if (diffMins > 0) {
duration = diffMins.toString() + " Minutes"
}
if (diffHrs > 0) {
duration = diffHrs + " Hours " + duration;
}
return (duration);
}
evalTime(): void {
//this.refreshData();
if (this.currentEvent != null) {
this.occupied = true;
}
else {
this.occupied = false;
}
this.unoccupied = !(this.occupied);
}
getSelectedText(elementId,index): string {
var elem = document.getElementById(elementId).getElementsByTagName( 'option' )[index];
return elem.text;
}
helpClick(): void {
this.helpPressed = true;
this.startScreenResetTimeout(10);
}
helpInformationRequest(): void {
this.helpPressed = false;
this.helpInformation = true;
//this.resetModal();
// show information;
}
helpRequest(): void {
this.helpPressed = false;
this.helpRequested = true;
var resp = this.http.post(environment.slack_webhook_url, "{\"text\":\"Help request from " + this.resource.name + "\"}").subscribe();
////console.log(resp);
this.startScreenResetTimeout(3);
}
modalTimerCallback(): void {
if (this.modalTransitionTimerCounter <= this.modalTimeout) {
this.modalTransitionTimerCounter++;
} else {
this.subscribeHelpTimer();
this.resetModal();
}
}
onSelect(event: Event): void {
this.selectedEvent = event;
}
onStartChange(selectedStartOption): void {
var i = Number(selectedStartOption) + 1;
this.newEventEndTimeId = i.toString();
this.getNewStartTime(selectedStartOption);
this.getNewEndTime(this.newEventEndTimeId);
}
percent(): void {
setInterval(function() {
var secondsInADay = 24 * 60 * 60;
var now = new Date();
var hours = now.getHours() * 60 * 60;
var minutes = now.getMinutes() * 60;
var seconds = now.getSeconds();
var totalSeconds = hours + minutes + seconds;
var percentSeconds = 100 * totalSeconds / secondsInADay;
this.percentOfDayExpended = percentSeconds;
}, 1000);
}
refreshData(): void {
this.populateRefHours();
this.events = [];
this.noEvents = true;
var url = 'http://' + ip + ':5000/v1.0/exchange/calendar/events';
this.http.get(url).subscribe(data => {
angular.forEach(data, function(obj) {
var e = new Event();
e.Subject = obj.subject;
e.Start = obj.start;
e.End = obj.end;
this.events.push(e);
this.noEvents = false;
}, this);
});
/*for (var i = 0; i < this.timeSlots.length; i++) {
var e = new Event();
e.Subject = "Available";
e.Start = this.timeSlots[i].Start;
e.End = this.timeSlots[i].End;
this.events.push(e);
}
this.consolidate_events();*/
this.currentMeeting();
}
reset(): void {
this.refreshData();
this.bookEvent = false;
this.cancellation = false;
this.helpInformation = false;
this.helpPressed = false;
this.helpRequested = false;
this.newEventEndTimeId = null;
this.newEventStartTimeId = null;
this.restartRequested = false;
this.showAgenda = false;
this.showWaitSpinner = false;
}
resetModal(): void {
this.helpPressed = false;
this.helpRequested = false;
var m = document.getElementsByClassName("modalContent");
for (var mChild in m) {
setTimeout(function() {
var m = document.getElementsByClassName("modal")[0];
m.classList.add("hidden");
}, 2000,this);
}
}
resetTransitionTimer(): void {
this.transitionTimer.delTimer('modalTransition');
}
restartBrowser(): void {
this.helpInformation = false;
this.restartRequested = true;
this.startScreenResetTimeout(3);
this.refreshData();
window.location.reload(false);
}
resetTimeouts(): void {
this.startScreenResetTimeout(this.currentTimeoutTTL);
}
scheduleEvent(): void {
this.reset();
this.startScreenResetTimeout(10);
//this.refreshData();
this.showAgenda = true;
}
scrollReferenceEvent(elem): void {
var a = document.getElementById("agenda");
var t = document.getElementById("current-time-bar-wrapper");
a.scrollTop = elem.scrollTop;
t.scrollTop = elem.scrollTop;
}
scrollAgendaEvent(elem): void {
var a = document.getElementById("refHours");
var t = document.getElementById("current-time-bar-wrapper");
a.scrollTop = elem.scrollTop;
t.scrollTop = elem.scrollTop;
}
selectByClass(selector: string): HTMLCollectionOf<Element> {
var elements = document.getElementsByClassName(selector);
return elements;
}
selectById(selector: string): HTMLElement {
var element = document.getElementById(selector);
return element;
}
startScreenResetTimeout(ttl): void { //ttl in s
var t = ttl * 1000; //convert s to ms
this.currentTimeoutTTL = t;
var that = this;
this.stopScreenResetTimeout();
this.currentTimeout = setTimeout(function(){
that.reset();
that.closeCurrentKeyboard();
},t);
}
stopScreenResetTimeout(): void {
if (this.currentTimeout != null) {
clearTimeout(this.currentTimeout);
}
}
getNewEndTime(newTime): void {
this.newEventEndTimeValue = this.getSelectedText("newEventEndTime",newTime);
//console.log("end: " + this.newEventEndTimeValue);
}
getNewStartTime(newTime): void{
this.newEventStartTimeValue = this.getSelectedText("newEventStartTime",newTime);
//console.log("start: " + this.newEventStartTimeValue);
}
submitEventForm(): void {
this.showWaitSpinner=true;
var e = this.newEventEndTimeValue;
var s = this.newEventStartTimeValue;
this.submitEvent("Ad-hoc Meeting", s,e);
}
submitEvent(tmpSubject: string, tmpStartTime: string, tmpEndTime: string): void {
var req = new Event();
var today = new Date();
var M = today.getMonth(); //month is zero-indexed
var d = today.getDate();
var y = today.getFullYear();
var tzoffset = today.getTimezoneOffset();
var sH = 0;
var sM = 0;
var eH = 0;
var eM = 0;
const [starttime, startmodifier] = tmpStartTime.split(' ');
let [starthours, startminutes] = starttime.split(':');
if (starthours === '12') {
starthours = '00';
}
if (startmodifier === 'PM') {
sH = parseInt(starthours, 10) + 12;
}
else {
sH = parseInt(starthours)
}
sM = parseInt(startminutes);
const [endtime, endmodifier] = tmpEndTime.split(' ');
let [endhours, endminutes] = endtime.split(':');
if (endhours === '12') {
endhours = '00';
}
if (endmodifier === 'PM') {
eH = parseInt(endhours, 10) + 12;
}
else{
eH = parseInt(endhours)
}
eM = parseInt(endminutes);
//new Date(year, month, day, hours, minutes, seconds, milliseconds);
var startTime = new Date(y,M,d,sH,sM,0);
var endTime = new Date(y,M,d,eH,eM,0);
req.Subject = tmpSubject;
req.Start = new Date(startTime.getTime() - tzoffset*60000);
req.End = new Date(endTime.getTime() - tzoffset*60000);
/////////
/// SUBMIT
///////
var url = 'http://' + ip + ':5000/v1.0/exchange/calendar/events';
var resp = this.http.post(url,JSON.stringify(req),{headers: new HttpHeaders().set('Content-Type', 'application/json')}).subscribe();
//this.restartRequested = true;
this.startScreenResetTimeout(1);
//this.refreshData();
//window.location.reload(false);
}
subscribeHelpTimer(): void {
if (this.modalTransitionTimerID) {
// Unsubscribe if timer Id is defined
this.transitionTimer.unsubscribe(this.modalTransitionTimerID);
this.modalTransitionTimerCounter = 0;
} else {
// Subscribe if timer Id is undefined
this.modalTransitionTimerID = this.transitionTimer.subscribe('modalTransition', () => this.modalTimerCallback());
}
}
utcTime(): void {
setInterval(() => {
this.date = new Date();
this.timePeriod = this.timeSlots[this.currentTimePeriod()];
this.percent();
this.currentMeeting();
this.evalTime();
}, 1000);
}
wait(): void {
this.showWaitSpinner = true;
}
} | isDebug: boolean;
defaultLocale: string;
| random_line_split |
dhcpd.go | package dhcpd
import (
"bytes"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/happyhater/golibs/log"
"github.com/krolaw/dhcp4"
ping "github.com/sparrc/go-ping"
)
const defaultDiscoverTime = time.Second * 3
// Lease contains the necessary information about a DHCP lease
// field ordering is important -- yaml fields will mirror ordering from here
type Lease struct {
HWAddr net.HardwareAddr `json:"mac" yaml:"hwaddr"`
IP net.IP `json:"ip"`
Hostname string `json:"hostname"`
Expiry time.Time `json:"expires"`
}
// ServerConfig - DHCP server configuration
// field ordering is important -- yaml fields will mirror ordering from here
type ServerConfig struct {
Enabled bool `json:"enabled" yaml:"enabled"`
InterfaceName string `json:"interface_name" yaml:"interface_name"` // eth0, en0 and so on
GatewayIP string `json:"gateway_ip" yaml:"gateway_ip"`
SubnetMask string `json:"subnet_mask" yaml:"subnet_mask"`
RangeStart string `json:"range_start" yaml:"range_start"`
RangeEnd string `json:"range_end" yaml:"range_end"`
LeaseDuration uint `json:"lease_duration" yaml:"lease_duration"` // in seconds
// IP conflict detector: time (ms) to wait for ICMP reply.
// 0: disable
ICMPTimeout uint `json:"icmp_timeout_msec" yaml:"icmp_timeout_msec"`
}
// Server - the current state of the DHCP server
type Server struct {
conn *filterConn // listening UDP socket
ipnet *net.IPNet // if interface name changes, this needs to be reset
cond *sync.Cond // Synchronize worker thread with main thread
mutex sync.Mutex // Mutex for 'cond'
running bool // Set if the worker thread is running
stopping bool // Set if the worker thread should be stopped
// leases
leases []*Lease
leaseStart net.IP // parsed from config RangeStart
leaseStop net.IP // parsed from config RangeEnd
leaseTime time.Duration // parsed from config LeaseDuration
leaseOptions dhcp4.Options // parsed from config GatewayIP and SubnetMask
// IP address pool -- if entry is in the pool, then it's attached to a lease
IPpool map[[4]byte]net.HardwareAddr
ServerConfig
sync.RWMutex
}
// Print information about the available network interfaces
func printInterfaces() {
ifaces, _ := net.Interfaces()
var buf strings.Builder
for i := range ifaces {
buf.WriteString(fmt.Sprintf("\"%s\", ", ifaces[i].Name))
}
log.Info("Available network interfaces: %s", buf.String())
}
// Start will listen on port 67 and serve DHCP requests.
// Even though config can be nil, it is not optional (at least for now), since there are no default values (yet).
func (s *Server) Start(config *ServerConfig) error {
if config != nil {
s.ServerConfig = *config
}
iface, err := net.InterfaceByName(s.InterfaceName)
if err != nil {
s.closeConn() // in case it was already started
printInterfaces()
return wrapErrPrint(err, "Couldn't find interface by name %s", s.InterfaceName)
}
// get ipv4 address of an interface
s.ipnet = getIfaceIPv4(iface)
if s.ipnet == nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Couldn't find IPv4 address of interface %s %+v", s.InterfaceName, iface)
}
if s.LeaseDuration == 0 {
s.leaseTime = time.Hour * 2
s.LeaseDuration = uint(s.leaseTime.Seconds())
} else {
s.leaseTime = time.Second * time.Duration(s.LeaseDuration)
}
s.leaseStart, err = parseIPv4(s.RangeStart)
if err != nil |
s.leaseStop, err = parseIPv4(s.RangeEnd)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse range end address %s", s.RangeEnd)
}
subnet, err := parseIPv4(s.SubnetMask)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse subnet mask %s", s.SubnetMask)
}
// if !bytes.Equal(subnet, s.ipnet.Mask) {
// s.closeConn() // in case it was already started
// return wrapErrPrint(err, "specified subnet mask %s does not meatch interface %s subnet mask %s", s.SubnetMask, s.InterfaceName, s.ipnet.Mask)
// }
router, err := parseIPv4(s.GatewayIP)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse gateway IP %s", s.GatewayIP)
}
s.leaseOptions = dhcp4.Options{
dhcp4.OptionSubnetMask: subnet,
dhcp4.OptionRouter: router,
dhcp4.OptionDomainNameServer: s.ipnet.IP,
}
// TODO: don't close if interface and addresses are the same
if s.conn != nil {
s.closeConn()
}
s.dbLoad()
c, err := newFilterConn(*iface, ":67") // it has to be bound to 0.0.0.0:67, otherwise it won't see DHCP discover/request packets
if err != nil {
return wrapErrPrint(err, "Couldn't start listening socket on 0.0.0.0:67")
}
log.Info("DHCP: listening on 0.0.0.0:67")
s.conn = c
s.cond = sync.NewCond(&s.mutex)
s.running = true
go func() {
// operate on c instead of c.conn because c.conn can change over time
err := dhcp4.Serve(c, s)
if err != nil && !s.stopping {
log.Printf("dhcp4.Serve() returned with error: %s", err)
}
c.Close() // in case Serve() exits for other reason than listening socket closure
s.running = false
s.cond.Signal()
}()
return nil
}
// Stop closes the listening UDP socket
func (s *Server) Stop() error {
if s.conn == nil {
// nothing to do, return silently
return nil
}
s.stopping = true
err := s.closeConn()
if err != nil {
return wrapErrPrint(err, "Couldn't close UDP listening socket")
}
// We've just closed the listening socket.
// Worker thread should exit right after it tries to read from the socket.
s.mutex.Lock()
for s.running {
s.cond.Wait()
}
s.mutex.Unlock()
s.dbStore()
return nil
}
// closeConn will close the connection and set it to zero
func (s *Server) closeConn() error {
if s.conn == nil {
return nil
}
err := s.conn.Close()
s.conn = nil
return err
}
// Reserve a lease for the client
func (s *Server) reserveLease(p dhcp4.Packet) (*Lease, error) {
// WARNING: do not remove copy()
// the given hwaddr by p.CHAddr() in the packet survives only during ServeDHCP() call
// since we need to retain it we need to make our own copy
hwaddrCOW := p.CHAddr()
hwaddr := make(net.HardwareAddr, len(hwaddrCOW))
copy(hwaddr, hwaddrCOW)
// not assigned a lease, create new one, find IP from LRU
hostname := p.ParseOptions()[dhcp4.OptionHostName]
lease := &Lease{HWAddr: hwaddr, Hostname: string(hostname)}
log.Tracef("Lease not found for %s: creating new one", hwaddr)
ip, err := s.findFreeIP(hwaddr)
if err != nil {
i := s.findExpiredLease()
if i < 0 {
return nil, wrapErrPrint(err, "Couldn't find free IP for the lease %s", hwaddr.String())
}
log.Tracef("Assigning IP address %s to %s (lease for %s expired at %s)",
s.leases[i].IP, hwaddr, s.leases[i].HWAddr, s.leases[i].Expiry)
lease.IP = s.leases[i].IP
s.Lock()
s.leases[i] = lease
s.Unlock()
s.reserveIP(lease.IP, hwaddr)
return lease, nil
}
log.Tracef("Assigning to %s IP address %s", hwaddr, ip.String())
lease.IP = ip
s.Lock()
s.leases = append(s.leases, lease)
s.Unlock()
return lease, nil
}
// Find a lease for the client
func (s *Server) findLease(p dhcp4.Packet) *Lease {
hwaddr := p.CHAddr()
for i := range s.leases {
if bytes.Equal([]byte(hwaddr), []byte(s.leases[i].HWAddr)) {
// log.Tracef("bytes.Equal(%s, %s) returned true", hwaddr, s.leases[i].hwaddr)
return s.leases[i]
}
}
return nil
}
// Find an expired lease and return its index or -1
func (s *Server) findExpiredLease() int {
now := time.Now().Unix()
for i, lease := range s.leases {
if lease.Expiry.Unix() <= now {
return i
}
}
return -1
}
func (s *Server) findFreeIP(hwaddr net.HardwareAddr) (net.IP, error) {
// if IP pool is nil, lazy initialize it
if s.IPpool == nil {
s.IPpool = make(map[[4]byte]net.HardwareAddr)
}
// go from start to end, find unreserved IP
var foundIP net.IP
for i := 0; i < dhcp4.IPRange(s.leaseStart, s.leaseStop); i++ {
newIP := dhcp4.IPAdd(s.leaseStart, i)
foundHWaddr := s.findReservedHWaddr(newIP)
log.Tracef("tried IP %v, got hwaddr %v", newIP, foundHWaddr)
if foundHWaddr != nil && len(foundHWaddr) != 0 {
// if !bytes.Equal(foundHWaddr, hwaddr) {
// log.Tracef("SHOULD NOT HAPPEN: hwaddr in IP pool %s is not equal to hwaddr in lease %s", foundHWaddr, hwaddr)
// }
continue
}
foundIP = newIP
break
}
if foundIP == nil {
// TODO: LRU
return nil, fmt.Errorf("couldn't find free entry in IP pool")
}
s.reserveIP(foundIP, hwaddr)
return foundIP, nil
}
func (s *Server) findReservedHWaddr(ip net.IP) net.HardwareAddr {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
return s.IPpool[IP4]
}
func (s *Server) reserveIP(ip net.IP, hwaddr net.HardwareAddr) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
s.IPpool[IP4] = hwaddr
}
func (s *Server) unreserveIP(ip net.IP) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
delete(s.IPpool, IP4)
}
// ServeDHCP handles an incoming DHCP request
func (s *Server) ServeDHCP(p dhcp4.Packet, msgType dhcp4.MessageType, options dhcp4.Options) dhcp4.Packet {
s.printLeases()
switch msgType {
case dhcp4.Discover: // Broadcast Packet From Client - Can I have an IP?
return s.handleDiscover(p, options)
case dhcp4.Request: // Broadcast From Client - I'll take that IP (Also start for renewals)
// start/renew a lease -- update lease time
// some clients (OSX) just go right ahead and do Request first from previously known IP, if they get NAK, they restart full cycle with Discover then Request
return s.handleDHCP4Request(p, options)
case dhcp4.Decline: // Broadcast From Client - Sorry I can't use that IP
return s.handleDecline(p, options)
case dhcp4.Release: // From Client, I don't need that IP anymore
return s.handleRelease(p, options)
case dhcp4.Inform: // From Client, I have this IP and there's nothing you can do about it
return s.handleInform(p, options)
// from server -- ignore those but enumerate just in case
case dhcp4.Offer: // Broadcast From Server - Here's an IP
log.Printf("DHCP: received message from %s: Offer", p.CHAddr())
case dhcp4.ACK: // From Server, Yes you can have that IP
log.Printf("DHCP: received message from %s: ACK", p.CHAddr())
case dhcp4.NAK: // From Server, No you cannot have that IP
log.Printf("DHCP: received message from %s: NAK", p.CHAddr())
default:
log.Printf("DHCP: unknown packet %v from %s", msgType, p.CHAddr())
return nil
}
return nil
}
// Send ICMP to the specified machine
// Return TRUE if it doesn't reply, which probably means that the IP is available
func (s *Server) addrAvailable(target net.IP) bool {
if s.ICMPTimeout == 0 {
return true
}
pinger, err := ping.NewPinger(target.String())
if err != nil {
log.Error("ping.NewPinger(): %v", err)
return true
}
pinger.SetPrivileged(true)
pinger.Timeout = time.Duration(s.ICMPTimeout) * time.Millisecond
pinger.Count = 1
reply := false
pinger.OnRecv = func(pkt *ping.Packet) {
// log.Tracef("Received ICMP Reply from %v", target)
reply = true
}
log.Tracef("Sending ICMP Echo to %v", target)
pinger.Run()
if reply {
log.Info("DHCP: IP conflict: %v is already used by another device", target)
return false
}
log.Tracef("ICMP procedure is complete: %v", target)
return true
}
// Add the specified IP to the black list for a time period
func (s *Server) blacklistLease(lease *Lease) {
hw := make(net.HardwareAddr, 6)
s.reserveIP(lease.IP, hw)
s.Lock()
lease.HWAddr = hw
lease.Hostname = ""
lease.Expiry = time.Now().Add(s.leaseTime)
s.Unlock()
}
// Return TRUE if DHCP packet is correct
func isValidPacket(p dhcp4.Packet) bool {
hw := p.CHAddr()
zeroes := make([]byte, len(hw))
if bytes.Equal(hw, zeroes) {
log.Tracef("Packet has empty CHAddr")
return false
}
return true
}
func (s *Server) handleDiscover(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
// find a lease, but don't update lease time
var lease *Lease
var err error
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
hostname := p.ParseOptions()[dhcp4.OptionHostName]
log.Tracef("Message from client: Discover. ReqIP: %s HW: %s Hostname: %s",
reqIP, p.CHAddr(), hostname)
if !isValidPacket(p) {
return nil
}
lease = s.findLease(p)
for lease == nil {
lease, err = s.reserveLease(p)
if err != nil {
log.Error("Couldn't find free lease: %s", err)
return nil
}
if !s.addrAvailable(lease.IP) {
s.blacklistLease(lease)
lease = nil
continue
}
break
}
opt := s.leaseOptions.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])
reply := dhcp4.ReplyPacket(p, dhcp4.Offer, s.ipnet.IP, lease.IP, s.leaseTime, opt)
log.Tracef("Replying with offer: offered IP %v for %v with options %+v", lease.IP, s.leaseTime, reply.ParseOptions())
return reply
}
func (s *Server) handleDHCP4Request(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
var lease *Lease
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
log.Tracef("Message from client: Request. IP: %s ReqIP: %s HW: %s",
p.CIAddr(), reqIP, p.CHAddr())
if !isValidPacket(p) {
return nil
}
server := options[dhcp4.OptionServerIdentifier]
if server != nil && !net.IP(server).Equal(s.ipnet.IP) {
log.Tracef("Request message not for this DHCP server (%v vs %v)", server, s.ipnet.IP)
return nil // Message not for this dhcp server
}
if reqIP == nil {
reqIP = p.CIAddr()
} else if reqIP == nil || reqIP.To4() == nil {
log.Tracef("Requested IP isn't a valid IPv4: %s", reqIP)
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
lease = s.findLease(p)
if lease == nil {
log.Tracef("Lease for %s isn't found", p.CHAddr())
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
if !lease.IP.Equal(reqIP) {
log.Tracef("Lease for %s doesn't match requested/client IP: %s vs %s",
lease.HWAddr, lease.IP, reqIP)
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
lease.Expiry = time.Now().Add(s.leaseTime)
log.Tracef("Replying with ACK. IP: %s HW: %s Expire: %s",
lease.IP, lease.HWAddr, lease.Expiry)
opt := s.leaseOptions.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])
return dhcp4.ReplyPacket(p, dhcp4.ACK, s.ipnet.IP, lease.IP, s.leaseTime, opt)
}
func (s *Server) handleInform(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
log.Tracef("Message from client: Inform. IP: %s HW: %s",
p.CIAddr(), p.CHAddr())
return nil
}
func (s *Server) handleRelease(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
log.Tracef("Message from client: Release. IP: %s HW: %s",
p.CIAddr(), p.CHAddr())
return nil
}
func (s *Server) handleDecline(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
log.Tracef("Message from client: Decline. IP: %s HW: %s",
reqIP, p.CHAddr())
return nil
}
// Leases returns the list of current DHCP leases (thread-safe)
func (s *Server) Leases() []Lease {
var result []Lease
now := time.Now().Unix()
s.RLock()
for _, lease := range s.leases {
if lease.Expiry.Unix() > now {
result = append(result, *lease)
}
}
s.RUnlock()
return result
}
// Print information about the current leases
func (s *Server) printLeases() {
log.Tracef("Leases:")
for i, lease := range s.leases {
log.Tracef("Lease #%d: hwaddr %s, ip %s, expiry %s",
i, lease.HWAddr, lease.IP, lease.Expiry)
}
}
// Reset internal state
func (s *Server) reset() {
s.Lock()
s.leases = nil
s.Unlock()
s.IPpool = make(map[[4]byte]net.HardwareAddr)
}
| {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse range start address %s", s.RangeStart)
} | conditional_block |
dhcpd.go | package dhcpd
import (
"bytes"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/happyhater/golibs/log"
"github.com/krolaw/dhcp4"
ping "github.com/sparrc/go-ping"
)
const defaultDiscoverTime = time.Second * 3
// Lease contains the necessary information about a DHCP lease
// field ordering is important -- yaml fields will mirror ordering from here
type Lease struct {
HWAddr net.HardwareAddr `json:"mac" yaml:"hwaddr"`
IP net.IP `json:"ip"`
Hostname string `json:"hostname"`
Expiry time.Time `json:"expires"`
}
// ServerConfig - DHCP server configuration
// field ordering is important -- yaml fields will mirror ordering from here
type ServerConfig struct {
Enabled bool `json:"enabled" yaml:"enabled"`
InterfaceName string `json:"interface_name" yaml:"interface_name"` // eth0, en0 and so on
GatewayIP string `json:"gateway_ip" yaml:"gateway_ip"`
SubnetMask string `json:"subnet_mask" yaml:"subnet_mask"`
RangeStart string `json:"range_start" yaml:"range_start"`
RangeEnd string `json:"range_end" yaml:"range_end"`
LeaseDuration uint `json:"lease_duration" yaml:"lease_duration"` // in seconds
// IP conflict detector: time (ms) to wait for ICMP reply.
// 0: disable
ICMPTimeout uint `json:"icmp_timeout_msec" yaml:"icmp_timeout_msec"`
}
// Server - the current state of the DHCP server
type Server struct {
conn *filterConn // listening UDP socket
ipnet *net.IPNet // if interface name changes, this needs to be reset
cond *sync.Cond // Synchronize worker thread with main thread
mutex sync.Mutex // Mutex for 'cond'
running bool // Set if the worker thread is running
stopping bool // Set if the worker thread should be stopped
// leases
leases []*Lease
leaseStart net.IP // parsed from config RangeStart
leaseStop net.IP // parsed from config RangeEnd
leaseTime time.Duration // parsed from config LeaseDuration
leaseOptions dhcp4.Options // parsed from config GatewayIP and SubnetMask
// IP address pool -- if entry is in the pool, then it's attached to a lease
IPpool map[[4]byte]net.HardwareAddr
ServerConfig
sync.RWMutex
}
// Print information about the available network interfaces
func printInterfaces() {
ifaces, _ := net.Interfaces()
var buf strings.Builder
for i := range ifaces {
buf.WriteString(fmt.Sprintf("\"%s\", ", ifaces[i].Name))
}
log.Info("Available network interfaces: %s", buf.String())
}
// Start will listen on port 67 and serve DHCP requests.
// Even though config can be nil, it is not optional (at least for now), since there are no default values (yet).
func (s *Server) Start(config *ServerConfig) error {
if config != nil {
s.ServerConfig = *config
}
iface, err := net.InterfaceByName(s.InterfaceName)
if err != nil {
s.closeConn() // in case it was already started
printInterfaces()
return wrapErrPrint(err, "Couldn't find interface by name %s", s.InterfaceName)
}
// get ipv4 address of an interface
s.ipnet = getIfaceIPv4(iface)
if s.ipnet == nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Couldn't find IPv4 address of interface %s %+v", s.InterfaceName, iface)
}
if s.LeaseDuration == 0 {
s.leaseTime = time.Hour * 2
s.LeaseDuration = uint(s.leaseTime.Seconds())
} else {
s.leaseTime = time.Second * time.Duration(s.LeaseDuration)
}
s.leaseStart, err = parseIPv4(s.RangeStart)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse range start address %s", s.RangeStart)
}
s.leaseStop, err = parseIPv4(s.RangeEnd)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse range end address %s", s.RangeEnd)
}
subnet, err := parseIPv4(s.SubnetMask)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse subnet mask %s", s.SubnetMask)
}
// if !bytes.Equal(subnet, s.ipnet.Mask) {
// s.closeConn() // in case it was already started
// return wrapErrPrint(err, "specified subnet mask %s does not meatch interface %s subnet mask %s", s.SubnetMask, s.InterfaceName, s.ipnet.Mask)
// }
router, err := parseIPv4(s.GatewayIP)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse gateway IP %s", s.GatewayIP)
}
s.leaseOptions = dhcp4.Options{
dhcp4.OptionSubnetMask: subnet,
dhcp4.OptionRouter: router,
dhcp4.OptionDomainNameServer: s.ipnet.IP,
}
// TODO: don't close if interface and addresses are the same
if s.conn != nil {
s.closeConn()
}
s.dbLoad()
c, err := newFilterConn(*iface, ":67") // it has to be bound to 0.0.0.0:67, otherwise it won't see DHCP discover/request packets
if err != nil {
return wrapErrPrint(err, "Couldn't start listening socket on 0.0.0.0:67")
}
log.Info("DHCP: listening on 0.0.0.0:67")
s.conn = c
s.cond = sync.NewCond(&s.mutex)
s.running = true
go func() {
// operate on c instead of c.conn because c.conn can change over time
err := dhcp4.Serve(c, s)
if err != nil && !s.stopping {
log.Printf("dhcp4.Serve() returned with error: %s", err)
}
c.Close() // in case Serve() exits for other reason than listening socket closure
s.running = false
s.cond.Signal()
}()
return nil
}
// Stop closes the listening UDP socket
func (s *Server) Stop() error {
if s.conn == nil {
// nothing to do, return silently
return nil
}
s.stopping = true
err := s.closeConn()
if err != nil {
return wrapErrPrint(err, "Couldn't close UDP listening socket")
}
// We've just closed the listening socket.
// Worker thread should exit right after it tries to read from the socket.
s.mutex.Lock()
for s.running {
s.cond.Wait()
}
s.mutex.Unlock()
s.dbStore()
return nil
}
// closeConn will close the connection and set it to zero
func (s *Server) closeConn() error {
if s.conn == nil {
return nil
}
err := s.conn.Close()
s.conn = nil
return err
}
// Reserve a lease for the client
func (s *Server) reserveLease(p dhcp4.Packet) (*Lease, error) {
// WARNING: do not remove copy()
// the given hwaddr by p.CHAddr() in the packet survives only during ServeDHCP() call
// since we need to retain it we need to make our own copy
hwaddrCOW := p.CHAddr()
hwaddr := make(net.HardwareAddr, len(hwaddrCOW))
copy(hwaddr, hwaddrCOW)
// not assigned a lease, create new one, find IP from LRU
hostname := p.ParseOptions()[dhcp4.OptionHostName]
lease := &Lease{HWAddr: hwaddr, Hostname: string(hostname)}
log.Tracef("Lease not found for %s: creating new one", hwaddr)
ip, err := s.findFreeIP(hwaddr)
if err != nil {
i := s.findExpiredLease()
if i < 0 {
return nil, wrapErrPrint(err, "Couldn't find free IP for the lease %s", hwaddr.String())
}
log.Tracef("Assigning IP address %s to %s (lease for %s expired at %s)",
s.leases[i].IP, hwaddr, s.leases[i].HWAddr, s.leases[i].Expiry)
lease.IP = s.leases[i].IP
s.Lock()
s.leases[i] = lease
s.Unlock()
s.reserveIP(lease.IP, hwaddr)
return lease, nil
}
log.Tracef("Assigning to %s IP address %s", hwaddr, ip.String())
lease.IP = ip
s.Lock()
s.leases = append(s.leases, lease)
s.Unlock()
return lease, nil
}
// Find a lease for the client
func (s *Server) findLease(p dhcp4.Packet) *Lease {
hwaddr := p.CHAddr()
for i := range s.leases {
if bytes.Equal([]byte(hwaddr), []byte(s.leases[i].HWAddr)) {
// log.Tracef("bytes.Equal(%s, %s) returned true", hwaddr, s.leases[i].hwaddr)
return s.leases[i]
}
}
return nil
}
// Find an expired lease and return its index or -1
func (s *Server) findExpiredLease() int {
now := time.Now().Unix()
for i, lease := range s.leases {
if lease.Expiry.Unix() <= now {
return i
}
}
return -1
}
func (s *Server) findFreeIP(hwaddr net.HardwareAddr) (net.IP, error) {
// if IP pool is nil, lazy initialize it
if s.IPpool == nil {
s.IPpool = make(map[[4]byte]net.HardwareAddr)
}
// go from start to end, find unreserved IP
var foundIP net.IP
for i := 0; i < dhcp4.IPRange(s.leaseStart, s.leaseStop); i++ {
newIP := dhcp4.IPAdd(s.leaseStart, i)
foundHWaddr := s.findReservedHWaddr(newIP)
log.Tracef("tried IP %v, got hwaddr %v", newIP, foundHWaddr)
if foundHWaddr != nil && len(foundHWaddr) != 0 {
// if !bytes.Equal(foundHWaddr, hwaddr) {
// log.Tracef("SHOULD NOT HAPPEN: hwaddr in IP pool %s is not equal to hwaddr in lease %s", foundHWaddr, hwaddr)
// }
continue
}
foundIP = newIP
break
}
if foundIP == nil {
// TODO: LRU
return nil, fmt.Errorf("couldn't find free entry in IP pool")
}
s.reserveIP(foundIP, hwaddr)
return foundIP, nil
}
func (s *Server) findReservedHWaddr(ip net.IP) net.HardwareAddr {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
return s.IPpool[IP4]
}
func (s *Server) reserveIP(ip net.IP, hwaddr net.HardwareAddr) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
s.IPpool[IP4] = hwaddr
}
func (s *Server) unreserveIP(ip net.IP) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
delete(s.IPpool, IP4)
}
// ServeDHCP handles an incoming DHCP request
func (s *Server) ServeDHCP(p dhcp4.Packet, msgType dhcp4.MessageType, options dhcp4.Options) dhcp4.Packet {
s.printLeases()
switch msgType {
case dhcp4.Discover: // Broadcast Packet From Client - Can I have an IP?
return s.handleDiscover(p, options)
case dhcp4.Request: // Broadcast From Client - I'll take that IP (Also start for renewals)
// start/renew a lease -- update lease time
// some clients (OSX) just go right ahead and do Request first from previously known IP, if they get NAK, they restart full cycle with Discover then Request
return s.handleDHCP4Request(p, options)
case dhcp4.Decline: // Broadcast From Client - Sorry I can't use that IP
return s.handleDecline(p, options)
case dhcp4.Release: // From Client, I don't need that IP anymore
return s.handleRelease(p, options)
case dhcp4.Inform: // From Client, I have this IP and there's nothing you can do about it
return s.handleInform(p, options)
// from server -- ignore those but enumerate just in case
case dhcp4.Offer: // Broadcast From Server - Here's an IP
log.Printf("DHCP: received message from %s: Offer", p.CHAddr())
case dhcp4.ACK: // From Server, Yes you can have that IP
log.Printf("DHCP: received message from %s: ACK", p.CHAddr())
case dhcp4.NAK: // From Server, No you cannot have that IP
log.Printf("DHCP: received message from %s: NAK", p.CHAddr())
default:
log.Printf("DHCP: unknown packet %v from %s", msgType, p.CHAddr())
return nil
}
return nil
}
// Send ICMP to the specified machine
// Return TRUE if it doesn't reply, which probably means that the IP is available
func (s *Server) addrAvailable(target net.IP) bool {
if s.ICMPTimeout == 0 {
return true
}
pinger, err := ping.NewPinger(target.String())
if err != nil {
log.Error("ping.NewPinger(): %v", err)
return true
}
pinger.SetPrivileged(true)
pinger.Timeout = time.Duration(s.ICMPTimeout) * time.Millisecond
pinger.Count = 1
reply := false
pinger.OnRecv = func(pkt *ping.Packet) {
// log.Tracef("Received ICMP Reply from %v", target)
reply = true
}
log.Tracef("Sending ICMP Echo to %v", target)
pinger.Run()
if reply {
log.Info("DHCP: IP conflict: %v is already used by another device", target)
return false
}
log.Tracef("ICMP procedure is complete: %v", target)
return true
}
// Add the specified IP to the black list for a time period
func (s *Server) blacklistLease(lease *Lease) {
hw := make(net.HardwareAddr, 6)
s.reserveIP(lease.IP, hw)
s.Lock()
lease.HWAddr = hw
lease.Hostname = ""
lease.Expiry = time.Now().Add(s.leaseTime)
s.Unlock()
}
// Return TRUE if DHCP packet is correct
func isValidPacket(p dhcp4.Packet) bool {
hw := p.CHAddr()
zeroes := make([]byte, len(hw))
if bytes.Equal(hw, zeroes) {
log.Tracef("Packet has empty CHAddr")
return false
}
return true
}
func (s *Server) handleDiscover(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
// find a lease, but don't update lease time
var lease *Lease
var err error
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
hostname := p.ParseOptions()[dhcp4.OptionHostName]
log.Tracef("Message from client: Discover. ReqIP: %s HW: %s Hostname: %s",
reqIP, p.CHAddr(), hostname)
if !isValidPacket(p) {
return nil
}
lease = s.findLease(p)
for lease == nil {
lease, err = s.reserveLease(p)
if err != nil {
log.Error("Couldn't find free lease: %s", err)
return nil
}
if !s.addrAvailable(lease.IP) {
s.blacklistLease(lease)
lease = nil
continue
}
break
}
opt := s.leaseOptions.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])
reply := dhcp4.ReplyPacket(p, dhcp4.Offer, s.ipnet.IP, lease.IP, s.leaseTime, opt)
log.Tracef("Replying with offer: offered IP %v for %v with options %+v", lease.IP, s.leaseTime, reply.ParseOptions())
return reply
}
func (s *Server) handleDHCP4Request(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet { |
if !isValidPacket(p) {
return nil
}
server := options[dhcp4.OptionServerIdentifier]
if server != nil && !net.IP(server).Equal(s.ipnet.IP) {
log.Tracef("Request message not for this DHCP server (%v vs %v)", server, s.ipnet.IP)
return nil // Message not for this dhcp server
}
if reqIP == nil {
reqIP = p.CIAddr()
} else if reqIP == nil || reqIP.To4() == nil {
log.Tracef("Requested IP isn't a valid IPv4: %s", reqIP)
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
lease = s.findLease(p)
if lease == nil {
log.Tracef("Lease for %s isn't found", p.CHAddr())
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
if !lease.IP.Equal(reqIP) {
log.Tracef("Lease for %s doesn't match requested/client IP: %s vs %s",
lease.HWAddr, lease.IP, reqIP)
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
lease.Expiry = time.Now().Add(s.leaseTime)
log.Tracef("Replying with ACK. IP: %s HW: %s Expire: %s",
lease.IP, lease.HWAddr, lease.Expiry)
opt := s.leaseOptions.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])
return dhcp4.ReplyPacket(p, dhcp4.ACK, s.ipnet.IP, lease.IP, s.leaseTime, opt)
}
func (s *Server) handleInform(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
log.Tracef("Message from client: Inform. IP: %s HW: %s",
p.CIAddr(), p.CHAddr())
return nil
}
func (s *Server) handleRelease(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
log.Tracef("Message from client: Release. IP: %s HW: %s",
p.CIAddr(), p.CHAddr())
return nil
}
func (s *Server) handleDecline(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
log.Tracef("Message from client: Decline. IP: %s HW: %s",
reqIP, p.CHAddr())
return nil
}
// Leases returns the list of current DHCP leases (thread-safe)
func (s *Server) Leases() []Lease {
var result []Lease
now := time.Now().Unix()
s.RLock()
for _, lease := range s.leases {
if lease.Expiry.Unix() > now {
result = append(result, *lease)
}
}
s.RUnlock()
return result
}
// Print information about the current leases
func (s *Server) printLeases() {
log.Tracef("Leases:")
for i, lease := range s.leases {
log.Tracef("Lease #%d: hwaddr %s, ip %s, expiry %s",
i, lease.HWAddr, lease.IP, lease.Expiry)
}
}
// Reset internal state
func (s *Server) reset() {
s.Lock()
s.leases = nil
s.Unlock()
s.IPpool = make(map[[4]byte]net.HardwareAddr)
} | var lease *Lease
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
log.Tracef("Message from client: Request. IP: %s ReqIP: %s HW: %s",
p.CIAddr(), reqIP, p.CHAddr()) | random_line_split |
dhcpd.go | package dhcpd
import (
"bytes"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/happyhater/golibs/log"
"github.com/krolaw/dhcp4"
ping "github.com/sparrc/go-ping"
)
const defaultDiscoverTime = time.Second * 3
// Lease contains the necessary information about a DHCP lease
// field ordering is important -- yaml fields will mirror ordering from here
type Lease struct {
HWAddr net.HardwareAddr `json:"mac" yaml:"hwaddr"`
IP net.IP `json:"ip"`
Hostname string `json:"hostname"`
Expiry time.Time `json:"expires"`
}
// ServerConfig - DHCP server configuration
// field ordering is important -- yaml fields will mirror ordering from here
type ServerConfig struct {
Enabled bool `json:"enabled" yaml:"enabled"`
InterfaceName string `json:"interface_name" yaml:"interface_name"` // eth0, en0 and so on
GatewayIP string `json:"gateway_ip" yaml:"gateway_ip"`
SubnetMask string `json:"subnet_mask" yaml:"subnet_mask"`
RangeStart string `json:"range_start" yaml:"range_start"`
RangeEnd string `json:"range_end" yaml:"range_end"`
LeaseDuration uint `json:"lease_duration" yaml:"lease_duration"` // in seconds
// IP conflict detector: time (ms) to wait for ICMP reply.
// 0: disable
ICMPTimeout uint `json:"icmp_timeout_msec" yaml:"icmp_timeout_msec"`
}
// Server - the current state of the DHCP server
type Server struct {
conn *filterConn // listening UDP socket
ipnet *net.IPNet // if interface name changes, this needs to be reset
cond *sync.Cond // Synchronize worker thread with main thread
mutex sync.Mutex // Mutex for 'cond'
running bool // Set if the worker thread is running
stopping bool // Set if the worker thread should be stopped
// leases
leases []*Lease
leaseStart net.IP // parsed from config RangeStart
leaseStop net.IP // parsed from config RangeEnd
leaseTime time.Duration // parsed from config LeaseDuration
leaseOptions dhcp4.Options // parsed from config GatewayIP and SubnetMask
// IP address pool -- if entry is in the pool, then it's attached to a lease
IPpool map[[4]byte]net.HardwareAddr
ServerConfig
sync.RWMutex
}
// Print information about the available network interfaces
func printInterfaces() {
ifaces, _ := net.Interfaces()
var buf strings.Builder
for i := range ifaces {
buf.WriteString(fmt.Sprintf("\"%s\", ", ifaces[i].Name))
}
log.Info("Available network interfaces: %s", buf.String())
}
// Start will listen on port 67 and serve DHCP requests.
// Even though config can be nil, it is not optional (at least for now), since there are no default values (yet).
func (s *Server) Start(config *ServerConfig) error {
if config != nil {
s.ServerConfig = *config
}
iface, err := net.InterfaceByName(s.InterfaceName)
if err != nil {
s.closeConn() // in case it was already started
printInterfaces()
return wrapErrPrint(err, "Couldn't find interface by name %s", s.InterfaceName)
}
// get ipv4 address of an interface
s.ipnet = getIfaceIPv4(iface)
if s.ipnet == nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Couldn't find IPv4 address of interface %s %+v", s.InterfaceName, iface)
}
if s.LeaseDuration == 0 {
s.leaseTime = time.Hour * 2
s.LeaseDuration = uint(s.leaseTime.Seconds())
} else {
s.leaseTime = time.Second * time.Duration(s.LeaseDuration)
}
s.leaseStart, err = parseIPv4(s.RangeStart)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse range start address %s", s.RangeStart)
}
s.leaseStop, err = parseIPv4(s.RangeEnd)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse range end address %s", s.RangeEnd)
}
subnet, err := parseIPv4(s.SubnetMask)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse subnet mask %s", s.SubnetMask)
}
// if !bytes.Equal(subnet, s.ipnet.Mask) {
// s.closeConn() // in case it was already started
// return wrapErrPrint(err, "specified subnet mask %s does not meatch interface %s subnet mask %s", s.SubnetMask, s.InterfaceName, s.ipnet.Mask)
// }
router, err := parseIPv4(s.GatewayIP)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse gateway IP %s", s.GatewayIP)
}
s.leaseOptions = dhcp4.Options{
dhcp4.OptionSubnetMask: subnet,
dhcp4.OptionRouter: router,
dhcp4.OptionDomainNameServer: s.ipnet.IP,
}
// TODO: don't close if interface and addresses are the same
if s.conn != nil {
s.closeConn()
}
s.dbLoad()
c, err := newFilterConn(*iface, ":67") // it has to be bound to 0.0.0.0:67, otherwise it won't see DHCP discover/request packets
if err != nil {
return wrapErrPrint(err, "Couldn't start listening socket on 0.0.0.0:67")
}
log.Info("DHCP: listening on 0.0.0.0:67")
s.conn = c
s.cond = sync.NewCond(&s.mutex)
s.running = true
go func() {
// operate on c instead of c.conn because c.conn can change over time
err := dhcp4.Serve(c, s)
if err != nil && !s.stopping {
log.Printf("dhcp4.Serve() returned with error: %s", err)
}
c.Close() // in case Serve() exits for other reason than listening socket closure
s.running = false
s.cond.Signal()
}()
return nil
}
// Stop closes the listening UDP socket
func (s *Server) Stop() error {
if s.conn == nil {
// nothing to do, return silently
return nil
}
s.stopping = true
err := s.closeConn()
if err != nil {
return wrapErrPrint(err, "Couldn't close UDP listening socket")
}
// We've just closed the listening socket.
// Worker thread should exit right after it tries to read from the socket.
s.mutex.Lock()
for s.running {
s.cond.Wait()
}
s.mutex.Unlock()
s.dbStore()
return nil
}
// closeConn will close the connection and set it to zero
func (s *Server) closeConn() error {
if s.conn == nil {
return nil
}
err := s.conn.Close()
s.conn = nil
return err
}
// Reserve a lease for the client
func (s *Server) reserveLease(p dhcp4.Packet) (*Lease, error) {
// WARNING: do not remove copy()
// the given hwaddr by p.CHAddr() in the packet survives only during ServeDHCP() call
// since we need to retain it we need to make our own copy
hwaddrCOW := p.CHAddr()
hwaddr := make(net.HardwareAddr, len(hwaddrCOW))
copy(hwaddr, hwaddrCOW)
// not assigned a lease, create new one, find IP from LRU
hostname := p.ParseOptions()[dhcp4.OptionHostName]
lease := &Lease{HWAddr: hwaddr, Hostname: string(hostname)}
log.Tracef("Lease not found for %s: creating new one", hwaddr)
ip, err := s.findFreeIP(hwaddr)
if err != nil {
i := s.findExpiredLease()
if i < 0 {
return nil, wrapErrPrint(err, "Couldn't find free IP for the lease %s", hwaddr.String())
}
log.Tracef("Assigning IP address %s to %s (lease for %s expired at %s)",
s.leases[i].IP, hwaddr, s.leases[i].HWAddr, s.leases[i].Expiry)
lease.IP = s.leases[i].IP
s.Lock()
s.leases[i] = lease
s.Unlock()
s.reserveIP(lease.IP, hwaddr)
return lease, nil
}
log.Tracef("Assigning to %s IP address %s", hwaddr, ip.String())
lease.IP = ip
s.Lock()
s.leases = append(s.leases, lease)
s.Unlock()
return lease, nil
}
// Find a lease for the client
func (s *Server) findLease(p dhcp4.Packet) *Lease {
hwaddr := p.CHAddr()
for i := range s.leases {
if bytes.Equal([]byte(hwaddr), []byte(s.leases[i].HWAddr)) {
// log.Tracef("bytes.Equal(%s, %s) returned true", hwaddr, s.leases[i].hwaddr)
return s.leases[i]
}
}
return nil
}
// Find an expired lease and return its index or -1
func (s *Server) findExpiredLease() int {
now := time.Now().Unix()
for i, lease := range s.leases {
if lease.Expiry.Unix() <= now {
return i
}
}
return -1
}
func (s *Server) findFreeIP(hwaddr net.HardwareAddr) (net.IP, error) {
// if IP pool is nil, lazy initialize it
if s.IPpool == nil {
s.IPpool = make(map[[4]byte]net.HardwareAddr)
}
// go from start to end, find unreserved IP
var foundIP net.IP
for i := 0; i < dhcp4.IPRange(s.leaseStart, s.leaseStop); i++ {
newIP := dhcp4.IPAdd(s.leaseStart, i)
foundHWaddr := s.findReservedHWaddr(newIP)
log.Tracef("tried IP %v, got hwaddr %v", newIP, foundHWaddr)
if foundHWaddr != nil && len(foundHWaddr) != 0 {
// if !bytes.Equal(foundHWaddr, hwaddr) {
// log.Tracef("SHOULD NOT HAPPEN: hwaddr in IP pool %s is not equal to hwaddr in lease %s", foundHWaddr, hwaddr)
// }
continue
}
foundIP = newIP
break
}
if foundIP == nil {
// TODO: LRU
return nil, fmt.Errorf("couldn't find free entry in IP pool")
}
s.reserveIP(foundIP, hwaddr)
return foundIP, nil
}
func (s *Server) findReservedHWaddr(ip net.IP) net.HardwareAddr {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
return s.IPpool[IP4]
}
func (s *Server) reserveIP(ip net.IP, hwaddr net.HardwareAddr) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
s.IPpool[IP4] = hwaddr
}
func (s *Server) unreserveIP(ip net.IP) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
delete(s.IPpool, IP4)
}
// ServeDHCP handles an incoming DHCP request
func (s *Server) ServeDHCP(p dhcp4.Packet, msgType dhcp4.MessageType, options dhcp4.Options) dhcp4.Packet {
s.printLeases()
switch msgType {
case dhcp4.Discover: // Broadcast Packet From Client - Can I have an IP?
return s.handleDiscover(p, options)
case dhcp4.Request: // Broadcast From Client - I'll take that IP (Also start for renewals)
// start/renew a lease -- update lease time
// some clients (OSX) just go right ahead and do Request first from previously known IP, if they get NAK, they restart full cycle with Discover then Request
return s.handleDHCP4Request(p, options)
case dhcp4.Decline: // Broadcast From Client - Sorry I can't use that IP
return s.handleDecline(p, options)
case dhcp4.Release: // From Client, I don't need that IP anymore
return s.handleRelease(p, options)
case dhcp4.Inform: // From Client, I have this IP and there's nothing you can do about it
return s.handleInform(p, options)
// from server -- ignore those but enumerate just in case
case dhcp4.Offer: // Broadcast From Server - Here's an IP
log.Printf("DHCP: received message from %s: Offer", p.CHAddr())
case dhcp4.ACK: // From Server, Yes you can have that IP
log.Printf("DHCP: received message from %s: ACK", p.CHAddr())
case dhcp4.NAK: // From Server, No you cannot have that IP
log.Printf("DHCP: received message from %s: NAK", p.CHAddr())
default:
log.Printf("DHCP: unknown packet %v from %s", msgType, p.CHAddr())
return nil
}
return nil
}
// Send ICMP to the specified machine
// Return TRUE if it doesn't reply, which probably means that the IP is available
func (s *Server) addrAvailable(target net.IP) bool {
if s.ICMPTimeout == 0 {
return true
}
pinger, err := ping.NewPinger(target.String())
if err != nil {
log.Error("ping.NewPinger(): %v", err)
return true
}
pinger.SetPrivileged(true)
pinger.Timeout = time.Duration(s.ICMPTimeout) * time.Millisecond
pinger.Count = 1
reply := false
pinger.OnRecv = func(pkt *ping.Packet) {
// log.Tracef("Received ICMP Reply from %v", target)
reply = true
}
log.Tracef("Sending ICMP Echo to %v", target)
pinger.Run()
if reply {
log.Info("DHCP: IP conflict: %v is already used by another device", target)
return false
}
log.Tracef("ICMP procedure is complete: %v", target)
return true
}
// Add the specified IP to the black list for a time period
func (s *Server) blacklistLease(lease *Lease) {
hw := make(net.HardwareAddr, 6)
s.reserveIP(lease.IP, hw)
s.Lock()
lease.HWAddr = hw
lease.Hostname = ""
lease.Expiry = time.Now().Add(s.leaseTime)
s.Unlock()
}
// Return TRUE if DHCP packet is correct
func isValidPacket(p dhcp4.Packet) bool {
hw := p.CHAddr()
zeroes := make([]byte, len(hw))
if bytes.Equal(hw, zeroes) {
log.Tracef("Packet has empty CHAddr")
return false
}
return true
}
func (s *Server) handleDiscover(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
// find a lease, but don't update lease time
var lease *Lease
var err error
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
hostname := p.ParseOptions()[dhcp4.OptionHostName]
log.Tracef("Message from client: Discover. ReqIP: %s HW: %s Hostname: %s",
reqIP, p.CHAddr(), hostname)
if !isValidPacket(p) {
return nil
}
lease = s.findLease(p)
for lease == nil {
lease, err = s.reserveLease(p)
if err != nil {
log.Error("Couldn't find free lease: %s", err)
return nil
}
if !s.addrAvailable(lease.IP) {
s.blacklistLease(lease)
lease = nil
continue
}
break
}
opt := s.leaseOptions.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])
reply := dhcp4.ReplyPacket(p, dhcp4.Offer, s.ipnet.IP, lease.IP, s.leaseTime, opt)
log.Tracef("Replying with offer: offered IP %v for %v with options %+v", lease.IP, s.leaseTime, reply.ParseOptions())
return reply
}
func (s *Server) handleDHCP4Request(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
var lease *Lease
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
log.Tracef("Message from client: Request. IP: %s ReqIP: %s HW: %s",
p.CIAddr(), reqIP, p.CHAddr())
if !isValidPacket(p) {
return nil
}
server := options[dhcp4.OptionServerIdentifier]
if server != nil && !net.IP(server).Equal(s.ipnet.IP) {
log.Tracef("Request message not for this DHCP server (%v vs %v)", server, s.ipnet.IP)
return nil // Message not for this dhcp server
}
if reqIP == nil {
reqIP = p.CIAddr()
} else if reqIP == nil || reqIP.To4() == nil {
log.Tracef("Requested IP isn't a valid IPv4: %s", reqIP)
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
lease = s.findLease(p)
if lease == nil {
log.Tracef("Lease for %s isn't found", p.CHAddr())
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
if !lease.IP.Equal(reqIP) {
log.Tracef("Lease for %s doesn't match requested/client IP: %s vs %s",
lease.HWAddr, lease.IP, reqIP)
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
lease.Expiry = time.Now().Add(s.leaseTime)
log.Tracef("Replying with ACK. IP: %s HW: %s Expire: %s",
lease.IP, lease.HWAddr, lease.Expiry)
opt := s.leaseOptions.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])
return dhcp4.ReplyPacket(p, dhcp4.ACK, s.ipnet.IP, lease.IP, s.leaseTime, opt)
}
func (s *Server) handleInform(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
log.Tracef("Message from client: Inform. IP: %s HW: %s",
p.CIAddr(), p.CHAddr())
return nil
}
func (s *Server) handleRelease(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
log.Tracef("Message from client: Release. IP: %s HW: %s",
p.CIAddr(), p.CHAddr())
return nil
}
func (s *Server) handleDecline(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
log.Tracef("Message from client: Decline. IP: %s HW: %s",
reqIP, p.CHAddr())
return nil
}
// Leases returns the list of current DHCP leases (thread-safe)
func (s *Server) Leases() []Lease {
var result []Lease
now := time.Now().Unix()
s.RLock()
for _, lease := range s.leases {
if lease.Expiry.Unix() > now {
result = append(result, *lease)
}
}
s.RUnlock()
return result
}
// Print information about the current leases
func (s *Server) printLeases() {
log.Tracef("Leases:")
for i, lease := range s.leases {
log.Tracef("Lease #%d: hwaddr %s, ip %s, expiry %s",
i, lease.HWAddr, lease.IP, lease.Expiry)
}
}
// Reset internal state
func (s *Server) reset() | {
s.Lock()
s.leases = nil
s.Unlock()
s.IPpool = make(map[[4]byte]net.HardwareAddr)
} | identifier_body | |
dhcpd.go | package dhcpd
import (
"bytes"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/happyhater/golibs/log"
"github.com/krolaw/dhcp4"
ping "github.com/sparrc/go-ping"
)
const defaultDiscoverTime = time.Second * 3
// Lease contains the necessary information about a DHCP lease
// field ordering is important -- yaml fields will mirror ordering from here
type Lease struct {
HWAddr net.HardwareAddr `json:"mac" yaml:"hwaddr"`
IP net.IP `json:"ip"`
Hostname string `json:"hostname"`
Expiry time.Time `json:"expires"`
}
// ServerConfig - DHCP server configuration
// field ordering is important -- yaml fields will mirror ordering from here
type ServerConfig struct {
Enabled bool `json:"enabled" yaml:"enabled"`
InterfaceName string `json:"interface_name" yaml:"interface_name"` // eth0, en0 and so on
GatewayIP string `json:"gateway_ip" yaml:"gateway_ip"`
SubnetMask string `json:"subnet_mask" yaml:"subnet_mask"`
RangeStart string `json:"range_start" yaml:"range_start"`
RangeEnd string `json:"range_end" yaml:"range_end"`
LeaseDuration uint `json:"lease_duration" yaml:"lease_duration"` // in seconds
// IP conflict detector: time (ms) to wait for ICMP reply.
// 0: disable
ICMPTimeout uint `json:"icmp_timeout_msec" yaml:"icmp_timeout_msec"`
}
// Server - the current state of the DHCP server
type Server struct {
conn *filterConn // listening UDP socket
ipnet *net.IPNet // if interface name changes, this needs to be reset
cond *sync.Cond // Synchronize worker thread with main thread
mutex sync.Mutex // Mutex for 'cond'
running bool // Set if the worker thread is running
stopping bool // Set if the worker thread should be stopped
// leases
leases []*Lease
leaseStart net.IP // parsed from config RangeStart
leaseStop net.IP // parsed from config RangeEnd
leaseTime time.Duration // parsed from config LeaseDuration
leaseOptions dhcp4.Options // parsed from config GatewayIP and SubnetMask
// IP address pool -- if entry is in the pool, then it's attached to a lease
IPpool map[[4]byte]net.HardwareAddr
ServerConfig
sync.RWMutex
}
// Print information about the available network interfaces
func printInterfaces() {
ifaces, _ := net.Interfaces()
var buf strings.Builder
for i := range ifaces {
buf.WriteString(fmt.Sprintf("\"%s\", ", ifaces[i].Name))
}
log.Info("Available network interfaces: %s", buf.String())
}
// Start will listen on port 67 and serve DHCP requests.
// Even though config can be nil, it is not optional (at least for now), since there are no default values (yet).
func (s *Server) Start(config *ServerConfig) error {
if config != nil {
s.ServerConfig = *config
}
iface, err := net.InterfaceByName(s.InterfaceName)
if err != nil {
s.closeConn() // in case it was already started
printInterfaces()
return wrapErrPrint(err, "Couldn't find interface by name %s", s.InterfaceName)
}
// get ipv4 address of an interface
s.ipnet = getIfaceIPv4(iface)
if s.ipnet == nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Couldn't find IPv4 address of interface %s %+v", s.InterfaceName, iface)
}
if s.LeaseDuration == 0 {
s.leaseTime = time.Hour * 2
s.LeaseDuration = uint(s.leaseTime.Seconds())
} else {
s.leaseTime = time.Second * time.Duration(s.LeaseDuration)
}
s.leaseStart, err = parseIPv4(s.RangeStart)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse range start address %s", s.RangeStart)
}
s.leaseStop, err = parseIPv4(s.RangeEnd)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse range end address %s", s.RangeEnd)
}
subnet, err := parseIPv4(s.SubnetMask)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse subnet mask %s", s.SubnetMask)
}
// if !bytes.Equal(subnet, s.ipnet.Mask) {
// s.closeConn() // in case it was already started
// return wrapErrPrint(err, "specified subnet mask %s does not meatch interface %s subnet mask %s", s.SubnetMask, s.InterfaceName, s.ipnet.Mask)
// }
router, err := parseIPv4(s.GatewayIP)
if err != nil {
s.closeConn() // in case it was already started
return wrapErrPrint(err, "Failed to parse gateway IP %s", s.GatewayIP)
}
s.leaseOptions = dhcp4.Options{
dhcp4.OptionSubnetMask: subnet,
dhcp4.OptionRouter: router,
dhcp4.OptionDomainNameServer: s.ipnet.IP,
}
// TODO: don't close if interface and addresses are the same
if s.conn != nil {
s.closeConn()
}
s.dbLoad()
c, err := newFilterConn(*iface, ":67") // it has to be bound to 0.0.0.0:67, otherwise it won't see DHCP discover/request packets
if err != nil {
return wrapErrPrint(err, "Couldn't start listening socket on 0.0.0.0:67")
}
log.Info("DHCP: listening on 0.0.0.0:67")
s.conn = c
s.cond = sync.NewCond(&s.mutex)
s.running = true
go func() {
// operate on c instead of c.conn because c.conn can change over time
err := dhcp4.Serve(c, s)
if err != nil && !s.stopping {
log.Printf("dhcp4.Serve() returned with error: %s", err)
}
c.Close() // in case Serve() exits for other reason than listening socket closure
s.running = false
s.cond.Signal()
}()
return nil
}
// Stop closes the listening UDP socket
func (s *Server) Stop() error {
if s.conn == nil {
// nothing to do, return silently
return nil
}
s.stopping = true
err := s.closeConn()
if err != nil {
return wrapErrPrint(err, "Couldn't close UDP listening socket")
}
// We've just closed the listening socket.
// Worker thread should exit right after it tries to read from the socket.
s.mutex.Lock()
for s.running {
s.cond.Wait()
}
s.mutex.Unlock()
s.dbStore()
return nil
}
// closeConn will close the connection and set it to zero
func (s *Server) closeConn() error {
if s.conn == nil {
return nil
}
err := s.conn.Close()
s.conn = nil
return err
}
// Reserve a lease for the client
func (s *Server) reserveLease(p dhcp4.Packet) (*Lease, error) {
// WARNING: do not remove copy()
// the given hwaddr by p.CHAddr() in the packet survives only during ServeDHCP() call
// since we need to retain it we need to make our own copy
hwaddrCOW := p.CHAddr()
hwaddr := make(net.HardwareAddr, len(hwaddrCOW))
copy(hwaddr, hwaddrCOW)
// not assigned a lease, create new one, find IP from LRU
hostname := p.ParseOptions()[dhcp4.OptionHostName]
lease := &Lease{HWAddr: hwaddr, Hostname: string(hostname)}
log.Tracef("Lease not found for %s: creating new one", hwaddr)
ip, err := s.findFreeIP(hwaddr)
if err != nil {
i := s.findExpiredLease()
if i < 0 {
return nil, wrapErrPrint(err, "Couldn't find free IP for the lease %s", hwaddr.String())
}
log.Tracef("Assigning IP address %s to %s (lease for %s expired at %s)",
s.leases[i].IP, hwaddr, s.leases[i].HWAddr, s.leases[i].Expiry)
lease.IP = s.leases[i].IP
s.Lock()
s.leases[i] = lease
s.Unlock()
s.reserveIP(lease.IP, hwaddr)
return lease, nil
}
log.Tracef("Assigning to %s IP address %s", hwaddr, ip.String())
lease.IP = ip
s.Lock()
s.leases = append(s.leases, lease)
s.Unlock()
return lease, nil
}
// Find a lease for the client
func (s *Server) findLease(p dhcp4.Packet) *Lease {
hwaddr := p.CHAddr()
for i := range s.leases {
if bytes.Equal([]byte(hwaddr), []byte(s.leases[i].HWAddr)) {
// log.Tracef("bytes.Equal(%s, %s) returned true", hwaddr, s.leases[i].hwaddr)
return s.leases[i]
}
}
return nil
}
// Find an expired lease and return its index or -1
func (s *Server) findExpiredLease() int {
now := time.Now().Unix()
for i, lease := range s.leases {
if lease.Expiry.Unix() <= now {
return i
}
}
return -1
}
func (s *Server) findFreeIP(hwaddr net.HardwareAddr) (net.IP, error) {
// if IP pool is nil, lazy initialize it
if s.IPpool == nil {
s.IPpool = make(map[[4]byte]net.HardwareAddr)
}
// go from start to end, find unreserved IP
var foundIP net.IP
for i := 0; i < dhcp4.IPRange(s.leaseStart, s.leaseStop); i++ {
newIP := dhcp4.IPAdd(s.leaseStart, i)
foundHWaddr := s.findReservedHWaddr(newIP)
log.Tracef("tried IP %v, got hwaddr %v", newIP, foundHWaddr)
if foundHWaddr != nil && len(foundHWaddr) != 0 {
// if !bytes.Equal(foundHWaddr, hwaddr) {
// log.Tracef("SHOULD NOT HAPPEN: hwaddr in IP pool %s is not equal to hwaddr in lease %s", foundHWaddr, hwaddr)
// }
continue
}
foundIP = newIP
break
}
if foundIP == nil {
// TODO: LRU
return nil, fmt.Errorf("couldn't find free entry in IP pool")
}
s.reserveIP(foundIP, hwaddr)
return foundIP, nil
}
func (s *Server) findReservedHWaddr(ip net.IP) net.HardwareAddr {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
return s.IPpool[IP4]
}
func (s *Server) reserveIP(ip net.IP, hwaddr net.HardwareAddr) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
s.IPpool[IP4] = hwaddr
}
func (s *Server) unreserveIP(ip net.IP) {
rawIP := []byte(ip)
IP4 := [4]byte{rawIP[0], rawIP[1], rawIP[2], rawIP[3]}
delete(s.IPpool, IP4)
}
// ServeDHCP handles an incoming DHCP request
func (s *Server) | (p dhcp4.Packet, msgType dhcp4.MessageType, options dhcp4.Options) dhcp4.Packet {
s.printLeases()
switch msgType {
case dhcp4.Discover: // Broadcast Packet From Client - Can I have an IP?
return s.handleDiscover(p, options)
case dhcp4.Request: // Broadcast From Client - I'll take that IP (Also start for renewals)
// start/renew a lease -- update lease time
// some clients (OSX) just go right ahead and do Request first from previously known IP, if they get NAK, they restart full cycle with Discover then Request
return s.handleDHCP4Request(p, options)
case dhcp4.Decline: // Broadcast From Client - Sorry I can't use that IP
return s.handleDecline(p, options)
case dhcp4.Release: // From Client, I don't need that IP anymore
return s.handleRelease(p, options)
case dhcp4.Inform: // From Client, I have this IP and there's nothing you can do about it
return s.handleInform(p, options)
// from server -- ignore those but enumerate just in case
case dhcp4.Offer: // Broadcast From Server - Here's an IP
log.Printf("DHCP: received message from %s: Offer", p.CHAddr())
case dhcp4.ACK: // From Server, Yes you can have that IP
log.Printf("DHCP: received message from %s: ACK", p.CHAddr())
case dhcp4.NAK: // From Server, No you cannot have that IP
log.Printf("DHCP: received message from %s: NAK", p.CHAddr())
default:
log.Printf("DHCP: unknown packet %v from %s", msgType, p.CHAddr())
return nil
}
return nil
}
// Send ICMP to the specified machine
// Return TRUE if it doesn't reply, which probably means that the IP is available
func (s *Server) addrAvailable(target net.IP) bool {
if s.ICMPTimeout == 0 {
return true
}
pinger, err := ping.NewPinger(target.String())
if err != nil {
log.Error("ping.NewPinger(): %v", err)
return true
}
pinger.SetPrivileged(true)
pinger.Timeout = time.Duration(s.ICMPTimeout) * time.Millisecond
pinger.Count = 1
reply := false
pinger.OnRecv = func(pkt *ping.Packet) {
// log.Tracef("Received ICMP Reply from %v", target)
reply = true
}
log.Tracef("Sending ICMP Echo to %v", target)
pinger.Run()
if reply {
log.Info("DHCP: IP conflict: %v is already used by another device", target)
return false
}
log.Tracef("ICMP procedure is complete: %v", target)
return true
}
// Add the specified IP to the black list for a time period
func (s *Server) blacklistLease(lease *Lease) {
hw := make(net.HardwareAddr, 6)
s.reserveIP(lease.IP, hw)
s.Lock()
lease.HWAddr = hw
lease.Hostname = ""
lease.Expiry = time.Now().Add(s.leaseTime)
s.Unlock()
}
// Return TRUE if DHCP packet is correct
func isValidPacket(p dhcp4.Packet) bool {
hw := p.CHAddr()
zeroes := make([]byte, len(hw))
if bytes.Equal(hw, zeroes) {
log.Tracef("Packet has empty CHAddr")
return false
}
return true
}
func (s *Server) handleDiscover(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
// find a lease, but don't update lease time
var lease *Lease
var err error
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
hostname := p.ParseOptions()[dhcp4.OptionHostName]
log.Tracef("Message from client: Discover. ReqIP: %s HW: %s Hostname: %s",
reqIP, p.CHAddr(), hostname)
if !isValidPacket(p) {
return nil
}
lease = s.findLease(p)
for lease == nil {
lease, err = s.reserveLease(p)
if err != nil {
log.Error("Couldn't find free lease: %s", err)
return nil
}
if !s.addrAvailable(lease.IP) {
s.blacklistLease(lease)
lease = nil
continue
}
break
}
opt := s.leaseOptions.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])
reply := dhcp4.ReplyPacket(p, dhcp4.Offer, s.ipnet.IP, lease.IP, s.leaseTime, opt)
log.Tracef("Replying with offer: offered IP %v for %v with options %+v", lease.IP, s.leaseTime, reply.ParseOptions())
return reply
}
func (s *Server) handleDHCP4Request(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
var lease *Lease
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
log.Tracef("Message from client: Request. IP: %s ReqIP: %s HW: %s",
p.CIAddr(), reqIP, p.CHAddr())
if !isValidPacket(p) {
return nil
}
server := options[dhcp4.OptionServerIdentifier]
if server != nil && !net.IP(server).Equal(s.ipnet.IP) {
log.Tracef("Request message not for this DHCP server (%v vs %v)", server, s.ipnet.IP)
return nil // Message not for this dhcp server
}
if reqIP == nil {
reqIP = p.CIAddr()
} else if reqIP == nil || reqIP.To4() == nil {
log.Tracef("Requested IP isn't a valid IPv4: %s", reqIP)
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
lease = s.findLease(p)
if lease == nil {
log.Tracef("Lease for %s isn't found", p.CHAddr())
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
if !lease.IP.Equal(reqIP) {
log.Tracef("Lease for %s doesn't match requested/client IP: %s vs %s",
lease.HWAddr, lease.IP, reqIP)
return dhcp4.ReplyPacket(p, dhcp4.NAK, s.ipnet.IP, nil, 0, nil)
}
lease.Expiry = time.Now().Add(s.leaseTime)
log.Tracef("Replying with ACK. IP: %s HW: %s Expire: %s",
lease.IP, lease.HWAddr, lease.Expiry)
opt := s.leaseOptions.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])
return dhcp4.ReplyPacket(p, dhcp4.ACK, s.ipnet.IP, lease.IP, s.leaseTime, opt)
}
func (s *Server) handleInform(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
log.Tracef("Message from client: Inform. IP: %s HW: %s",
p.CIAddr(), p.CHAddr())
return nil
}
func (s *Server) handleRelease(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
log.Tracef("Message from client: Release. IP: %s HW: %s",
p.CIAddr(), p.CHAddr())
return nil
}
func (s *Server) handleDecline(p dhcp4.Packet, options dhcp4.Options) dhcp4.Packet {
reqIP := net.IP(options[dhcp4.OptionRequestedIPAddress])
log.Tracef("Message from client: Decline. IP: %s HW: %s",
reqIP, p.CHAddr())
return nil
}
// Leases returns the list of current DHCP leases (thread-safe)
func (s *Server) Leases() []Lease {
var result []Lease
now := time.Now().Unix()
s.RLock()
for _, lease := range s.leases {
if lease.Expiry.Unix() > now {
result = append(result, *lease)
}
}
s.RUnlock()
return result
}
// Print information about the current leases
func (s *Server) printLeases() {
log.Tracef("Leases:")
for i, lease := range s.leases {
log.Tracef("Lease #%d: hwaddr %s, ip %s, expiry %s",
i, lease.HWAddr, lease.IP, lease.Expiry)
}
}
// Reset internal state
func (s *Server) reset() {
s.Lock()
s.leases = nil
s.Unlock()
s.IPpool = make(map[[4]byte]net.HardwareAddr)
}
| ServeDHCP | identifier_name |
index.js | if ('serviceWorker' in navigator) {
// register service worker
navigator.serviceWorker.register('service-worker.js')
}
const tableBuilder = {
/**
* minimum width in pixels for each column.
* @type {number}
*/
minWidthForColumn: 150,
/**
* default max number of columns
* @type {number}
*/
maxNumberOfColumnsEver: 12,
/**
* default minimum number of columns
* @type {number}
*/
minXDefault: 1,
/**
* default max number of columns
* @type {number}
*/
maxXDefault: 12,
/**
* default min number of row
* @type {number}
*/
minYDefault: 1,
/**
* default max number of rows
* @type {Integer}
*/
maxYDefault: 12,
/**
* calculated minimum number of columns
* @type {number}
*/
minX: 0,
/**
* calculated max number of columns
* @type {number}
*/
maxX: 0,
/**
* calculated minimum number of rows
* @type {number}
*/
minY: 0,
/**
* calculated maximum number of columns
* @type {number}
*/
maxY: 0,
/**
* is it a restart
* @type {boolean}
*/
restart: false,
/**
* viewport width
* @type {number}
*/
vw: 0,
/**
* viewport height
* @type {number}
*/
vh: 0,
/**
* reset SVG
* @type {string}
*/
restartSVG: `
<svg version="1.1" viewBox="0 0 178.2 186.08" xmlns="http://www.w3.org/2000/svg">
<g transform="translate(-287.94 -456.48)" fill="none">
<path transform="matrix(.46642 -.98449 1.0097 .47838 24.256 911.33)" d="m505.58 148.29a70.219 68.464 0 0 1-54.814 66.796 70.219 68.464 0 0 1-78.865-37.488 70.219 68.464 0 0 1 20.211-83.244 70.219 68.464 0 0 1 87.733 0.96318" stroke="#000" stroke-linecap="round" stroke-width="22.66"/>
<path d="m377.05 468.98v75.785" stroke="#000002" stroke-linecap="square" stroke-width="25"/>
</g>
</svg>`,
init: function (restart) {
// lets set the viewport: https://stackoverflow.com/questions/1248081/how-to-get-the-browser-viewport-dimensions
this.vw = Math.max(document.documentElement.clientWidth, window.innerWidth || 0)
this.vh = Math.max(document.documentElement.clientHeight, window.innerHeight || 0)
// work out the number of columns to add
let additionalColumns = Math.floor(this.vw / this.minWidthForColumn)
const maxColumns = this.maxNumberOfColumnsEver - 1
if (additionalColumns > maxColumns) {
additionalColumns = maxColumns
}
// reset min and mix
this.maxXDefault = this.minXDefault + additionalColumns
if (restart === true || this.minX === 0) {
if (restart === true) {
this.restart = true
}
this.minX = this.minXDefault
this.minY = this.minYDefault
this.maxX = this.maxXDefault
this.maxY = this.maxYDefault
}
// start building HTML
let html = ''
html += this.getTableStart()
for (let y = 0; y <= this.maxY; y++) {
// if minY has not been reached yet, do the next loop
if (y > 0 && y < this.minY) {
continue
}
// start a row
html += this.getRowStart()
for (let x = 0; x <= this.maxX; x++) {
// if minX has not been reached yet, do the next loop
if (x > 0 && x < this.minX) {
continue
}
// build the cell
html += this.getCell(x, y)
}
html += this.getRowEnd()
}
html += this.getTableEnd()
document.getElementById('table-holder').innerHTML = html
this.setFirstThreeAnswers();
},
getTableStart: function () { return '<table><tbody>' },
getTableEnd: function () { return '</tbody></table>' },
getRowStart: function () { return '<tr>' },
getRowEnd: function () { return '</tr>' },
getRowHeader: function (y) {
return '<th scope="row" class="y-' + y + ' good">' + y + '</th>'
},
getColumnHeader: function (x) {
return '<th scope="col" class="x-' + x + ' good">' + x + '</th>'
},
getCell: function (x, y) {
if (x === 0 && y === 0) {
// HEADER-HEADER: this is the upper-left cell - the reset cell!
return '' +
'<th class="restart">' +
'<a href="#" ' +
'onclick="if(window.confirm(\'Delete all your answers and start again?\') === true) {tableBuilder.init(true);}">' +
this.restartSVG +
'</a> ' +
'</th>'
} else if (x === 0) {
// HEADER: get a new row (tr)
return this.getRowHeader(y)
} else if (y === 0) {
// HEADER: get a new column
return this.getColumnHeader(x)
} else {
// real cell!
const classX = 'x-' + x
const classY = 'y-' + y
const tabIndex = this.getTabIndex(x, y)
const id = 'input-' + x + 'x' + y
const value = this.getValue(id)
let classA = ''
if (value && value !== null) {
classA = 'good'
}
return '' +
'<td class="' + classX + ' ' + classY + '" >' +
'<input ' +
'type="number"' +
'id="' + id + '" ' +
'data-answer="' + (x * y) + '" ' +
'placeholder="' + x + '×' + y + '" ' +
'onkeyup="tableBuilder.test(event,this,' + x + ', ' + y + ', false);" ' +
'onblur="tableBuilder.test(this,' + x + ', ' + y + ', false);" ' +
'onchange="tableBuilder.test(this,' + x + ', ' + y + ', true);" ' +
'pattern="[0-9]" ' +
'tabindex="' + tabIndex + '" ' +
'value="' + value + '" ' +
'class="' + classA + '" ' +
'/>' +
'</td>'
}
},
g | (id) {
let value = ''
if (this.restart) {
this.myCookie.eraseCookie(id)
value = ''
} else {
value = this.myCookie.getCookie(id)
if (value === null) {
value = ''
}
}
return value
},
/**
* test if the entered value is correct?
* @param {object} event - what event caused the test?
* @param {object} el - element being tested
* @param {number} x - the value for x
* @param {number} y - the value for y
* @param {boolean} testGrid - ????
*/
test: function (event, el, x, y, testGrid) {
// what is the answer
const test = x * y
const answer = parseInt(el.value)
if (!answer || isNaN(answer)) {
// no answer!
this.makeNothing(el)
} else {
// test answer ...
const newGoodAnswer = !el.classList.contains('good')
if (answer === test) {
// right answer
this.makeGood(el)
// save cookie
this.myCookie.setCookie(el.id, answer)
// find next answer!
if (newGoodAnswer) {
const newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
}
// if(y === this.maxY && testGrid) {
// this.levelUp(x);
// }
} else {
// bad answer!
this.makeBad(el)
}
}
this.keyPressed(event, x, y)
},
makeNothing: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('bad')
el.classList.remove('good')
el.classList.add('nothing')
}
},
/**
* bad answer
*/
makeGood: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('bad')
el.classList.add('good')
}
},
/**
* good answer
*/
makeBad: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('good')
el.classList.add('bad')
}
},
/**
* action key being pressed
* @param {object} event
* @param {number} x
* @param {number} y
*/
keyPressed: function (event, x, y) {
let newTabIndex
switch (event.code) {
case 'Enter':
newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case 'ArrowLeft':
newTabIndex = this.getLeftTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case 'ArrowRight':
newTabIndex = this.getRightTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
/*
This clashes with the number input type arrow key functionality
----
case "ArrowUp":
newTabIndex = this.getPrevTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case "DownUp":
newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
*/
}
},
/**
* task completed!
* returns true if task is completed.
* @param {number} x
* @return {boolean}
*/
levelUp: function (x) {
const selector = 'x-' + x
const cells = document.getElementsByClassName(selector)
let i = 0
for (i = 0; i < cells.length; i++) {
const cell = cells[i]
if (cell.tagName.toLowerCase() === 'td') {
if (!cell.childNodes[0].classList.contains('good')) {
return false
}
}
}
this.minX++
this.maxX++
this.init()
return true
},
setFirstThreeAnswers: function () {
const x = 1
let y = 1
let answer = null
let input = null
for (y = 1; y < 4; y++) {
input = this.getTabByXY(x, y)
answer = x * y
console.log('==================')
console.log('answer = ' + answer)
console.log(input)
console.log('==================')
}
},
//
// zeroFill: function (number, width) {
// width -= number.toString().length
// if (width > 0) {
// return new Array(width + (/\./.test(number) ? 2 : 1)).join('0') + number
// }
// return number + '' // always return a string
// },
/**
* cookie management
* @type {Object}
*/
myCookie: {
/**
* set a cookie value
* @param {string} name
* @param {mixed} value
* @param {number} days how long to keep it ?
*/
setCookie: function (name, value, days) {
let expires = ''
if (typeof days === 'undefined') {
days = 14
}
if (days) {
var date = new Date()
date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000))
expires = '; expires=' + date.toUTCString()
}
// console.log('set cookie: '+name+'='+value);
document.cookie = name + '=' + (value || '') + expires + '; path=/'
},
/**
* get cookie value
* @param {string} name
* @return {mixed}
*/
getCookie: function (name) {
const nameEQ = name + '='
const ca = document.cookie.split(';')
for (let i = 0; i < ca.length; i++) {
let c = ca[i]
while (c.charAt(0) === ' ') {
c = c.substring(1, c.length)
}
if (c.indexOf(nameEQ) === 0) {
const value = c.substring(nameEQ.length, c.length)
// console.log('get cookie: '+name+'='+value);
return value
}
}
return null
},
eraseCookie: function (name) {
// console.log('erase cookie: '+name);
this.setCookie(name, null, 0)
}
},
/**
* get a unique number that always prioritises X over Y
* e.g. if x is 3 and y is 7 then the number is 30000000007000
* @param {number} x
* @param {number} y
* @return {number}
*/
getTabIndex: function (x, y) {
return (10000000 * x) + y
},
/**
* find a cell by tab index
* @param {number} x
* @param {number} y
* @return {object|null}
*/
getTabByXY: function (x, y) {
const getNextTabIndexValue = this.getTabIndex(x, y)
const selector = 'input[tabindex=\'' + getNextTabIndexValue + '\']'
// console.log(selector);
// console.log(document.querySelector(selector));
if (document.querySelector(selector)) {
return document.querySelector(selector)
}
},
getLeftTabIndex: function (x, y) {
console.log(this.maxXDefault)
if (x !== 1) {
x--
} else {
x = this.maxXDefault
}
return this.getTabByXY(x, y)
},
getRightTabIndex: function (x, y) {
if (x !== this.maxXDefault) {
x++
} else {
x = 1
}
return this.getTabByXY(x, y)
},
getPrevTabIndex: function (x, y) {
if (y === this.minY) {
y = this.maxY
} else {
y--
}
return this.getTabByXY(x, y)
},
getNextTabIndex: function (x, y) {
if (y === this.maxY) {
x++
y = this.minY
} else {
y++
}
return this.getTabByXY(x, y)
}
}
tableBuilder.init()
| etValue | identifier_name |
index.js | if ('serviceWorker' in navigator) {
// register service worker
navigator.serviceWorker.register('service-worker.js')
}
const tableBuilder = {
/**
* minimum width in pixels for each column.
* @type {number}
*/
minWidthForColumn: 150,
/**
* default max number of columns
* @type {number}
*/
maxNumberOfColumnsEver: 12,
/**
* default minimum number of columns
* @type {number}
*/
minXDefault: 1,
/**
* default max number of columns
* @type {number}
*/
maxXDefault: 12,
/**
* default min number of row
* @type {number}
*/
minYDefault: 1,
/**
* default max number of rows
* @type {Integer}
*/
maxYDefault: 12,
/**
* calculated minimum number of columns
* @type {number}
*/
minX: 0,
/**
* calculated max number of columns
* @type {number}
*/
maxX: 0,
/**
* calculated minimum number of rows
* @type {number}
*/
minY: 0,
/**
* calculated maximum number of columns
* @type {number}
*/
maxY: 0,
/**
* is it a restart
* @type {boolean}
*/
restart: false,
/**
* viewport width
* @type {number}
*/
vw: 0,
/**
* viewport height
* @type {number}
*/
vh: 0,
/**
* reset SVG
* @type {string}
*/
restartSVG: `
<svg version="1.1" viewBox="0 0 178.2 186.08" xmlns="http://www.w3.org/2000/svg">
<g transform="translate(-287.94 -456.48)" fill="none">
<path transform="matrix(.46642 -.98449 1.0097 .47838 24.256 911.33)" d="m505.58 148.29a70.219 68.464 0 0 1-54.814 66.796 70.219 68.464 0 0 1-78.865-37.488 70.219 68.464 0 0 1 20.211-83.244 70.219 68.464 0 0 1 87.733 0.96318" stroke="#000" stroke-linecap="round" stroke-width="22.66"/>
<path d="m377.05 468.98v75.785" stroke="#000002" stroke-linecap="square" stroke-width="25"/>
</g>
</svg>`,
init: function (restart) {
// lets set the viewport: https://stackoverflow.com/questions/1248081/how-to-get-the-browser-viewport-dimensions
this.vw = Math.max(document.documentElement.clientWidth, window.innerWidth || 0)
this.vh = Math.max(document.documentElement.clientHeight, window.innerHeight || 0)
// work out the number of columns to add
let additionalColumns = Math.floor(this.vw / this.minWidthForColumn)
const maxColumns = this.maxNumberOfColumnsEver - 1
if (additionalColumns > maxColumns) {
additionalColumns = maxColumns
}
// reset min and mix
this.maxXDefault = this.minXDefault + additionalColumns
if (restart === true || this.minX === 0) {
if (restart === true) {
this.restart = true
}
this.minX = this.minXDefault
this.minY = this.minYDefault
this.maxX = this.maxXDefault
this.maxY = this.maxYDefault
}
// start building HTML
let html = ''
html += this.getTableStart()
for (let y = 0; y <= this.maxY; y++) {
// if minY has not been reached yet, do the next loop
if (y > 0 && y < this.minY) {
continue
}
// start a row
html += this.getRowStart()
for (let x = 0; x <= this.maxX; x++) {
// if minX has not been reached yet, do the next loop
if (x > 0 && x < this.minX) {
continue
}
// build the cell
html += this.getCell(x, y)
}
html += this.getRowEnd()
}
html += this.getTableEnd()
document.getElementById('table-holder').innerHTML = html
this.setFirstThreeAnswers();
},
getTableStart: function () { return '<table><tbody>' },
getTableEnd: function () { return '</tbody></table>' },
getRowStart: function () { return '<tr>' },
getRowEnd: function () { return '</tr>' },
getRowHeader: function (y) {
return '<th scope="row" class="y-' + y + ' good">' + y + '</th>'
},
getColumnHeader: function (x) {
return '<th scope="col" class="x-' + x + ' good">' + x + '</th>'
},
getCell: function (x, y) {
if (x === 0 && y === 0) {
// HEADER-HEADER: this is the upper-left cell - the reset cell!
return '' +
'<th class="restart">' +
'<a href="#" ' +
'onclick="if(window.confirm(\'Delete all your answers and start again?\') === true) {tableBuilder.init(true);}">' +
this.restartSVG +
'</a> ' +
'</th>'
} else if (x === 0) {
// HEADER: get a new row (tr)
return this.getRowHeader(y)
} else if (y === 0) {
// HEADER: get a new column
return this.getColumnHeader(x)
} else {
// real cell!
const classX = 'x-' + x
const classY = 'y-' + y
const tabIndex = this.getTabIndex(x, y)
const id = 'input-' + x + 'x' + y
const value = this.getValue(id)
let classA = ''
if (value && value !== null) {
classA = 'good'
}
return '' +
'<td class="' + classX + ' ' + classY + '" >' +
'<input ' +
'type="number"' +
'id="' + id + '" ' +
'data-answer="' + (x * y) + '" ' +
'placeholder="' + x + '×' + y + '" ' +
'onkeyup="tableBuilder.test(event,this,' + x + ', ' + y + ', false);" ' +
'onblur="tableBuilder.test(this,' + x + ', ' + y + ', false);" ' +
'onchange="tableBuilder.test(this,' + x + ', ' + y + ', true);" ' +
'pattern="[0-9]" ' +
'tabindex="' + tabIndex + '" ' +
'value="' + value + '" ' +
'class="' + classA + '" ' +
'/>' +
'</td>'
}
},
getValue (id) { |
/**
* test if the entered value is correct?
* @param {object} event - what event caused the test?
* @param {object} el - element being tested
* @param {number} x - the value for x
* @param {number} y - the value for y
* @param {boolean} testGrid - ????
*/
test: function (event, el, x, y, testGrid) {
// what is the answer
const test = x * y
const answer = parseInt(el.value)
if (!answer || isNaN(answer)) {
// no answer!
this.makeNothing(el)
} else {
// test answer ...
const newGoodAnswer = !el.classList.contains('good')
if (answer === test) {
// right answer
this.makeGood(el)
// save cookie
this.myCookie.setCookie(el.id, answer)
// find next answer!
if (newGoodAnswer) {
const newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
}
// if(y === this.maxY && testGrid) {
// this.levelUp(x);
// }
} else {
// bad answer!
this.makeBad(el)
}
}
this.keyPressed(event, x, y)
},
makeNothing: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('bad')
el.classList.remove('good')
el.classList.add('nothing')
}
},
/**
* bad answer
*/
makeGood: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('bad')
el.classList.add('good')
}
},
/**
* good answer
*/
makeBad: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('good')
el.classList.add('bad')
}
},
/**
* action key being pressed
* @param {object} event
* @param {number} x
* @param {number} y
*/
keyPressed: function (event, x, y) {
let newTabIndex
switch (event.code) {
case 'Enter':
newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case 'ArrowLeft':
newTabIndex = this.getLeftTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case 'ArrowRight':
newTabIndex = this.getRightTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
/*
This clashes with the number input type arrow key functionality
----
case "ArrowUp":
newTabIndex = this.getPrevTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case "DownUp":
newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
*/
}
},
/**
* task completed!
* returns true if task is completed.
* @param {number} x
* @return {boolean}
*/
levelUp: function (x) {
const selector = 'x-' + x
const cells = document.getElementsByClassName(selector)
let i = 0
for (i = 0; i < cells.length; i++) {
const cell = cells[i]
if (cell.tagName.toLowerCase() === 'td') {
if (!cell.childNodes[0].classList.contains('good')) {
return false
}
}
}
this.minX++
this.maxX++
this.init()
return true
},
setFirstThreeAnswers: function () {
const x = 1
let y = 1
let answer = null
let input = null
for (y = 1; y < 4; y++) {
input = this.getTabByXY(x, y)
answer = x * y
console.log('==================')
console.log('answer = ' + answer)
console.log(input)
console.log('==================')
}
},
//
// zeroFill: function (number, width) {
// width -= number.toString().length
// if (width > 0) {
// return new Array(width + (/\./.test(number) ? 2 : 1)).join('0') + number
// }
// return number + '' // always return a string
// },
/**
* cookie management
* @type {Object}
*/
myCookie: {
/**
* set a cookie value
* @param {string} name
* @param {mixed} value
* @param {number} days how long to keep it ?
*/
setCookie: function (name, value, days) {
let expires = ''
if (typeof days === 'undefined') {
days = 14
}
if (days) {
var date = new Date()
date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000))
expires = '; expires=' + date.toUTCString()
}
// console.log('set cookie: '+name+'='+value);
document.cookie = name + '=' + (value || '') + expires + '; path=/'
},
/**
* get cookie value
* @param {string} name
* @return {mixed}
*/
getCookie: function (name) {
const nameEQ = name + '='
const ca = document.cookie.split(';')
for (let i = 0; i < ca.length; i++) {
let c = ca[i]
while (c.charAt(0) === ' ') {
c = c.substring(1, c.length)
}
if (c.indexOf(nameEQ) === 0) {
const value = c.substring(nameEQ.length, c.length)
// console.log('get cookie: '+name+'='+value);
return value
}
}
return null
},
eraseCookie: function (name) {
// console.log('erase cookie: '+name);
this.setCookie(name, null, 0)
}
},
/**
* get a unique number that always prioritises X over Y
* e.g. if x is 3 and y is 7 then the number is 30000000007000
* @param {number} x
* @param {number} y
* @return {number}
*/
getTabIndex: function (x, y) {
return (10000000 * x) + y
},
/**
* find a cell by tab index
* @param {number} x
* @param {number} y
* @return {object|null}
*/
getTabByXY: function (x, y) {
const getNextTabIndexValue = this.getTabIndex(x, y)
const selector = 'input[tabindex=\'' + getNextTabIndexValue + '\']'
// console.log(selector);
// console.log(document.querySelector(selector));
if (document.querySelector(selector)) {
return document.querySelector(selector)
}
},
getLeftTabIndex: function (x, y) {
console.log(this.maxXDefault)
if (x !== 1) {
x--
} else {
x = this.maxXDefault
}
return this.getTabByXY(x, y)
},
getRightTabIndex: function (x, y) {
if (x !== this.maxXDefault) {
x++
} else {
x = 1
}
return this.getTabByXY(x, y)
},
getPrevTabIndex: function (x, y) {
if (y === this.minY) {
y = this.maxY
} else {
y--
}
return this.getTabByXY(x, y)
},
getNextTabIndex: function (x, y) {
if (y === this.maxY) {
x++
y = this.minY
} else {
y++
}
return this.getTabByXY(x, y)
}
}
tableBuilder.init()
|
let value = ''
if (this.restart) {
this.myCookie.eraseCookie(id)
value = ''
} else {
value = this.myCookie.getCookie(id)
if (value === null) {
value = ''
}
}
return value
}, | identifier_body |
index.js | if ('serviceWorker' in navigator) {
// register service worker
navigator.serviceWorker.register('service-worker.js')
}
const tableBuilder = {
/**
* minimum width in pixels for each column.
* @type {number}
*/
minWidthForColumn: 150,
/**
* default max number of columns
* @type {number}
*/
maxNumberOfColumnsEver: 12,
/**
* default minimum number of columns
* @type {number}
*/
minXDefault: 1,
/**
* default max number of columns
* @type {number}
*/
maxXDefault: 12,
/**
* default min number of row
* @type {number}
*/
minYDefault: 1,
/**
* default max number of rows
* @type {Integer}
*/
maxYDefault: 12,
/**
* calculated minimum number of columns
* @type {number}
*/
minX: 0,
/**
* calculated max number of columns
* @type {number}
*/
maxX: 0,
/**
* calculated minimum number of rows
* @type {number}
*/
minY: 0,
/**
* calculated maximum number of columns
* @type {number}
*/
maxY: 0,
/**
* is it a restart
* @type {boolean}
*/
restart: false,
/**
* viewport width
* @type {number}
*/
vw: 0,
/**
* viewport height
* @type {number}
*/
vh: 0,
/**
* reset SVG | * @type {string}
*/
restartSVG: `
<svg version="1.1" viewBox="0 0 178.2 186.08" xmlns="http://www.w3.org/2000/svg">
<g transform="translate(-287.94 -456.48)" fill="none">
<path transform="matrix(.46642 -.98449 1.0097 .47838 24.256 911.33)" d="m505.58 148.29a70.219 68.464 0 0 1-54.814 66.796 70.219 68.464 0 0 1-78.865-37.488 70.219 68.464 0 0 1 20.211-83.244 70.219 68.464 0 0 1 87.733 0.96318" stroke="#000" stroke-linecap="round" stroke-width="22.66"/>
<path d="m377.05 468.98v75.785" stroke="#000002" stroke-linecap="square" stroke-width="25"/>
</g>
</svg>`,
init: function (restart) {
// lets set the viewport: https://stackoverflow.com/questions/1248081/how-to-get-the-browser-viewport-dimensions
this.vw = Math.max(document.documentElement.clientWidth, window.innerWidth || 0)
this.vh = Math.max(document.documentElement.clientHeight, window.innerHeight || 0)
// work out the number of columns to add
let additionalColumns = Math.floor(this.vw / this.minWidthForColumn)
const maxColumns = this.maxNumberOfColumnsEver - 1
if (additionalColumns > maxColumns) {
additionalColumns = maxColumns
}
// reset min and mix
this.maxXDefault = this.minXDefault + additionalColumns
if (restart === true || this.minX === 0) {
if (restart === true) {
this.restart = true
}
this.minX = this.minXDefault
this.minY = this.minYDefault
this.maxX = this.maxXDefault
this.maxY = this.maxYDefault
}
// start building HTML
let html = ''
html += this.getTableStart()
for (let y = 0; y <= this.maxY; y++) {
// if minY has not been reached yet, do the next loop
if (y > 0 && y < this.minY) {
continue
}
// start a row
html += this.getRowStart()
for (let x = 0; x <= this.maxX; x++) {
// if minX has not been reached yet, do the next loop
if (x > 0 && x < this.minX) {
continue
}
// build the cell
html += this.getCell(x, y)
}
html += this.getRowEnd()
}
html += this.getTableEnd()
document.getElementById('table-holder').innerHTML = html
this.setFirstThreeAnswers();
},
getTableStart: function () { return '<table><tbody>' },
getTableEnd: function () { return '</tbody></table>' },
getRowStart: function () { return '<tr>' },
getRowEnd: function () { return '</tr>' },
getRowHeader: function (y) {
return '<th scope="row" class="y-' + y + ' good">' + y + '</th>'
},
getColumnHeader: function (x) {
return '<th scope="col" class="x-' + x + ' good">' + x + '</th>'
},
getCell: function (x, y) {
if (x === 0 && y === 0) {
// HEADER-HEADER: this is the upper-left cell - the reset cell!
return '' +
'<th class="restart">' +
'<a href="#" ' +
'onclick="if(window.confirm(\'Delete all your answers and start again?\') === true) {tableBuilder.init(true);}">' +
this.restartSVG +
'</a> ' +
'</th>'
} else if (x === 0) {
// HEADER: get a new row (tr)
return this.getRowHeader(y)
} else if (y === 0) {
// HEADER: get a new column
return this.getColumnHeader(x)
} else {
// real cell!
const classX = 'x-' + x
const classY = 'y-' + y
const tabIndex = this.getTabIndex(x, y)
const id = 'input-' + x + 'x' + y
const value = this.getValue(id)
let classA = ''
if (value && value !== null) {
classA = 'good'
}
return '' +
'<td class="' + classX + ' ' + classY + '" >' +
'<input ' +
'type="number"' +
'id="' + id + '" ' +
'data-answer="' + (x * y) + '" ' +
'placeholder="' + x + '×' + y + '" ' +
'onkeyup="tableBuilder.test(event,this,' + x + ', ' + y + ', false);" ' +
'onblur="tableBuilder.test(this,' + x + ', ' + y + ', false);" ' +
'onchange="tableBuilder.test(this,' + x + ', ' + y + ', true);" ' +
'pattern="[0-9]" ' +
'tabindex="' + tabIndex + '" ' +
'value="' + value + '" ' +
'class="' + classA + '" ' +
'/>' +
'</td>'
}
},
getValue (id) {
let value = ''
if (this.restart) {
this.myCookie.eraseCookie(id)
value = ''
} else {
value = this.myCookie.getCookie(id)
if (value === null) {
value = ''
}
}
return value
},
/**
* test if the entered value is correct?
* @param {object} event - what event caused the test?
* @param {object} el - element being tested
* @param {number} x - the value for x
* @param {number} y - the value for y
* @param {boolean} testGrid - ????
*/
test: function (event, el, x, y, testGrid) {
// what is the answer
const test = x * y
const answer = parseInt(el.value)
if (!answer || isNaN(answer)) {
// no answer!
this.makeNothing(el)
} else {
// test answer ...
const newGoodAnswer = !el.classList.contains('good')
if (answer === test) {
// right answer
this.makeGood(el)
// save cookie
this.myCookie.setCookie(el.id, answer)
// find next answer!
if (newGoodAnswer) {
const newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
}
// if(y === this.maxY && testGrid) {
// this.levelUp(x);
// }
} else {
// bad answer!
this.makeBad(el)
}
}
this.keyPressed(event, x, y)
},
makeNothing: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('bad')
el.classList.remove('good')
el.classList.add('nothing')
}
},
/**
* bad answer
*/
makeGood: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('bad')
el.classList.add('good')
}
},
/**
* good answer
*/
makeBad: function (el) {
if (typeof el.classList !== 'undefined') {
el.classList.remove('good')
el.classList.add('bad')
}
},
/**
* action key being pressed
* @param {object} event
* @param {number} x
* @param {number} y
*/
keyPressed: function (event, x, y) {
let newTabIndex
switch (event.code) {
case 'Enter':
newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case 'ArrowLeft':
newTabIndex = this.getLeftTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case 'ArrowRight':
newTabIndex = this.getRightTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
/*
This clashes with the number input type arrow key functionality
----
case "ArrowUp":
newTabIndex = this.getPrevTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
case "DownUp":
newTabIndex = this.getNextTabIndex(x, y)
if (newTabIndex) {
newTabIndex.focus()
}
break
*/
}
},
/**
* task completed!
* returns true if task is completed.
* @param {number} x
* @return {boolean}
*/
levelUp: function (x) {
const selector = 'x-' + x
const cells = document.getElementsByClassName(selector)
let i = 0
for (i = 0; i < cells.length; i++) {
const cell = cells[i]
if (cell.tagName.toLowerCase() === 'td') {
if (!cell.childNodes[0].classList.contains('good')) {
return false
}
}
}
this.minX++
this.maxX++
this.init()
return true
},
setFirstThreeAnswers: function () {
const x = 1
let y = 1
let answer = null
let input = null
for (y = 1; y < 4; y++) {
input = this.getTabByXY(x, y)
answer = x * y
console.log('==================')
console.log('answer = ' + answer)
console.log(input)
console.log('==================')
}
},
//
// zeroFill: function (number, width) {
// width -= number.toString().length
// if (width > 0) {
// return new Array(width + (/\./.test(number) ? 2 : 1)).join('0') + number
// }
// return number + '' // always return a string
// },
/**
* cookie management
* @type {Object}
*/
myCookie: {
/**
* set a cookie value
* @param {string} name
* @param {mixed} value
* @param {number} days how long to keep it ?
*/
setCookie: function (name, value, days) {
let expires = ''
if (typeof days === 'undefined') {
days = 14
}
if (days) {
var date = new Date()
date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000))
expires = '; expires=' + date.toUTCString()
}
// console.log('set cookie: '+name+'='+value);
document.cookie = name + '=' + (value || '') + expires + '; path=/'
},
/**
* get cookie value
* @param {string} name
* @return {mixed}
*/
getCookie: function (name) {
const nameEQ = name + '='
const ca = document.cookie.split(';')
for (let i = 0; i < ca.length; i++) {
let c = ca[i]
while (c.charAt(0) === ' ') {
c = c.substring(1, c.length)
}
if (c.indexOf(nameEQ) === 0) {
const value = c.substring(nameEQ.length, c.length)
// console.log('get cookie: '+name+'='+value);
return value
}
}
return null
},
eraseCookie: function (name) {
// console.log('erase cookie: '+name);
this.setCookie(name, null, 0)
}
},
/**
* get a unique number that always prioritises X over Y
* e.g. if x is 3 and y is 7 then the number is 30000000007000
* @param {number} x
* @param {number} y
* @return {number}
*/
getTabIndex: function (x, y) {
return (10000000 * x) + y
},
/**
* find a cell by tab index
* @param {number} x
* @param {number} y
* @return {object|null}
*/
getTabByXY: function (x, y) {
const getNextTabIndexValue = this.getTabIndex(x, y)
const selector = 'input[tabindex=\'' + getNextTabIndexValue + '\']'
// console.log(selector);
// console.log(document.querySelector(selector));
if (document.querySelector(selector)) {
return document.querySelector(selector)
}
},
getLeftTabIndex: function (x, y) {
console.log(this.maxXDefault)
if (x !== 1) {
x--
} else {
x = this.maxXDefault
}
return this.getTabByXY(x, y)
},
getRightTabIndex: function (x, y) {
if (x !== this.maxXDefault) {
x++
} else {
x = 1
}
return this.getTabByXY(x, y)
},
getPrevTabIndex: function (x, y) {
if (y === this.minY) {
y = this.maxY
} else {
y--
}
return this.getTabByXY(x, y)
},
getNextTabIndex: function (x, y) {
if (y === this.maxY) {
x++
y = this.minY
} else {
y++
}
return this.getTabByXY(x, y)
}
}
tableBuilder.init() | random_line_split | |
parse_files_for_transcripts.py | #!/usr/bin/python
import sys, getopt, os
import re
from operator import itemgetter
## Parse Pelechano datafiles ###
def main(argv):
sorted_keys = list()
data_to_filter = dict()
two_fold_data = dict()
genome_data = dict()
transcript_data = dict()
feat_type = 'gene' ## specific feature type; use 'all' if no specific feature type
filter_val = 3 ## number to filter by (count or score)
filter_type = 'count' # can filter by count('count') or score cutoff ('cutoff')
overlap = '1' # amount of overlap a transcript has on an ORF
try:
opts, args = getopt.getopt(argv,"hi:o:l:f:v:t:s",["ifile=","ofile=", "lfile=","format=","value=","type=","seqfeat="])
except getopt.GetoptError:
print 'parse_files_for_transcripts.py -i <inputfile> -o <outputfile> -l <locusfile> -f <output_file_format> -v <filter value> -t <filter_type: count OR cutoff> -s <sequence_feature: gene, CDS, etc>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'parse_files_for_transcripts.py -i <inputfile> -o <outputfile> -l <locusfile> -f <output_file_format> -v <filter value> -t <filter_type: count OR cutoff> -s <sequence_feature: gene, CDS, etc>'
sys.exit()
elif opt in ("-i", "--ifile"): # gff3 format
firstfile = arg
elif opt in ("-o", "--ofile"): # outfile name
outfile = arg
elif opt in ("-l","--lfile"): # s_cer gff3 file
locusfile = arg
elif opt in ("-f","--format"): #gff3 or tsv/wig file ## DOESN'T WORK
file_format = arg
elif opt in ("-v","--value"): ## filter value
filter_val = arg
elif opt in ("-t","--type"): ## type of filter -- count or cutoff # Not tested
filter_type = arg
elif opt in ("-s","--seqfeat"): ## feature type # Not tested
feat_type = arg
# defining out files:
unfiltered_file = "unfiltered_" + outfile
non_match_file = "unmatched_" + outfile
# make hashes
(file_data) = _open_make_hash(firstfile) # opens file to parse and makes a hash
(genome_data) = _parse_sac_gff(locusfile) # opens and makes hash of gene annotations
# print "file keys: " + ",".join(file_data.keys())
# print "s_cer gff keys: "+ ",".join(genome_data.keys())
## iterate over all the 'gene' feature types and find the transcripts that cover the entire thing
#, non_matches)
(unfiltered_matches, filtered_transcripts, unmatched_transcripts) = _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap)
## right now, just make a gff file
_print_gff(filtered_transcripts, outfile)
_print_gff(unfiltered_matches, unfiltered_file)
_print_gff(unmatched_transcripts, non_match_file)
## if -f flag is used, then just reformat data ###
# if 'format' in vars():
# # reformatted_data = _reformat_data(data_to_filter, mapfile)
#
# ### creating reformatted file
#
# final_file = open(outfile, 'w')
#
# ## print header row
# # final_file.write ("regulator feature name\tregulator gene name\ttarget feature name\ttarget gene name\tvalue\tstrain\n")
#
def | (dict_to_print, outfile):
print "making GFF file"
#print headers #
newfile = open(outfile, 'w')
newfile.write("## GFF file for transcripts\n")
chr_order = ['chrI','chrII','chrIII','chrIV','chrV','chrVI','chrVII','chrVIII','chrIX','chrX','chrXI','chrXII','chrXIII','chrXIV','chrXV','chrXVI']
for chr in chr_order:
transcript_data = dict_to_print[chr]
for key in sorted(transcript_data.keys()):
print 'feature: ' + key + '# of transcripts: ' + str(len(transcript_data[key]))
if len(transcript_data[key]) < 1:
continue
for track in transcript_data[key]:
print 'adding ' + key + ' to file'
# print "|".join(track.values())
notes = track.get('notes', ".")
newfile.write("\t".join([chr,'rtracklayer_'+key,'sequence_feature',track['start'], track['stop'], track['score'],track['strand'],'.',notes]))
newfile.write("\n")
# sys.exit
# def _make_bed_file(dict_to_print, outfile):
#
def _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap):
transcripts = list()
chrom_plus = list()
chrom_minus = list()
newdata = dict()
unfiltered_data = dict()
unmatched_trans = dict()
remove_list = dict()
for chromosome in genome_data.keys():
if chromosome == 'chrmt': # skip mito chromosome
print 'skipping mito'
continue
matching_file_data = file_data[chromosome]
# unmatched_trans = matching_file_data
# print 'chromosome:' + chromosome + ':' + matching_file_data[0]['chr']
#sys.exit()
## split by strand ##
for transcript in matching_file_data:
if transcript['strand'] == '+':
chrom_plus.append(transcript)
else:
chrom_minus.append(transcript)
print '# transcripts to search: ' + str(len(matching_file_data))
print '# plus strand: ' + str(len(chrom_plus))
print '# minus strand: ' + str(len(chrom_minus))
if feat_type != 'all': ## use specific feature type if specified
for element in genome_data[chromosome]:
# print feat_type + ':' + element['feat_type']
if element['feat_type'] != feat_type:
# print 'feature types don\'t match'
continue
else: #matching feature types
data_to_search = chrom_plus #default is plus strand
if element['strand'] == '-':
data_to_search = chrom_minus
transcripts = _find_overlapping_transcripts(element, data_to_search, overlap) # one feature, finding overlaps
# print 'number of transcripts: ' + str(len(transcripts))
# if len(transcripts) > 0:
# print '|'.join([d['key'] for d in transcripts])
# sys.exit()
# print element['feat_type'] + "->" + chromosome + " " + str(element['start']) + " to " + str(element['stop']) + ":" + element['notes'] + " has " + str(len(transcripts)) + ' number of matches\n'
# notes_array = element['notes'].split(';')
feat_id = element['feat_name']
# print "feat:" + feat_id
if len(transcripts) == 0: # skip if no matches
continue
## add to unfiltered list and Add 'key' to remove from list
if chromosome in unfiltered_data.keys():
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome].extend([d['id'].rstrip() for d in transcripts])
else:
unfiltered_data[chromosome] = dict()
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome] = [d['id'].rstrip() for d in transcripts]
## filter transcripts by score or number
if filter_type == 'count': # filter by count
max_index = int(filter_val) - 1
if chromosome in newdata.keys():
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else:
newdata[chromosome] = dict()
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else: # filter by score
for each in transcripts:
if each['score'] >= filter_val: # if score is > or = to cut off, add to array
newdata[chromosome][feat_id].append(each)
# print "transcripts: "
# for item in newdata[chromosome][feat_id]:
# print "keys: " + "|".join(item.keys())
# print 'vals: ' + ','.join(item.values())
# sys.exit()
## take matched transcripts out of original set; use 'id' to find duplicates between original list and matched list
####################################################################################################################
# remove matching transcripts from original chromosome list #
# a = [x for x in a if x['link'] not in b]
## unmatched transcripts for chrV: 19983 ##
print "|".join(file_data.keys())
print "keys for remove list: " + ":".join(remove_list.keys())
all_ids_to_remove = list()
for one in remove_list.keys():
all_ids_to_remove.extend(remove_list[one])
for each_chr in remove_list.keys():
print "removing matching transcripts from " + each_chr
print "# of matches (length of remove list): " + str(len(remove_list[each_chr]))
print "original # of transcripts (length of original file with transcripts on that chr): " + str(len(file_data[each_chr]))
unmatched_trans[each_chr] = dict()
unmatched_trans[each_chr][each_chr + "_non_ORF"] = [x for x in file_data[each_chr] if x['id'] not in all_ids_to_remove]
#
return (unfiltered_data, newdata, unmatched_trans)
def _calc_overlap(trans_start, trans_stop, feat_start, feat_stop):
feat_length = feat_stop - feat_start
trans_length = trans_stop - trans_start
if trans_start > feat_start and trans_stop > feat_stop: # transcript starts inside the feature and ends outside the feature
return (feat_stop - trans_start)/feat_length
elif trans_start <= feat_start and trans_stop < feat_stop: # transcript starts outside of feature and ends inside feature
return (trans_stop - feat_start)/feat_length
elif trans_start > feat_start and trans_stop < feat_stop: # transcript starts and ends within the feature
return (trans_length/trans_stop)
else: # transcript completely overlaps the feature -- trans_start <= feat_start AND trans_stop >= feat_stop
return 1
def _find_overlapping_transcripts(feat_element, trans_data, overlap):
feat_start = int(feat_element['start'])
feat_stop = int(feat_element['stop'])
strand = feat_element['strand']
notes = feat_element['notes']
feat_type = feat_element['feat_type']
feat_name = feat_element['feat_name'] #notes_array[0].replace("ID=","")
## calculate the range for start and stops to cover the overlap of the ORF
feat_length = int(feat_stop - feat_start)
feat_overlap = int(float(feat_length) * float(overlap))
min_feat_start = int(feat_start - feat_overlap)
min_feat_stop = int(feat_start + feat_overlap) # start plus the percent of overlap
max_feat_start = int(feat_stop - feat_overlap) # distance from stop that will overlap req. amount
# print 'finding transcripts for ' + feat_start + " to " + feat_end + ", " + strand + ':' + notes + '\n'
filtered_data = list()
sort_data = list()
match_list = list()
print "start # of transcipts: "+ str(len(trans_data))
slice = 0
for each in trans_data:
# print "start transcript chr " + each['chr'] + ' and strand: ' + each['strand'] + " feat: "+ feat_element['chr'] + ',' + feat_element['strand']
if each['chr'] != feat_element['chr'] or each['strand'] != feat_element['strand']:
continue
trans_start = int(each['start'])
trans_stop = int(each ['stop'])
# transcript length
trans_length = trans_stop - trans_start
# print "f keys:" + ",".join(feat_element.keys())
# print "f values:" + "|".join(feat_element.values())
# print "t keys:" + ",".join(each.keys())
# print "t values: "+ "|".join(each.values())
#
# skip if transcript length is less than overlap requirement of the ORF length
if trans_length < feat_overlap:
continue
# if it is greater than or equal to overlap length, then see if it overlaps the ORF the correct amount then add to the
# 1. transcript_start is less than or eq to ORF start and trans_stop is greater than or eq to overlap req of the ORF (5' overlap AND full coverage)
# 2. transcript start is greater than ORF start, but less than max_feat_start and transcript stop is greater than ORF stop (3' overlap)
# 3. transcript start is greater than ORF start and less than max_feat_start AND transcript stop is less than ORF stop
# (get the diff between trans and ORF start and then add that to the min_feat_stop to find new min_feat_stop)
# print "transcript: " + str(trans_start) + " to " + str(trans_stop) + "; length: "+ str(trans_length)
# print "gene: " + str(feat_start) + " to " + str(feat_stop) + "; " + str(feat_overlap) + "bp overlap"
#
if (trans_start <= feat_start and min_feat_stop <= trans_stop) or (feat_start <= trans_start <= max_feat_start and trans_stop > feat_stop) or (feat_start <= trans_start <= max_feat_start and (min_feat_stop + (trans_start - feat_start)) <= trans_stop):
# calculate the % overlap #
each['per_overlap'] = str(_calc_overlap(trans_start, trans_stop, feat_start, feat_stop))
# print 'match for ' + feat_name + ": feat coord - " + str(feat_start) + "," + str(feat_stop) + " and match: " + str(trans_start) +"-"+ str(trans_stop)
# print 'chr:' + each['chr'] + ' and strand ' + each['strand'] + ' match feature ' + feat_element['chr'] + ' and strand ' + feat_element['strand']
each['notes'] = notes # add notes
each['feat'] = feat_name
print each['id'] + ' overlaps ' + each['feat']
filtered_data.append(each) # add to filtered data array
# add to list of list elements to remove from list at the end
# match_list.append(slice)
# sys.exit()
else:
continue
# print 'no match'
# sys.exit()
# slice = slice + 1 # increment slice
# print '# transcripts before sort: ' + str(len(filtered_data))
# listsorted = sorted(XWordDict, key=lambda x: int(operator.itemgetter("pos")(x)))
#sort_data = sorted(filtered_data, int(key=itemgetter('score')), reverse=True)
#sort_data = sorted(filtered_data, key = lambda x: int(operator.itemgetter("score")(x)), reverse=True)
#listsorted = sorted(XWordDict, key=lambda x: int(x['pos']))
sort_data = sorted(filtered_data, key=lambda x: (int(x['score']), int(x['per_overlap'])), reverse=True) # sort by score and then by % overlap
# if len(sort_data) > 0:
# for obj in sort_data:
# print "keys: " + "|".join(obj.keys())
# print "vals: " + ",".join(obj.values())
# sys.exit()
return sort_data
def _parse_sac_gff(file):
file_obj = open(file, 'r')
# file_data = open(file).readlines()
col_count = 0
ordered_keys = list()
data_dict = dict()
feat_data = list()
headers = dict()
feat_count = 1
for line in file_obj:
# skip line if it is a comment
if (re.match("^#+", line)):
# print "comment line: " + line
continue
line_array = list()
# print "LINE:"+ line + "\n"
#line_array = line.split(",") # split by commas
length = len(line_array) # check how many columns there are
if len(line_array) <= 1: # if it is tab-delimited, then split by \t
line_array = line.split("\t")
if len(line_array) <= 1: ## skip any line that can't be parsed
continue
key = line_array[0]
notes_array = line_array[8].rstrip().split(';')
feat_name = notes_array[0].replace("ID=","")
# print "data: " + line
# if key exists already, then add to data array
if key in data_dict.keys():
#feat_count = feat_count + 1 ## increase feat_count number
data_dict[key].append({'feat_name': feat_name, 'chr': line_array[0],'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'feat_type': line_array[2],'notes': line_array[8].rstrip()})
else:
# print "new chromosome: " + line_array[0]
## else make a new key
data_dict[line_array[0]] = [{'feat_name': feat_name, 'chr':line_array[0],'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'feat_type': line_array[2], 'notes':line_array[8].rstrip()}]
# add to feat_count
# for chr in data_dict.keys():
# print "chr:" + chr
# for row in data_dict[chr]:
# if row['feat_type'] == 'gene':
return (data_dict)
def _open_make_hash(file):
file_obj = open(file, 'r')
# file_data = open(file).readlines()
col_count = 0
ordered_keys = list()
data_dict = dict()
feat_data = list()
headers = dict()
for line in file_obj:
# skip line if it is a comment
if (re.match("^#+", line)):
# print "comment line: " + line
continue
line_array = list()
# print "LINE:"+ line + "\n"
line_array = line.split(",") # split by commas
length = len(line_array) # check how many columns there are
if len(line_array) <= 1: # if it is tab-delimited, then split by \t
# print "data: " + line
line_array = line.split("\t")
key = line_array[0]
# if key exists already, then add to data array
if key in data_dict.keys():
# feat_count = feat_count + 1 ## increase feat_count number
data_dict[key].append({'id': "".join(line_array[0:5]), 'chr': line_array[0], 'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'score': line_array[5].rstrip()})
else:
# print "new chromosome: " + line_array[0]
## else make a new key
data_dict[line_array[0]] = [{'id': "".join(line_array[0:5]),'chr': line_array[0], 'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'score': line_array[5].rstrip()}]
# add to feat_count
# for key in data_dict.keys():
# print key + ": num transcripts = " + str(len(data_dict[key]))#
# sys.exit()
return (data_dict)
if __name__ == "__main__":
main(sys.argv[1:]) | _print_gff | identifier_name |
parse_files_for_transcripts.py | #!/usr/bin/python
import sys, getopt, os
import re
from operator import itemgetter
## Parse Pelechano datafiles ###
def main(argv):
sorted_keys = list()
data_to_filter = dict()
two_fold_data = dict()
genome_data = dict()
transcript_data = dict()
feat_type = 'gene' ## specific feature type; use 'all' if no specific feature type
filter_val = 3 ## number to filter by (count or score)
filter_type = 'count' # can filter by count('count') or score cutoff ('cutoff')
overlap = '1' # amount of overlap a transcript has on an ORF
try:
opts, args = getopt.getopt(argv,"hi:o:l:f:v:t:s",["ifile=","ofile=", "lfile=","format=","value=","type=","seqfeat="])
except getopt.GetoptError:
print 'parse_files_for_transcripts.py -i <inputfile> -o <outputfile> -l <locusfile> -f <output_file_format> -v <filter value> -t <filter_type: count OR cutoff> -s <sequence_feature: gene, CDS, etc>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'parse_files_for_transcripts.py -i <inputfile> -o <outputfile> -l <locusfile> -f <output_file_format> -v <filter value> -t <filter_type: count OR cutoff> -s <sequence_feature: gene, CDS, etc>'
sys.exit()
elif opt in ("-i", "--ifile"): # gff3 format
firstfile = arg
elif opt in ("-o", "--ofile"): # outfile name
outfile = arg
elif opt in ("-l","--lfile"): # s_cer gff3 file
locusfile = arg
elif opt in ("-f","--format"): #gff3 or tsv/wig file ## DOESN'T WORK
file_format = arg
elif opt in ("-v","--value"): ## filter value
filter_val = arg
elif opt in ("-t","--type"): ## type of filter -- count or cutoff # Not tested
filter_type = arg
elif opt in ("-s","--seqfeat"): ## feature type # Not tested
feat_type = arg
# defining out files:
unfiltered_file = "unfiltered_" + outfile
non_match_file = "unmatched_" + outfile
# make hashes
(file_data) = _open_make_hash(firstfile) # opens file to parse and makes a hash
(genome_data) = _parse_sac_gff(locusfile) # opens and makes hash of gene annotations
# print "file keys: " + ",".join(file_data.keys())
# print "s_cer gff keys: "+ ",".join(genome_data.keys())
## iterate over all the 'gene' feature types and find the transcripts that cover the entire thing
#, non_matches)
(unfiltered_matches, filtered_transcripts, unmatched_transcripts) = _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap)
## right now, just make a gff file
_print_gff(filtered_transcripts, outfile)
_print_gff(unfiltered_matches, unfiltered_file)
_print_gff(unmatched_transcripts, non_match_file)
## if -f flag is used, then just reformat data ###
# if 'format' in vars():
# # reformatted_data = _reformat_data(data_to_filter, mapfile)
#
# ### creating reformatted file
#
# final_file = open(outfile, 'w')
#
# ## print header row
# # final_file.write ("regulator feature name\tregulator gene name\ttarget feature name\ttarget gene name\tvalue\tstrain\n")
#
def _print_gff(dict_to_print, outfile):
print "making GFF file"
#print headers #
newfile = open(outfile, 'w')
newfile.write("## GFF file for transcripts\n")
chr_order = ['chrI','chrII','chrIII','chrIV','chrV','chrVI','chrVII','chrVIII','chrIX','chrX','chrXI','chrXII','chrXIII','chrXIV','chrXV','chrXVI']
for chr in chr_order:
transcript_data = dict_to_print[chr]
for key in sorted(transcript_data.keys()):
print 'feature: ' + key + '# of transcripts: ' + str(len(transcript_data[key]))
if len(transcript_data[key]) < 1:
|
for track in transcript_data[key]:
print 'adding ' + key + ' to file'
# print "|".join(track.values())
notes = track.get('notes', ".")
newfile.write("\t".join([chr,'rtracklayer_'+key,'sequence_feature',track['start'], track['stop'], track['score'],track['strand'],'.',notes]))
newfile.write("\n")
# sys.exit
# def _make_bed_file(dict_to_print, outfile):
#
def _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap):
transcripts = list()
chrom_plus = list()
chrom_minus = list()
newdata = dict()
unfiltered_data = dict()
unmatched_trans = dict()
remove_list = dict()
for chromosome in genome_data.keys():
if chromosome == 'chrmt': # skip mito chromosome
print 'skipping mito'
continue
matching_file_data = file_data[chromosome]
# unmatched_trans = matching_file_data
# print 'chromosome:' + chromosome + ':' + matching_file_data[0]['chr']
#sys.exit()
## split by strand ##
for transcript in matching_file_data:
if transcript['strand'] == '+':
chrom_plus.append(transcript)
else:
chrom_minus.append(transcript)
print '# transcripts to search: ' + str(len(matching_file_data))
print '# plus strand: ' + str(len(chrom_plus))
print '# minus strand: ' + str(len(chrom_minus))
if feat_type != 'all': ## use specific feature type if specified
for element in genome_data[chromosome]:
# print feat_type + ':' + element['feat_type']
if element['feat_type'] != feat_type:
# print 'feature types don\'t match'
continue
else: #matching feature types
data_to_search = chrom_plus #default is plus strand
if element['strand'] == '-':
data_to_search = chrom_minus
transcripts = _find_overlapping_transcripts(element, data_to_search, overlap) # one feature, finding overlaps
# print 'number of transcripts: ' + str(len(transcripts))
# if len(transcripts) > 0:
# print '|'.join([d['key'] for d in transcripts])
# sys.exit()
# print element['feat_type'] + "->" + chromosome + " " + str(element['start']) + " to " + str(element['stop']) + ":" + element['notes'] + " has " + str(len(transcripts)) + ' number of matches\n'
# notes_array = element['notes'].split(';')
feat_id = element['feat_name']
# print "feat:" + feat_id
if len(transcripts) == 0: # skip if no matches
continue
## add to unfiltered list and Add 'key' to remove from list
if chromosome in unfiltered_data.keys():
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome].extend([d['id'].rstrip() for d in transcripts])
else:
unfiltered_data[chromosome] = dict()
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome] = [d['id'].rstrip() for d in transcripts]
## filter transcripts by score or number
if filter_type == 'count': # filter by count
max_index = int(filter_val) - 1
if chromosome in newdata.keys():
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else:
newdata[chromosome] = dict()
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else: # filter by score
for each in transcripts:
if each['score'] >= filter_val: # if score is > or = to cut off, add to array
newdata[chromosome][feat_id].append(each)
# print "transcripts: "
# for item in newdata[chromosome][feat_id]:
# print "keys: " + "|".join(item.keys())
# print 'vals: ' + ','.join(item.values())
# sys.exit()
## take matched transcripts out of original set; use 'id' to find duplicates between original list and matched list
####################################################################################################################
# remove matching transcripts from original chromosome list #
# a = [x for x in a if x['link'] not in b]
## unmatched transcripts for chrV: 19983 ##
print "|".join(file_data.keys())
print "keys for remove list: " + ":".join(remove_list.keys())
all_ids_to_remove = list()
for one in remove_list.keys():
all_ids_to_remove.extend(remove_list[one])
for each_chr in remove_list.keys():
print "removing matching transcripts from " + each_chr
print "# of matches (length of remove list): " + str(len(remove_list[each_chr]))
print "original # of transcripts (length of original file with transcripts on that chr): " + str(len(file_data[each_chr]))
unmatched_trans[each_chr] = dict()
unmatched_trans[each_chr][each_chr + "_non_ORF"] = [x for x in file_data[each_chr] if x['id'] not in all_ids_to_remove]
#
return (unfiltered_data, newdata, unmatched_trans)
def _calc_overlap(trans_start, trans_stop, feat_start, feat_stop):
feat_length = feat_stop - feat_start
trans_length = trans_stop - trans_start
if trans_start > feat_start and trans_stop > feat_stop: # transcript starts inside the feature and ends outside the feature
return (feat_stop - trans_start)/feat_length
elif trans_start <= feat_start and trans_stop < feat_stop: # transcript starts outside of feature and ends inside feature
return (trans_stop - feat_start)/feat_length
elif trans_start > feat_start and trans_stop < feat_stop: # transcript starts and ends within the feature
return (trans_length/trans_stop)
else: # transcript completely overlaps the feature -- trans_start <= feat_start AND trans_stop >= feat_stop
return 1
def _find_overlapping_transcripts(feat_element, trans_data, overlap):
feat_start = int(feat_element['start'])
feat_stop = int(feat_element['stop'])
strand = feat_element['strand']
notes = feat_element['notes']
feat_type = feat_element['feat_type']
feat_name = feat_element['feat_name'] #notes_array[0].replace("ID=","")
## calculate the range for start and stops to cover the overlap of the ORF
feat_length = int(feat_stop - feat_start)
feat_overlap = int(float(feat_length) * float(overlap))
min_feat_start = int(feat_start - feat_overlap)
min_feat_stop = int(feat_start + feat_overlap) # start plus the percent of overlap
max_feat_start = int(feat_stop - feat_overlap) # distance from stop that will overlap req. amount
# print 'finding transcripts for ' + feat_start + " to " + feat_end + ", " + strand + ':' + notes + '\n'
filtered_data = list()
sort_data = list()
match_list = list()
print "start # of transcipts: "+ str(len(trans_data))
slice = 0
for each in trans_data:
# print "start transcript chr " + each['chr'] + ' and strand: ' + each['strand'] + " feat: "+ feat_element['chr'] + ',' + feat_element['strand']
if each['chr'] != feat_element['chr'] or each['strand'] != feat_element['strand']:
continue
trans_start = int(each['start'])
trans_stop = int(each ['stop'])
# transcript length
trans_length = trans_stop - trans_start
# print "f keys:" + ",".join(feat_element.keys())
# print "f values:" + "|".join(feat_element.values())
# print "t keys:" + ",".join(each.keys())
# print "t values: "+ "|".join(each.values())
#
# skip if transcript length is less than overlap requirement of the ORF length
if trans_length < feat_overlap:
continue
# if it is greater than or equal to overlap length, then see if it overlaps the ORF the correct amount then add to the
# 1. transcript_start is less than or eq to ORF start and trans_stop is greater than or eq to overlap req of the ORF (5' overlap AND full coverage)
# 2. transcript start is greater than ORF start, but less than max_feat_start and transcript stop is greater than ORF stop (3' overlap)
# 3. transcript start is greater than ORF start and less than max_feat_start AND transcript stop is less than ORF stop
# (get the diff between trans and ORF start and then add that to the min_feat_stop to find new min_feat_stop)
# print "transcript: " + str(trans_start) + " to " + str(trans_stop) + "; length: "+ str(trans_length)
# print "gene: " + str(feat_start) + " to " + str(feat_stop) + "; " + str(feat_overlap) + "bp overlap"
#
if (trans_start <= feat_start and min_feat_stop <= trans_stop) or (feat_start <= trans_start <= max_feat_start and trans_stop > feat_stop) or (feat_start <= trans_start <= max_feat_start and (min_feat_stop + (trans_start - feat_start)) <= trans_stop):
# calculate the % overlap #
each['per_overlap'] = str(_calc_overlap(trans_start, trans_stop, feat_start, feat_stop))
# print 'match for ' + feat_name + ": feat coord - " + str(feat_start) + "," + str(feat_stop) + " and match: " + str(trans_start) +"-"+ str(trans_stop)
# print 'chr:' + each['chr'] + ' and strand ' + each['strand'] + ' match feature ' + feat_element['chr'] + ' and strand ' + feat_element['strand']
each['notes'] = notes # add notes
each['feat'] = feat_name
print each['id'] + ' overlaps ' + each['feat']
filtered_data.append(each) # add to filtered data array
# add to list of list elements to remove from list at the end
# match_list.append(slice)
# sys.exit()
else:
continue
# print 'no match'
# sys.exit()
# slice = slice + 1 # increment slice
# print '# transcripts before sort: ' + str(len(filtered_data))
# listsorted = sorted(XWordDict, key=lambda x: int(operator.itemgetter("pos")(x)))
#sort_data = sorted(filtered_data, int(key=itemgetter('score')), reverse=True)
#sort_data = sorted(filtered_data, key = lambda x: int(operator.itemgetter("score")(x)), reverse=True)
#listsorted = sorted(XWordDict, key=lambda x: int(x['pos']))
sort_data = sorted(filtered_data, key=lambda x: (int(x['score']), int(x['per_overlap'])), reverse=True) # sort by score and then by % overlap
# if len(sort_data) > 0:
# for obj in sort_data:
# print "keys: " + "|".join(obj.keys())
# print "vals: " + ",".join(obj.values())
# sys.exit()
return sort_data
def _parse_sac_gff(file):
file_obj = open(file, 'r')
# file_data = open(file).readlines()
col_count = 0
ordered_keys = list()
data_dict = dict()
feat_data = list()
headers = dict()
feat_count = 1
for line in file_obj:
# skip line if it is a comment
if (re.match("^#+", line)):
# print "comment line: " + line
continue
line_array = list()
# print "LINE:"+ line + "\n"
#line_array = line.split(",") # split by commas
length = len(line_array) # check how many columns there are
if len(line_array) <= 1: # if it is tab-delimited, then split by \t
line_array = line.split("\t")
if len(line_array) <= 1: ## skip any line that can't be parsed
continue
key = line_array[0]
notes_array = line_array[8].rstrip().split(';')
feat_name = notes_array[0].replace("ID=","")
# print "data: " + line
# if key exists already, then add to data array
if key in data_dict.keys():
#feat_count = feat_count + 1 ## increase feat_count number
data_dict[key].append({'feat_name': feat_name, 'chr': line_array[0],'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'feat_type': line_array[2],'notes': line_array[8].rstrip()})
else:
# print "new chromosome: " + line_array[0]
## else make a new key
data_dict[line_array[0]] = [{'feat_name': feat_name, 'chr':line_array[0],'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'feat_type': line_array[2], 'notes':line_array[8].rstrip()}]
# add to feat_count
# for chr in data_dict.keys():
# print "chr:" + chr
# for row in data_dict[chr]:
# if row['feat_type'] == 'gene':
return (data_dict)
def _open_make_hash(file):
file_obj = open(file, 'r')
# file_data = open(file).readlines()
col_count = 0
ordered_keys = list()
data_dict = dict()
feat_data = list()
headers = dict()
for line in file_obj:
# skip line if it is a comment
if (re.match("^#+", line)):
# print "comment line: " + line
continue
line_array = list()
# print "LINE:"+ line + "\n"
line_array = line.split(",") # split by commas
length = len(line_array) # check how many columns there are
if len(line_array) <= 1: # if it is tab-delimited, then split by \t
# print "data: " + line
line_array = line.split("\t")
key = line_array[0]
# if key exists already, then add to data array
if key in data_dict.keys():
# feat_count = feat_count + 1 ## increase feat_count number
data_dict[key].append({'id': "".join(line_array[0:5]), 'chr': line_array[0], 'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'score': line_array[5].rstrip()})
else:
# print "new chromosome: " + line_array[0]
## else make a new key
data_dict[line_array[0]] = [{'id': "".join(line_array[0:5]),'chr': line_array[0], 'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'score': line_array[5].rstrip()}]
# add to feat_count
# for key in data_dict.keys():
# print key + ": num transcripts = " + str(len(data_dict[key]))#
# sys.exit()
return (data_dict)
if __name__ == "__main__":
main(sys.argv[1:]) | continue | conditional_block |
parse_files_for_transcripts.py | #!/usr/bin/python
import sys, getopt, os
import re
from operator import itemgetter
## Parse Pelechano datafiles ### | data_to_filter = dict()
two_fold_data = dict()
genome_data = dict()
transcript_data = dict()
feat_type = 'gene' ## specific feature type; use 'all' if no specific feature type
filter_val = 3 ## number to filter by (count or score)
filter_type = 'count' # can filter by count('count') or score cutoff ('cutoff')
overlap = '1' # amount of overlap a transcript has on an ORF
try:
opts, args = getopt.getopt(argv,"hi:o:l:f:v:t:s",["ifile=","ofile=", "lfile=","format=","value=","type=","seqfeat="])
except getopt.GetoptError:
print 'parse_files_for_transcripts.py -i <inputfile> -o <outputfile> -l <locusfile> -f <output_file_format> -v <filter value> -t <filter_type: count OR cutoff> -s <sequence_feature: gene, CDS, etc>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'parse_files_for_transcripts.py -i <inputfile> -o <outputfile> -l <locusfile> -f <output_file_format> -v <filter value> -t <filter_type: count OR cutoff> -s <sequence_feature: gene, CDS, etc>'
sys.exit()
elif opt in ("-i", "--ifile"): # gff3 format
firstfile = arg
elif opt in ("-o", "--ofile"): # outfile name
outfile = arg
elif opt in ("-l","--lfile"): # s_cer gff3 file
locusfile = arg
elif opt in ("-f","--format"): #gff3 or tsv/wig file ## DOESN'T WORK
file_format = arg
elif opt in ("-v","--value"): ## filter value
filter_val = arg
elif opt in ("-t","--type"): ## type of filter -- count or cutoff # Not tested
filter_type = arg
elif opt in ("-s","--seqfeat"): ## feature type # Not tested
feat_type = arg
# defining out files:
unfiltered_file = "unfiltered_" + outfile
non_match_file = "unmatched_" + outfile
# make hashes
(file_data) = _open_make_hash(firstfile) # opens file to parse and makes a hash
(genome_data) = _parse_sac_gff(locusfile) # opens and makes hash of gene annotations
# print "file keys: " + ",".join(file_data.keys())
# print "s_cer gff keys: "+ ",".join(genome_data.keys())
## iterate over all the 'gene' feature types and find the transcripts that cover the entire thing
#, non_matches)
(unfiltered_matches, filtered_transcripts, unmatched_transcripts) = _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap)
## right now, just make a gff file
_print_gff(filtered_transcripts, outfile)
_print_gff(unfiltered_matches, unfiltered_file)
_print_gff(unmatched_transcripts, non_match_file)
## if -f flag is used, then just reformat data ###
# if 'format' in vars():
# # reformatted_data = _reformat_data(data_to_filter, mapfile)
#
# ### creating reformatted file
#
# final_file = open(outfile, 'w')
#
# ## print header row
# # final_file.write ("regulator feature name\tregulator gene name\ttarget feature name\ttarget gene name\tvalue\tstrain\n")
#
def _print_gff(dict_to_print, outfile):
print "making GFF file"
#print headers #
newfile = open(outfile, 'w')
newfile.write("## GFF file for transcripts\n")
chr_order = ['chrI','chrII','chrIII','chrIV','chrV','chrVI','chrVII','chrVIII','chrIX','chrX','chrXI','chrXII','chrXIII','chrXIV','chrXV','chrXVI']
for chr in chr_order:
transcript_data = dict_to_print[chr]
for key in sorted(transcript_data.keys()):
print 'feature: ' + key + '# of transcripts: ' + str(len(transcript_data[key]))
if len(transcript_data[key]) < 1:
continue
for track in transcript_data[key]:
print 'adding ' + key + ' to file'
# print "|".join(track.values())
notes = track.get('notes', ".")
newfile.write("\t".join([chr,'rtracklayer_'+key,'sequence_feature',track['start'], track['stop'], track['score'],track['strand'],'.',notes]))
newfile.write("\n")
# sys.exit
# def _make_bed_file(dict_to_print, outfile):
#
def _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap):
transcripts = list()
chrom_plus = list()
chrom_minus = list()
newdata = dict()
unfiltered_data = dict()
unmatched_trans = dict()
remove_list = dict()
for chromosome in genome_data.keys():
if chromosome == 'chrmt': # skip mito chromosome
print 'skipping mito'
continue
matching_file_data = file_data[chromosome]
# unmatched_trans = matching_file_data
# print 'chromosome:' + chromosome + ':' + matching_file_data[0]['chr']
#sys.exit()
## split by strand ##
for transcript in matching_file_data:
if transcript['strand'] == '+':
chrom_plus.append(transcript)
else:
chrom_minus.append(transcript)
print '# transcripts to search: ' + str(len(matching_file_data))
print '# plus strand: ' + str(len(chrom_plus))
print '# minus strand: ' + str(len(chrom_minus))
if feat_type != 'all': ## use specific feature type if specified
for element in genome_data[chromosome]:
# print feat_type + ':' + element['feat_type']
if element['feat_type'] != feat_type:
# print 'feature types don\'t match'
continue
else: #matching feature types
data_to_search = chrom_plus #default is plus strand
if element['strand'] == '-':
data_to_search = chrom_minus
transcripts = _find_overlapping_transcripts(element, data_to_search, overlap) # one feature, finding overlaps
# print 'number of transcripts: ' + str(len(transcripts))
# if len(transcripts) > 0:
# print '|'.join([d['key'] for d in transcripts])
# sys.exit()
# print element['feat_type'] + "->" + chromosome + " " + str(element['start']) + " to " + str(element['stop']) + ":" + element['notes'] + " has " + str(len(transcripts)) + ' number of matches\n'
# notes_array = element['notes'].split(';')
feat_id = element['feat_name']
# print "feat:" + feat_id
if len(transcripts) == 0: # skip if no matches
continue
## add to unfiltered list and Add 'key' to remove from list
if chromosome in unfiltered_data.keys():
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome].extend([d['id'].rstrip() for d in transcripts])
else:
unfiltered_data[chromosome] = dict()
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome] = [d['id'].rstrip() for d in transcripts]
## filter transcripts by score or number
if filter_type == 'count': # filter by count
max_index = int(filter_val) - 1
if chromosome in newdata.keys():
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else:
newdata[chromosome] = dict()
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else: # filter by score
for each in transcripts:
if each['score'] >= filter_val: # if score is > or = to cut off, add to array
newdata[chromosome][feat_id].append(each)
# print "transcripts: "
# for item in newdata[chromosome][feat_id]:
# print "keys: " + "|".join(item.keys())
# print 'vals: ' + ','.join(item.values())
# sys.exit()
## take matched transcripts out of original set; use 'id' to find duplicates between original list and matched list
####################################################################################################################
# remove matching transcripts from original chromosome list #
# a = [x for x in a if x['link'] not in b]
## unmatched transcripts for chrV: 19983 ##
print "|".join(file_data.keys())
print "keys for remove list: " + ":".join(remove_list.keys())
all_ids_to_remove = list()
for one in remove_list.keys():
all_ids_to_remove.extend(remove_list[one])
for each_chr in remove_list.keys():
print "removing matching transcripts from " + each_chr
print "# of matches (length of remove list): " + str(len(remove_list[each_chr]))
print "original # of transcripts (length of original file with transcripts on that chr): " + str(len(file_data[each_chr]))
unmatched_trans[each_chr] = dict()
unmatched_trans[each_chr][each_chr + "_non_ORF"] = [x for x in file_data[each_chr] if x['id'] not in all_ids_to_remove]
#
return (unfiltered_data, newdata, unmatched_trans)
def _calc_overlap(trans_start, trans_stop, feat_start, feat_stop):
feat_length = feat_stop - feat_start
trans_length = trans_stop - trans_start
if trans_start > feat_start and trans_stop > feat_stop: # transcript starts inside the feature and ends outside the feature
return (feat_stop - trans_start)/feat_length
elif trans_start <= feat_start and trans_stop < feat_stop: # transcript starts outside of feature and ends inside feature
return (trans_stop - feat_start)/feat_length
elif trans_start > feat_start and trans_stop < feat_stop: # transcript starts and ends within the feature
return (trans_length/trans_stop)
else: # transcript completely overlaps the feature -- trans_start <= feat_start AND trans_stop >= feat_stop
return 1
def _find_overlapping_transcripts(feat_element, trans_data, overlap):
feat_start = int(feat_element['start'])
feat_stop = int(feat_element['stop'])
strand = feat_element['strand']
notes = feat_element['notes']
feat_type = feat_element['feat_type']
feat_name = feat_element['feat_name'] #notes_array[0].replace("ID=","")
## calculate the range for start and stops to cover the overlap of the ORF
feat_length = int(feat_stop - feat_start)
feat_overlap = int(float(feat_length) * float(overlap))
min_feat_start = int(feat_start - feat_overlap)
min_feat_stop = int(feat_start + feat_overlap) # start plus the percent of overlap
max_feat_start = int(feat_stop - feat_overlap) # distance from stop that will overlap req. amount
# print 'finding transcripts for ' + feat_start + " to " + feat_end + ", " + strand + ':' + notes + '\n'
filtered_data = list()
sort_data = list()
match_list = list()
print "start # of transcipts: "+ str(len(trans_data))
slice = 0
for each in trans_data:
# print "start transcript chr " + each['chr'] + ' and strand: ' + each['strand'] + " feat: "+ feat_element['chr'] + ',' + feat_element['strand']
if each['chr'] != feat_element['chr'] or each['strand'] != feat_element['strand']:
continue
trans_start = int(each['start'])
trans_stop = int(each ['stop'])
# transcript length
trans_length = trans_stop - trans_start
# print "f keys:" + ",".join(feat_element.keys())
# print "f values:" + "|".join(feat_element.values())
# print "t keys:" + ",".join(each.keys())
# print "t values: "+ "|".join(each.values())
#
# skip if transcript length is less than overlap requirement of the ORF length
if trans_length < feat_overlap:
continue
# if it is greater than or equal to overlap length, then see if it overlaps the ORF the correct amount then add to the
# 1. transcript_start is less than or eq to ORF start and trans_stop is greater than or eq to overlap req of the ORF (5' overlap AND full coverage)
# 2. transcript start is greater than ORF start, but less than max_feat_start and transcript stop is greater than ORF stop (3' overlap)
# 3. transcript start is greater than ORF start and less than max_feat_start AND transcript stop is less than ORF stop
# (get the diff between trans and ORF start and then add that to the min_feat_stop to find new min_feat_stop)
# print "transcript: " + str(trans_start) + " to " + str(trans_stop) + "; length: "+ str(trans_length)
# print "gene: " + str(feat_start) + " to " + str(feat_stop) + "; " + str(feat_overlap) + "bp overlap"
#
if (trans_start <= feat_start and min_feat_stop <= trans_stop) or (feat_start <= trans_start <= max_feat_start and trans_stop > feat_stop) or (feat_start <= trans_start <= max_feat_start and (min_feat_stop + (trans_start - feat_start)) <= trans_stop):
# calculate the % overlap #
each['per_overlap'] = str(_calc_overlap(trans_start, trans_stop, feat_start, feat_stop))
# print 'match for ' + feat_name + ": feat coord - " + str(feat_start) + "," + str(feat_stop) + " and match: " + str(trans_start) +"-"+ str(trans_stop)
# print 'chr:' + each['chr'] + ' and strand ' + each['strand'] + ' match feature ' + feat_element['chr'] + ' and strand ' + feat_element['strand']
each['notes'] = notes # add notes
each['feat'] = feat_name
print each['id'] + ' overlaps ' + each['feat']
filtered_data.append(each) # add to filtered data array
# add to list of list elements to remove from list at the end
# match_list.append(slice)
# sys.exit()
else:
continue
# print 'no match'
# sys.exit()
# slice = slice + 1 # increment slice
# print '# transcripts before sort: ' + str(len(filtered_data))
# listsorted = sorted(XWordDict, key=lambda x: int(operator.itemgetter("pos")(x)))
#sort_data = sorted(filtered_data, int(key=itemgetter('score')), reverse=True)
#sort_data = sorted(filtered_data, key = lambda x: int(operator.itemgetter("score")(x)), reverse=True)
#listsorted = sorted(XWordDict, key=lambda x: int(x['pos']))
sort_data = sorted(filtered_data, key=lambda x: (int(x['score']), int(x['per_overlap'])), reverse=True) # sort by score and then by % overlap
# if len(sort_data) > 0:
# for obj in sort_data:
# print "keys: " + "|".join(obj.keys())
# print "vals: " + ",".join(obj.values())
# sys.exit()
return sort_data
def _parse_sac_gff(file):
file_obj = open(file, 'r')
# file_data = open(file).readlines()
col_count = 0
ordered_keys = list()
data_dict = dict()
feat_data = list()
headers = dict()
feat_count = 1
for line in file_obj:
# skip line if it is a comment
if (re.match("^#+", line)):
# print "comment line: " + line
continue
line_array = list()
# print "LINE:"+ line + "\n"
#line_array = line.split(",") # split by commas
length = len(line_array) # check how many columns there are
if len(line_array) <= 1: # if it is tab-delimited, then split by \t
line_array = line.split("\t")
if len(line_array) <= 1: ## skip any line that can't be parsed
continue
key = line_array[0]
notes_array = line_array[8].rstrip().split(';')
feat_name = notes_array[0].replace("ID=","")
# print "data: " + line
# if key exists already, then add to data array
if key in data_dict.keys():
#feat_count = feat_count + 1 ## increase feat_count number
data_dict[key].append({'feat_name': feat_name, 'chr': line_array[0],'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'feat_type': line_array[2],'notes': line_array[8].rstrip()})
else:
# print "new chromosome: " + line_array[0]
## else make a new key
data_dict[line_array[0]] = [{'feat_name': feat_name, 'chr':line_array[0],'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'feat_type': line_array[2], 'notes':line_array[8].rstrip()}]
# add to feat_count
# for chr in data_dict.keys():
# print "chr:" + chr
# for row in data_dict[chr]:
# if row['feat_type'] == 'gene':
return (data_dict)
def _open_make_hash(file):
file_obj = open(file, 'r')
# file_data = open(file).readlines()
col_count = 0
ordered_keys = list()
data_dict = dict()
feat_data = list()
headers = dict()
for line in file_obj:
# skip line if it is a comment
if (re.match("^#+", line)):
# print "comment line: " + line
continue
line_array = list()
# print "LINE:"+ line + "\n"
line_array = line.split(",") # split by commas
length = len(line_array) # check how many columns there are
if len(line_array) <= 1: # if it is tab-delimited, then split by \t
# print "data: " + line
line_array = line.split("\t")
key = line_array[0]
# if key exists already, then add to data array
if key in data_dict.keys():
# feat_count = feat_count + 1 ## increase feat_count number
data_dict[key].append({'id': "".join(line_array[0:5]), 'chr': line_array[0], 'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'score': line_array[5].rstrip()})
else:
# print "new chromosome: " + line_array[0]
## else make a new key
data_dict[line_array[0]] = [{'id': "".join(line_array[0:5]),'chr': line_array[0], 'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'score': line_array[5].rstrip()}]
# add to feat_count
# for key in data_dict.keys():
# print key + ": num transcripts = " + str(len(data_dict[key]))#
# sys.exit()
return (data_dict)
if __name__ == "__main__":
main(sys.argv[1:]) |
def main(argv):
sorted_keys = list() | random_line_split |
parse_files_for_transcripts.py | #!/usr/bin/python
import sys, getopt, os
import re
from operator import itemgetter
## Parse Pelechano datafiles ###
def main(argv):
|
# if 'format' in vars():
# # reformatted_data = _reformat_data(data_to_filter, mapfile)
#
# ### creating reformatted file
#
# final_file = open(outfile, 'w')
#
# ## print header row
# # final_file.write ("regulator feature name\tregulator gene name\ttarget feature name\ttarget gene name\tvalue\tstrain\n")
#
def _print_gff(dict_to_print, outfile):
print "making GFF file"
#print headers #
newfile = open(outfile, 'w')
newfile.write("## GFF file for transcripts\n")
chr_order = ['chrI','chrII','chrIII','chrIV','chrV','chrVI','chrVII','chrVIII','chrIX','chrX','chrXI','chrXII','chrXIII','chrXIV','chrXV','chrXVI']
for chr in chr_order:
transcript_data = dict_to_print[chr]
for key in sorted(transcript_data.keys()):
print 'feature: ' + key + '# of transcripts: ' + str(len(transcript_data[key]))
if len(transcript_data[key]) < 1:
continue
for track in transcript_data[key]:
print 'adding ' + key + ' to file'
# print "|".join(track.values())
notes = track.get('notes', ".")
newfile.write("\t".join([chr,'rtracklayer_'+key,'sequence_feature',track['start'], track['stop'], track['score'],track['strand'],'.',notes]))
newfile.write("\n")
# sys.exit
# def _make_bed_file(dict_to_print, outfile):
#
def _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap):
transcripts = list()
chrom_plus = list()
chrom_minus = list()
newdata = dict()
unfiltered_data = dict()
unmatched_trans = dict()
remove_list = dict()
for chromosome in genome_data.keys():
if chromosome == 'chrmt': # skip mito chromosome
print 'skipping mito'
continue
matching_file_data = file_data[chromosome]
# unmatched_trans = matching_file_data
# print 'chromosome:' + chromosome + ':' + matching_file_data[0]['chr']
#sys.exit()
## split by strand ##
for transcript in matching_file_data:
if transcript['strand'] == '+':
chrom_plus.append(transcript)
else:
chrom_minus.append(transcript)
print '# transcripts to search: ' + str(len(matching_file_data))
print '# plus strand: ' + str(len(chrom_plus))
print '# minus strand: ' + str(len(chrom_minus))
if feat_type != 'all': ## use specific feature type if specified
for element in genome_data[chromosome]:
# print feat_type + ':' + element['feat_type']
if element['feat_type'] != feat_type:
# print 'feature types don\'t match'
continue
else: #matching feature types
data_to_search = chrom_plus #default is plus strand
if element['strand'] == '-':
data_to_search = chrom_minus
transcripts = _find_overlapping_transcripts(element, data_to_search, overlap) # one feature, finding overlaps
# print 'number of transcripts: ' + str(len(transcripts))
# if len(transcripts) > 0:
# print '|'.join([d['key'] for d in transcripts])
# sys.exit()
# print element['feat_type'] + "->" + chromosome + " " + str(element['start']) + " to " + str(element['stop']) + ":" + element['notes'] + " has " + str(len(transcripts)) + ' number of matches\n'
# notes_array = element['notes'].split(';')
feat_id = element['feat_name']
# print "feat:" + feat_id
if len(transcripts) == 0: # skip if no matches
continue
## add to unfiltered list and Add 'key' to remove from list
if chromosome in unfiltered_data.keys():
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome].extend([d['id'].rstrip() for d in transcripts])
else:
unfiltered_data[chromosome] = dict()
unfiltered_data[chromosome][feat_id] = transcripts
remove_list[chromosome] = [d['id'].rstrip() for d in transcripts]
## filter transcripts by score or number
if filter_type == 'count': # filter by count
max_index = int(filter_val) - 1
if chromosome in newdata.keys():
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else:
newdata[chromosome] = dict()
newdata[chromosome][feat_id] = transcripts[0:filter_val]
else: # filter by score
for each in transcripts:
if each['score'] >= filter_val: # if score is > or = to cut off, add to array
newdata[chromosome][feat_id].append(each)
# print "transcripts: "
# for item in newdata[chromosome][feat_id]:
# print "keys: " + "|".join(item.keys())
# print 'vals: ' + ','.join(item.values())
# sys.exit()
## take matched transcripts out of original set; use 'id' to find duplicates between original list and matched list
####################################################################################################################
# remove matching transcripts from original chromosome list #
# a = [x for x in a if x['link'] not in b]
## unmatched transcripts for chrV: 19983 ##
print "|".join(file_data.keys())
print "keys for remove list: " + ":".join(remove_list.keys())
all_ids_to_remove = list()
for one in remove_list.keys():
all_ids_to_remove.extend(remove_list[one])
for each_chr in remove_list.keys():
print "removing matching transcripts from " + each_chr
print "# of matches (length of remove list): " + str(len(remove_list[each_chr]))
print "original # of transcripts (length of original file with transcripts on that chr): " + str(len(file_data[each_chr]))
unmatched_trans[each_chr] = dict()
unmatched_trans[each_chr][each_chr + "_non_ORF"] = [x for x in file_data[each_chr] if x['id'] not in all_ids_to_remove]
#
return (unfiltered_data, newdata, unmatched_trans)
def _calc_overlap(trans_start, trans_stop, feat_start, feat_stop):
feat_length = feat_stop - feat_start
trans_length = trans_stop - trans_start
if trans_start > feat_start and trans_stop > feat_stop: # transcript starts inside the feature and ends outside the feature
return (feat_stop - trans_start)/feat_length
elif trans_start <= feat_start and trans_stop < feat_stop: # transcript starts outside of feature and ends inside feature
return (trans_stop - feat_start)/feat_length
elif trans_start > feat_start and trans_stop < feat_stop: # transcript starts and ends within the feature
return (trans_length/trans_stop)
else: # transcript completely overlaps the feature -- trans_start <= feat_start AND trans_stop >= feat_stop
return 1
def _find_overlapping_transcripts(feat_element, trans_data, overlap):
feat_start = int(feat_element['start'])
feat_stop = int(feat_element['stop'])
strand = feat_element['strand']
notes = feat_element['notes']
feat_type = feat_element['feat_type']
feat_name = feat_element['feat_name'] #notes_array[0].replace("ID=","")
## calculate the range for start and stops to cover the overlap of the ORF
feat_length = int(feat_stop - feat_start)
feat_overlap = int(float(feat_length) * float(overlap))
min_feat_start = int(feat_start - feat_overlap)
min_feat_stop = int(feat_start + feat_overlap) # start plus the percent of overlap
max_feat_start = int(feat_stop - feat_overlap) # distance from stop that will overlap req. amount
# print 'finding transcripts for ' + feat_start + " to " + feat_end + ", " + strand + ':' + notes + '\n'
filtered_data = list()
sort_data = list()
match_list = list()
print "start # of transcipts: "+ str(len(trans_data))
slice = 0
for each in trans_data:
# print "start transcript chr " + each['chr'] + ' and strand: ' + each['strand'] + " feat: "+ feat_element['chr'] + ',' + feat_element['strand']
if each['chr'] != feat_element['chr'] or each['strand'] != feat_element['strand']:
continue
trans_start = int(each['start'])
trans_stop = int(each ['stop'])
# transcript length
trans_length = trans_stop - trans_start
# print "f keys:" + ",".join(feat_element.keys())
# print "f values:" + "|".join(feat_element.values())
# print "t keys:" + ",".join(each.keys())
# print "t values: "+ "|".join(each.values())
#
# skip if transcript length is less than overlap requirement of the ORF length
if trans_length < feat_overlap:
continue
# if it is greater than or equal to overlap length, then see if it overlaps the ORF the correct amount then add to the
# 1. transcript_start is less than or eq to ORF start and trans_stop is greater than or eq to overlap req of the ORF (5' overlap AND full coverage)
# 2. transcript start is greater than ORF start, but less than max_feat_start and transcript stop is greater than ORF stop (3' overlap)
# 3. transcript start is greater than ORF start and less than max_feat_start AND transcript stop is less than ORF stop
# (get the diff between trans and ORF start and then add that to the min_feat_stop to find new min_feat_stop)
# print "transcript: " + str(trans_start) + " to " + str(trans_stop) + "; length: "+ str(trans_length)
# print "gene: " + str(feat_start) + " to " + str(feat_stop) + "; " + str(feat_overlap) + "bp overlap"
#
if (trans_start <= feat_start and min_feat_stop <= trans_stop) or (feat_start <= trans_start <= max_feat_start and trans_stop > feat_stop) or (feat_start <= trans_start <= max_feat_start and (min_feat_stop + (trans_start - feat_start)) <= trans_stop):
# calculate the % overlap #
each['per_overlap'] = str(_calc_overlap(trans_start, trans_stop, feat_start, feat_stop))
# print 'match for ' + feat_name + ": feat coord - " + str(feat_start) + "," + str(feat_stop) + " and match: " + str(trans_start) +"-"+ str(trans_stop)
# print 'chr:' + each['chr'] + ' and strand ' + each['strand'] + ' match feature ' + feat_element['chr'] + ' and strand ' + feat_element['strand']
each['notes'] = notes # add notes
each['feat'] = feat_name
print each['id'] + ' overlaps ' + each['feat']
filtered_data.append(each) # add to filtered data array
# add to list of list elements to remove from list at the end
# match_list.append(slice)
# sys.exit()
else:
continue
# print 'no match'
# sys.exit()
# slice = slice + 1 # increment slice
# print '# transcripts before sort: ' + str(len(filtered_data))
# listsorted = sorted(XWordDict, key=lambda x: int(operator.itemgetter("pos")(x)))
#sort_data = sorted(filtered_data, int(key=itemgetter('score')), reverse=True)
#sort_data = sorted(filtered_data, key = lambda x: int(operator.itemgetter("score")(x)), reverse=True)
#listsorted = sorted(XWordDict, key=lambda x: int(x['pos']))
sort_data = sorted(filtered_data, key=lambda x: (int(x['score']), int(x['per_overlap'])), reverse=True) # sort by score and then by % overlap
# if len(sort_data) > 0:
# for obj in sort_data:
# print "keys: " + "|".join(obj.keys())
# print "vals: " + ",".join(obj.values())
# sys.exit()
return sort_data
def _parse_sac_gff(file):
file_obj = open(file, 'r')
# file_data = open(file).readlines()
col_count = 0
ordered_keys = list()
data_dict = dict()
feat_data = list()
headers = dict()
feat_count = 1
for line in file_obj:
# skip line if it is a comment
if (re.match("^#+", line)):
# print "comment line: " + line
continue
line_array = list()
# print "LINE:"+ line + "\n"
#line_array = line.split(",") # split by commas
length = len(line_array) # check how many columns there are
if len(line_array) <= 1: # if it is tab-delimited, then split by \t
line_array = line.split("\t")
if len(line_array) <= 1: ## skip any line that can't be parsed
continue
key = line_array[0]
notes_array = line_array[8].rstrip().split(';')
feat_name = notes_array[0].replace("ID=","")
# print "data: " + line
# if key exists already, then add to data array
if key in data_dict.keys():
#feat_count = feat_count + 1 ## increase feat_count number
data_dict[key].append({'feat_name': feat_name, 'chr': line_array[0],'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'feat_type': line_array[2],'notes': line_array[8].rstrip()})
else:
# print "new chromosome: " + line_array[0]
## else make a new key
data_dict[line_array[0]] = [{'feat_name': feat_name, 'chr':line_array[0],'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'feat_type': line_array[2], 'notes':line_array[8].rstrip()}]
# add to feat_count
# for chr in data_dict.keys():
# print "chr:" + chr
# for row in data_dict[chr]:
# if row['feat_type'] == 'gene':
return (data_dict)
def _open_make_hash(file):
file_obj = open(file, 'r')
# file_data = open(file).readlines()
col_count = 0
ordered_keys = list()
data_dict = dict()
feat_data = list()
headers = dict()
for line in file_obj:
# skip line if it is a comment
if (re.match("^#+", line)):
# print "comment line: " + line
continue
line_array = list()
# print "LINE:"+ line + "\n"
line_array = line.split(",") # split by commas
length = len(line_array) # check how many columns there are
if len(line_array) <= 1: # if it is tab-delimited, then split by \t
# print "data: " + line
line_array = line.split("\t")
key = line_array[0]
# if key exists already, then add to data array
if key in data_dict.keys():
# feat_count = feat_count + 1 ## increase feat_count number
data_dict[key].append({'id': "".join(line_array[0:5]), 'chr': line_array[0], 'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'score': line_array[5].rstrip()})
else:
# print "new chromosome: " + line_array[0]
## else make a new key
data_dict[line_array[0]] = [{'id': "".join(line_array[0:5]),'chr': line_array[0], 'start' : line_array[3], 'stop': line_array[4], 'strand' : line_array[6], 'score': line_array[5].rstrip()}]
# add to feat_count
# for key in data_dict.keys():
# print key + ": num transcripts = " + str(len(data_dict[key]))#
# sys.exit()
return (data_dict)
if __name__ == "__main__":
main(sys.argv[1:]) | sorted_keys = list()
data_to_filter = dict()
two_fold_data = dict()
genome_data = dict()
transcript_data = dict()
feat_type = 'gene' ## specific feature type; use 'all' if no specific feature type
filter_val = 3 ## number to filter by (count or score)
filter_type = 'count' # can filter by count('count') or score cutoff ('cutoff')
overlap = '1' # amount of overlap a transcript has on an ORF
try:
opts, args = getopt.getopt(argv,"hi:o:l:f:v:t:s",["ifile=","ofile=", "lfile=","format=","value=","type=","seqfeat="])
except getopt.GetoptError:
print 'parse_files_for_transcripts.py -i <inputfile> -o <outputfile> -l <locusfile> -f <output_file_format> -v <filter value> -t <filter_type: count OR cutoff> -s <sequence_feature: gene, CDS, etc>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'parse_files_for_transcripts.py -i <inputfile> -o <outputfile> -l <locusfile> -f <output_file_format> -v <filter value> -t <filter_type: count OR cutoff> -s <sequence_feature: gene, CDS, etc>'
sys.exit()
elif opt in ("-i", "--ifile"): # gff3 format
firstfile = arg
elif opt in ("-o", "--ofile"): # outfile name
outfile = arg
elif opt in ("-l","--lfile"): # s_cer gff3 file
locusfile = arg
elif opt in ("-f","--format"): #gff3 or tsv/wig file ## DOESN'T WORK
file_format = arg
elif opt in ("-v","--value"): ## filter value
filter_val = arg
elif opt in ("-t","--type"): ## type of filter -- count or cutoff # Not tested
filter_type = arg
elif opt in ("-s","--seqfeat"): ## feature type # Not tested
feat_type = arg
# defining out files:
unfiltered_file = "unfiltered_" + outfile
non_match_file = "unmatched_" + outfile
# make hashes
(file_data) = _open_make_hash(firstfile) # opens file to parse and makes a hash
(genome_data) = _parse_sac_gff(locusfile) # opens and makes hash of gene annotations
# print "file keys: " + ",".join(file_data.keys())
# print "s_cer gff keys: "+ ",".join(genome_data.keys())
## iterate over all the 'gene' feature types and find the transcripts that cover the entire thing
#, non_matches)
(unfiltered_matches, filtered_transcripts, unmatched_transcripts) = _get_transcripts(file_data, genome_data, filter_type, filter_val, feat_type, overlap)
## right now, just make a gff file
_print_gff(filtered_transcripts, outfile)
_print_gff(unfiltered_matches, unfiltered_file)
_print_gff(unmatched_transcripts, non_match_file)
## if -f flag is used, then just reformat data ### | identifier_body |
mod.rs | use std::io::Error;
use std::mem::size_of;
use std::os::raw::{c_float, c_int, c_uint, c_void};
use std::ptr::null_mut;
use lawrencium::*;
mod utils_windows;
use utils_windows::*;
use crate::common::*;
pub struct GLContext {
context_ptr: HGLRC,
pixel_format_id: i32,
_pixel_format_descriptor: PIXELFORMATDESCRIPTOR,
opengl_module: HMODULE,
current_window: Option<HWND>,
device_context: Option<HDC>,
vsync: VSync,
}
impl GLContext {
pub fn new() -> GLContextBuilder {
GLContextBuilder {
gl_attributes: GLContextAttributes {
major_version: 3,
minor_version: 3,
msaa_samples: 1,
color_bits: 24,
alpha_bits: 8,
depth_bits: 24,
stencil_bits: 8,
srgb: true,
webgl_version: WebGLVersion::None,
high_resolution_framebuffer: false,
},
}
}
}
impl GLContextTrait for GLContext {
fn get_attributes(&self) -> GLContextAttributes {
todo!()
}
// This does not correctly handle unsetting a window.
fn set_window(
&mut self,
window: Option<&impl raw_window_handle::HasRawWindowHandle>,
) -> Result<(), SetWindowError> {
use raw_window_handle::*;
unsafe {
let window_handle = window
.map(|w| match w.raw_window_handle() {
RawWindowHandle::Windows(handle) => handle.hwnd as HWND,
_ => unreachable!(),
})
.unwrap();
let window_device_context = if let Some(_window) = window {
if let Some(current_device_context) = self.device_context {
ReleaseDC(window_handle, current_device_context);
}
let device_context = GetDC(window_handle);
self.device_context = Some(device_context);
device_context
} else {
std::ptr::null_mut() as HDC
};
let pixel_format_descriptor: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
// This will error if the window was previously set with an incompatible
// pixel format.
if SetPixelFormat(
window_device_context,
self.pixel_format_id,
&pixel_format_descriptor,
) == 0
{
return Err(SetWindowError::MismatchedPixelFormat);
}
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr)).unwrap();
// self.set_vsync(self.vsync).unwrap(); // Everytime a device context is requested, vsync must be updated.
self.current_window = if let Some(_window) = window {
Some(window_handle)
} else {
None
};
self.set_vsync(self.vsync).unwrap();
}
Ok(())
}
// Is this behavior correct? Does it really work if called from another thread?
fn make_current(&mut self) -> Result<(), std::io::Error> |
fn swap_buffers(&mut self) {
if let Some(device_context) = self.device_context {
unsafe {
SwapBuffers(device_context);
}
}
}
fn resize(&mut self) {}
// wglSwapIntervalEXT sets VSync for the window bound to the current context.
// However here we treat Vsync as a setting on the GLContext,
// so whenever a window is bound we update the GL Context.
fn set_vsync(&mut self, vsync: VSync) -> Result<(), Error> {
if self.current_window.is_some() {
// This call to swap_buffers seems to prevent an issue on Macbooks
// where the setting wouldn't take effect.
// I suspect wglSwapIntervalEXT doesn't get set if a lock of some
// sort is held on back/front buffers, so rendering here ensures that's unlikely
// to happen.
self.swap_buffers();
if match vsync {
VSync::Off => wglSwapIntervalEXT(0),
VSync::On => wglSwapIntervalEXT(1),
VSync::Adaptive => wglSwapIntervalEXT(-1),
VSync::Other(i) => wglSwapIntervalEXT(i),
} == false
{
Err(Error::last_os_error())
} else {
self.vsync = vsync;
Ok(())
}
} else {
Ok(()) // Nothing happens, should an error be returned?
}
}
fn get_vsync(&self) -> VSync {
match wglGetSwapIntervalEXT() {
0 => VSync::Off,
1 => VSync::On,
-1 => VSync::Adaptive,
i => VSync::Other(i),
}
}
fn get_proc_address(&self, address: &str) -> *const core::ffi::c_void {
get_proc_address_inner(self.opengl_module, address)
}
}
fn get_proc_address_inner(opengl_module: HMODULE, address: &str) -> *const core::ffi::c_void {
unsafe {
let name = std::ffi::CString::new(address).unwrap();
let mut result = wglGetProcAddress(name.as_ptr() as *const i8) as *const std::ffi::c_void;
if result.is_null() {
// Functions that were part of OpenGL1 need to be loaded differently.
result = GetProcAddress(opengl_module, name.as_ptr() as *const i8)
as *const std::ffi::c_void;
}
/*
if result.is_null() {
println!("FAILED TO LOAD: {}", address);
} else {
println!("Loaded: {} {:?}", address, result);
}
*/
result
}
}
impl Drop for GLContext {
fn drop(&mut self) {
unsafe {
if wglDeleteContext(self.context_ptr) == 0 {
panic!("Failed to delete OpenGL Context");
}
if let Some(hdc) = self.device_context {
if ReleaseDC(self.current_window.unwrap(), hdc) == 0 {
panic!("Failed to release device context");
}
}
}
}
}
impl GLContextBuilder {
pub fn build(&self) -> Result<GLContext, ()> {
Ok(new_opengl_context(
self.gl_attributes.color_bits,
self.gl_attributes.alpha_bits,
self.gl_attributes.depth_bits,
self.gl_attributes.stencil_bits,
self.gl_attributes.msaa_samples,
self.gl_attributes.major_version,
self.gl_attributes.minor_version,
self.gl_attributes.srgb,
)
.unwrap())
}
}
/// Creates an OpenGL context.
/// h_instance is the parent module's h_instance
/// class_name is the parent class's name
/// panic_if_fail will crash the program with a useful callstack if something goes wrong
/// color bits and alpha bits should add up to 32
pub fn new_opengl_context(
color_bits: u8,
alpha_bits: u8,
depth_bits: u8,
stencil_bits: u8,
msaa_samples: u8,
major_version: u8,
minor_version: u8,
srgb: bool,
) -> Result<GLContext, Error> {
// This function performs the following steps:
// * First register the window class.
// * Then create a dummy_window with that class ...
// * Which is used to setup a dummy OpenGL context ...
// * Which is used to load OpenGL extensions ...
// * Which are used to set more specific pixel formats and specify an OpenGL version ...
// * Which is used to create another dummy window ...
// * Which is used to create the final OpenGL context!
unsafe {
// Register the window class.
let window_class_name = win32_string("kapp_gl_window");
let h_instance = GetModuleHandleW(null_mut());
let window_class = WNDCLASSW {
style: 0,
lpfnWndProc: Some(kapp_gl_window_callback),
cbClsExtra: 0,
cbWndExtra: 0,
hInstance: h_instance,
hIcon: null_mut(),
hCursor: null_mut(), // This may not be what is desired. Potentially this makes it annoying to change the cursor later.
hbrBackground: null_mut(),
lpszMenuName: null_mut(),
lpszClassName: window_class_name.as_ptr(),
};
RegisterClassW(&window_class);
// Then create a dummy window
let h_instance = GetModuleHandleW(null_mut());
let dummy_window = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window)?;
// DC stands for 'device context'
// Definition of a device context:
// https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
let dummy_window_dc = GetDC(dummy_window);
error_if_null(dummy_window_dc)?;
// Create a dummy PIXELFORMATDESCRIPTOR (PFD).
// This PFD is based on the recommendations from here:
// https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Create_a_False_Context
let mut dummy_pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
dummy_pfd.nSize = size_of::<PIXELFORMATDESCRIPTOR>() as u16;
dummy_pfd.nVersion = 1;
dummy_pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
dummy_pfd.iPixelType = PFD_TYPE_RGBA as u8;
dummy_pfd.cColorBits = 32;
dummy_pfd.cAlphaBits = 8;
dummy_pfd.cDepthBits = 24;
let dummy_pixel_format_id = ChoosePixelFormat(dummy_window_dc, &dummy_pfd);
error_if_false(dummy_pixel_format_id)?;
error_if_false(SetPixelFormat(
dummy_window_dc,
dummy_pixel_format_id,
&dummy_pfd,
))?;
// Create the dummy OpenGL context.
let dummy_opengl_context = wglCreateContext(dummy_window_dc);
error_if_null(dummy_opengl_context)?;
error_if_false(wglMakeCurrent(dummy_window_dc, dummy_opengl_context))?;
// Load the function to choose a pixel format.
wglChoosePixelFormatARB_ptr = wgl_get_proc_address("wglChoosePixelFormatARB")?;
// Load the function to create an OpenGL context with extra attributes.
wglCreateContextAttribsARB_ptr = wgl_get_proc_address("wglCreateContextAttribsARB")?;
// Create the second dummy window.
let dummy_window2 = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window2)?;
// DC is 'device context'
let dummy_window_dc2 = GetDC(dummy_window2);
error_if_null(dummy_window_dc2)?;
// Setup the actual pixel format we'll use.
// Later this is where we'll specify pixel format parameters.
// Documentation about these flags here:
// https://www.khronos.org/registry/OpenGL/extensions/ARB/WGL_ARB_pixel_format.txt
let pixel_attributes = vec![
WGL_DRAW_TO_WINDOW_ARB,
TRUE as i32,
WGL_SUPPORT_OPENGL_ARB,
TRUE as i32,
WGL_DOUBLE_BUFFER_ARB,
TRUE as i32,
WGL_PIXEL_TYPE_ARB,
WGL_TYPE_RGBA_ARB,
WGL_ACCELERATION_ARB,
WGL_FULL_ACCELERATION_ARB,
WGL_COLOR_BITS_ARB,
color_bits as i32,
WGL_ALPHA_BITS_ARB,
alpha_bits as i32,
WGL_DEPTH_BITS_ARB,
depth_bits as i32,
WGL_STENCIL_BITS_ARB,
stencil_bits as i32,
WGL_SAMPLE_BUFFERS_ARB,
1,
WGL_SAMPLES_ARB,
msaa_samples as i32,
WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB,
if srgb { TRUE as i32 } else { FALSE as i32 },
0,
];
let mut pixel_format_id = 0;
let mut number_of_formats = 0;
error_if_false(wglChoosePixelFormatARB(
dummy_window_dc2,
pixel_attributes.as_ptr(),
null_mut(),
1,
&mut pixel_format_id,
&mut number_of_formats,
))?;
error_if_false(number_of_formats as i32)?; // error_if_false just errors if the argument is 0, which is what we need here
// PFD stands for 'pixel format descriptor'
// It's unclear why this call to DescribePixelFormat is needed?
// DescribePixelFormat fills the pfd with a description of the pixel format.
// But why does this window need the same pixel format as the previous one?
// Just it just need a valid pixel format?
let mut pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
DescribePixelFormat(
dummy_window_dc2,
pixel_format_id,
size_of::<PIXELFORMATDESCRIPTOR>() as u32,
&mut pfd,
);
SetPixelFormat(dummy_window_dc2, pixel_format_id, &pfd);
// Finally we can create the OpenGL context!
// Need to allow for choosing major and minor version.
let major_version_minimum = major_version as i32;
let minor_version_minimum = minor_version as i32;
let context_attributes = [
WGL_CONTEXT_MAJOR_VERSION_ARB,
major_version_minimum,
WGL_CONTEXT_MINOR_VERSION_ARB,
minor_version_minimum,
WGL_CONTEXT_PROFILE_MASK_ARB,
WGL_CONTEXT_CORE_PROFILE_BIT_ARB,
0,
];
let opengl_context = wglCreateContextAttribsARB(
dummy_window_dc2,
0 as HGLRC, // An existing OpenGL context to share resources with. 0 means none.
context_attributes.as_ptr(),
);
error_if_null(opengl_context)?;
// Clean up all of our resources
// It's bad that these calls only occur if all the prior steps were succesful.
// If a program were to recover from a failure to setup an OpenGL context these resources would be leaked.
wglMakeCurrent(dummy_window_dc, null_mut());
wglDeleteContext(dummy_opengl_context);
ReleaseDC(dummy_window, dummy_window_dc);
DestroyWindow(dummy_window);
error_if_false(wglMakeCurrent(dummy_window_dc2, opengl_context))?;
let opengl_module = LoadLibraryA("opengl32.dll\0".as_ptr() as *const i8);
// Load swap interval for Vsync
let function_pointer = wglGetProcAddress("wglSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
let function_pointer = wglGetProcAddress("wglGetSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglGetSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglGetSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
// Default to Vsync enabled
if !wglSwapIntervalEXT(1) {
return Err(Error::last_os_error());
}
// Will the dummy window be rendererd to if no other window is made current?
ReleaseDC(dummy_window2, dummy_window_dc2);
DestroyWindow(dummy_window2);
// Disconnects from current window
// Uncommenting this line can cause intermittment crashes
// It's unclear why, as this should just disconnect the dummy window context
// However leaving this commented should be harmless.
// Actually, it just improves the situation, but doesn't prevent it.
//wglMakeCurrent(dummy_window_dc2, null_mut());
Ok(GLContext {
context_ptr: opengl_context,
pixel_format_id,
_pixel_format_descriptor: pfd,
opengl_module,
current_window: None,
vsync: VSync::On,
device_context: None,
})
}
}
fn create_dummy_window(h_instance: HINSTANCE, class_name: &Vec<u16>) -> HWND {
let title = win32_string("kapp Placeholder");
unsafe {
// https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw
CreateWindowExW(
0, // extended style Is this ok?
class_name.as_ptr(), // A class created by RegisterClass
title.as_ptr(), // window title
WS_CLIPSIBLINGS | WS_CLIPCHILDREN, // style
0, // x position
0, // y position
1, // width
1, // height
null_mut(), // parent window
null_mut(), // menu
h_instance, // Module handle
null_mut(), // Data sent to window
)
}
}
pub unsafe extern "system" fn kapp_gl_window_callback(
hwnd: HWND,
u_msg: UINT,
w_param: WPARAM,
l_param: LPARAM,
) -> LRESULT {
// DefWindowProcW is the default Window event handler.
DefWindowProcW(hwnd, u_msg, w_param, l_param)
}
fn wgl_get_proc_address(name: &str) -> Result<*const c_void, Error> {
let name = std::ffi::CString::new(name).unwrap();
let result = unsafe { wglGetProcAddress(name.as_ptr() as *const i8) as *const c_void };
error_if_null(result)?;
Ok(result)
}
// These definitions are based on the wglext.h header available here:
// https://www.khronos.org/registry/OpenGL/api/GL/wglext.h
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglChoosePixelFormatARB_ptr: *const c_void = std::ptr::null();
#[allow(non_snake_case, non_upper_case_globals)]
fn wglChoosePixelFormatARB(
hdc: HDC,
piAttribIList: *const c_int,
pfAttribFList: *const c_float,
nMaxFormats: c_uint,
piFormats: *mut c_int,
nNumFormats: *mut c_uint,
) -> c_int {
unsafe {
std::mem::transmute::<
_,
extern "system" fn(
HDC,
*const c_int,
*const c_float,
c_uint,
*mut c_int,
*mut c_uint,
) -> c_int,
>(wglChoosePixelFormatARB_ptr)(
hdc,
piAttribIList,
pfAttribFList,
nMaxFormats,
piFormats,
nNumFormats,
)
}
}
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglCreateContextAttribsARB_ptr: *const c_void = std::ptr::null();
#[allow(non_snake_case, non_upper_case_globals)]
fn wglCreateContextAttribsARB(hdc: HDC, hShareContext: HGLRC, attribList: *const c_int) -> HGLRC {
unsafe {
std::mem::transmute::<_, extern "system" fn(HDC, HGLRC, *const c_int) -> HGLRC>(
wglCreateContextAttribsARB_ptr,
)(hdc, hShareContext, attribList)
}
}
// Once again these are all from here:
// https://www.khronos.org/registry/OpenGL/api/GL/wglext.h
// A few are commented out that may be useful later.
const WGL_DRAW_TO_WINDOW_ARB: c_int = 0x2001;
// const WGL_DRAW_TO_BITMAP_ARB: c_int = 0x2002;
const WGL_ACCELERATION_ARB: c_int = 0x2003;
const WGL_SUPPORT_OPENGL_ARB: c_int = 0x2010;
const WGL_DOUBLE_BUFFER_ARB: c_int = 0x2011;
const WGL_PIXEL_TYPE_ARB: c_int = 0x2013;
const WGL_COLOR_BITS_ARB: c_int = 0x2014;
// const WGL_RED_BITS_ARB: c_int = 0x2015;
// const WGL_GREEN_BITS_ARB: c_int = 0x2017;
// const WGL_BLUE_BITS_ARB: c_int = 0x2019;
const WGL_ALPHA_BITS_ARB: c_int = 0x201B;
const WGL_DEPTH_BITS_ARB: c_int = 0x2022;
const WGL_STENCIL_BITS_ARB: c_int = 0x2023;
const WGL_FULL_ACCELERATION_ARB: c_int = 0x2027;
const WGL_TYPE_RGBA_ARB: c_int = 0x202B;
const WGL_SAMPLE_BUFFERS_ARB: c_int = 0x2041;
const WGL_SAMPLES_ARB: c_int = 0x2042;
const WGL_CONTEXT_MAJOR_VERSION_ARB: c_int = 0x2091;
const WGL_CONTEXT_MINOR_VERSION_ARB: c_int = 0x2092;
const WGL_CONTEXT_PROFILE_MASK_ARB: c_int = 0x9126;
const WGL_CONTEXT_CORE_PROFILE_BIT_ARB: c_int = 0x00000001;
// const WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB: c_int = 0x00000002;
const WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB: c_int = 0x20A9;
// This is a C extension function requested on load.
#[allow(non_upper_case_globals)]
static mut wglSwapIntervalEXT_ptr: *const std::ffi::c_void = std::ptr::null();
#[allow(non_upper_case_globals)]
#[allow(non_snake_case)]
fn wglSwapIntervalEXT(i: std::os::raw::c_int) -> bool {
unsafe {
std::mem::transmute::<_, extern "system" fn(std::os::raw::c_int) -> bool>(
wglSwapIntervalEXT_ptr,
)(i)
}
}
// This is a C extension function requested on load.
#[allow(non_upper_case_globals)]
static mut wglGetSwapIntervalEXT_ptr: *const std::ffi::c_void = std::ptr::null();
#[allow(non_upper_case_globals)]
#[allow(non_snake_case)]
fn wglGetSwapIntervalEXT() -> std::os::raw::c_int {
unsafe {
std::mem::transmute::<_, extern "system" fn() -> std::os::raw::c_int>(
wglGetSwapIntervalEXT_ptr,
)()
}
}
| {
unsafe {
let window_device_context = self.device_context.unwrap_or(std::ptr::null_mut());
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr))
}
} | identifier_body |
mod.rs | use std::io::Error;
use std::mem::size_of;
use std::os::raw::{c_float, c_int, c_uint, c_void};
use std::ptr::null_mut;
use lawrencium::*;
mod utils_windows;
use utils_windows::*;
use crate::common::*;
pub struct GLContext {
context_ptr: HGLRC,
pixel_format_id: i32,
_pixel_format_descriptor: PIXELFORMATDESCRIPTOR,
opengl_module: HMODULE,
current_window: Option<HWND>,
device_context: Option<HDC>,
vsync: VSync,
}
impl GLContext {
pub fn new() -> GLContextBuilder {
GLContextBuilder {
gl_attributes: GLContextAttributes {
major_version: 3,
minor_version: 3,
msaa_samples: 1,
color_bits: 24,
alpha_bits: 8,
depth_bits: 24,
stencil_bits: 8,
srgb: true,
webgl_version: WebGLVersion::None,
high_resolution_framebuffer: false,
},
}
}
}
impl GLContextTrait for GLContext {
fn get_attributes(&self) -> GLContextAttributes {
todo!()
}
// This does not correctly handle unsetting a window.
fn set_window(
&mut self,
window: Option<&impl raw_window_handle::HasRawWindowHandle>,
) -> Result<(), SetWindowError> {
use raw_window_handle::*;
unsafe {
let window_handle = window
.map(|w| match w.raw_window_handle() {
RawWindowHandle::Windows(handle) => handle.hwnd as HWND,
_ => unreachable!(),
})
.unwrap();
let window_device_context = if let Some(_window) = window {
if let Some(current_device_context) = self.device_context {
ReleaseDC(window_handle, current_device_context);
}
let device_context = GetDC(window_handle);
self.device_context = Some(device_context);
device_context
} else {
std::ptr::null_mut() as HDC
};
let pixel_format_descriptor: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
// This will error if the window was previously set with an incompatible
// pixel format.
if SetPixelFormat(
window_device_context,
self.pixel_format_id,
&pixel_format_descriptor,
) == 0
{
return Err(SetWindowError::MismatchedPixelFormat);
}
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr)).unwrap();
// self.set_vsync(self.vsync).unwrap(); // Everytime a device context is requested, vsync must be updated.
self.current_window = if let Some(_window) = window {
Some(window_handle)
} else {
None
};
self.set_vsync(self.vsync).unwrap();
}
Ok(())
}
// Is this behavior correct? Does it really work if called from another thread?
fn make_current(&mut self) -> Result<(), std::io::Error> {
unsafe {
let window_device_context = self.device_context.unwrap_or(std::ptr::null_mut());
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr))
}
}
fn swap_buffers(&mut self) {
if let Some(device_context) = self.device_context {
unsafe {
SwapBuffers(device_context);
}
}
}
fn resize(&mut self) {}
// wglSwapIntervalEXT sets VSync for the window bound to the current context.
// However here we treat Vsync as a setting on the GLContext,
// so whenever a window is bound we update the GL Context.
fn set_vsync(&mut self, vsync: VSync) -> Result<(), Error> {
if self.current_window.is_some() {
// This call to swap_buffers seems to prevent an issue on Macbooks
// where the setting wouldn't take effect.
// I suspect wglSwapIntervalEXT doesn't get set if a lock of some
// sort is held on back/front buffers, so rendering here ensures that's unlikely
// to happen.
self.swap_buffers();
if match vsync {
VSync::Off => wglSwapIntervalEXT(0),
VSync::On => wglSwapIntervalEXT(1),
VSync::Adaptive => wglSwapIntervalEXT(-1),
VSync::Other(i) => wglSwapIntervalEXT(i),
} == false
{
Err(Error::last_os_error())
} else {
self.vsync = vsync;
Ok(())
}
} else {
Ok(()) // Nothing happens, should an error be returned?
}
}
fn get_vsync(&self) -> VSync {
match wglGetSwapIntervalEXT() {
0 => VSync::Off,
1 => VSync::On,
-1 => VSync::Adaptive,
i => VSync::Other(i),
}
}
fn get_proc_address(&self, address: &str) -> *const core::ffi::c_void {
get_proc_address_inner(self.opengl_module, address)
}
}
fn get_proc_address_inner(opengl_module: HMODULE, address: &str) -> *const core::ffi::c_void {
unsafe {
let name = std::ffi::CString::new(address).unwrap();
let mut result = wglGetProcAddress(name.as_ptr() as *const i8) as *const std::ffi::c_void;
if result.is_null() {
// Functions that were part of OpenGL1 need to be loaded differently.
result = GetProcAddress(opengl_module, name.as_ptr() as *const i8)
as *const std::ffi::c_void;
}
/*
if result.is_null() {
println!("FAILED TO LOAD: {}", address);
} else {
println!("Loaded: {} {:?}", address, result);
}
*/
result
}
}
impl Drop for GLContext {
fn drop(&mut self) {
unsafe {
if wglDeleteContext(self.context_ptr) == 0 {
panic!("Failed to delete OpenGL Context");
}
if let Some(hdc) = self.device_context {
if ReleaseDC(self.current_window.unwrap(), hdc) == 0 |
}
}
}
}
impl GLContextBuilder {
pub fn build(&self) -> Result<GLContext, ()> {
Ok(new_opengl_context(
self.gl_attributes.color_bits,
self.gl_attributes.alpha_bits,
self.gl_attributes.depth_bits,
self.gl_attributes.stencil_bits,
self.gl_attributes.msaa_samples,
self.gl_attributes.major_version,
self.gl_attributes.minor_version,
self.gl_attributes.srgb,
)
.unwrap())
}
}
/// Creates an OpenGL context.
/// h_instance is the parent module's h_instance
/// class_name is the parent class's name
/// panic_if_fail will crash the program with a useful callstack if something goes wrong
/// color bits and alpha bits should add up to 32
pub fn new_opengl_context(
color_bits: u8,
alpha_bits: u8,
depth_bits: u8,
stencil_bits: u8,
msaa_samples: u8,
major_version: u8,
minor_version: u8,
srgb: bool,
) -> Result<GLContext, Error> {
// This function performs the following steps:
// * First register the window class.
// * Then create a dummy_window with that class ...
// * Which is used to setup a dummy OpenGL context ...
// * Which is used to load OpenGL extensions ...
// * Which are used to set more specific pixel formats and specify an OpenGL version ...
// * Which is used to create another dummy window ...
// * Which is used to create the final OpenGL context!
unsafe {
// Register the window class.
let window_class_name = win32_string("kapp_gl_window");
let h_instance = GetModuleHandleW(null_mut());
let window_class = WNDCLASSW {
style: 0,
lpfnWndProc: Some(kapp_gl_window_callback),
cbClsExtra: 0,
cbWndExtra: 0,
hInstance: h_instance,
hIcon: null_mut(),
hCursor: null_mut(), // This may not be what is desired. Potentially this makes it annoying to change the cursor later.
hbrBackground: null_mut(),
lpszMenuName: null_mut(),
lpszClassName: window_class_name.as_ptr(),
};
RegisterClassW(&window_class);
// Then create a dummy window
let h_instance = GetModuleHandleW(null_mut());
let dummy_window = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window)?;
// DC stands for 'device context'
// Definition of a device context:
// https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
let dummy_window_dc = GetDC(dummy_window);
error_if_null(dummy_window_dc)?;
// Create a dummy PIXELFORMATDESCRIPTOR (PFD).
// This PFD is based on the recommendations from here:
// https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Create_a_False_Context
let mut dummy_pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
dummy_pfd.nSize = size_of::<PIXELFORMATDESCRIPTOR>() as u16;
dummy_pfd.nVersion = 1;
dummy_pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
dummy_pfd.iPixelType = PFD_TYPE_RGBA as u8;
dummy_pfd.cColorBits = 32;
dummy_pfd.cAlphaBits = 8;
dummy_pfd.cDepthBits = 24;
let dummy_pixel_format_id = ChoosePixelFormat(dummy_window_dc, &dummy_pfd);
error_if_false(dummy_pixel_format_id)?;
error_if_false(SetPixelFormat(
dummy_window_dc,
dummy_pixel_format_id,
&dummy_pfd,
))?;
// Create the dummy OpenGL context.
let dummy_opengl_context = wglCreateContext(dummy_window_dc);
error_if_null(dummy_opengl_context)?;
error_if_false(wglMakeCurrent(dummy_window_dc, dummy_opengl_context))?;
// Load the function to choose a pixel format.
wglChoosePixelFormatARB_ptr = wgl_get_proc_address("wglChoosePixelFormatARB")?;
// Load the function to create an OpenGL context with extra attributes.
wglCreateContextAttribsARB_ptr = wgl_get_proc_address("wglCreateContextAttribsARB")?;
// Create the second dummy window.
let dummy_window2 = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window2)?;
// DC is 'device context'
let dummy_window_dc2 = GetDC(dummy_window2);
error_if_null(dummy_window_dc2)?;
// Setup the actual pixel format we'll use.
// Later this is where we'll specify pixel format parameters.
// Documentation about these flags here:
// https://www.khronos.org/registry/OpenGL/extensions/ARB/WGL_ARB_pixel_format.txt
let pixel_attributes = vec![
WGL_DRAW_TO_WINDOW_ARB,
TRUE as i32,
WGL_SUPPORT_OPENGL_ARB,
TRUE as i32,
WGL_DOUBLE_BUFFER_ARB,
TRUE as i32,
WGL_PIXEL_TYPE_ARB,
WGL_TYPE_RGBA_ARB,
WGL_ACCELERATION_ARB,
WGL_FULL_ACCELERATION_ARB,
WGL_COLOR_BITS_ARB,
color_bits as i32,
WGL_ALPHA_BITS_ARB,
alpha_bits as i32,
WGL_DEPTH_BITS_ARB,
depth_bits as i32,
WGL_STENCIL_BITS_ARB,
stencil_bits as i32,
WGL_SAMPLE_BUFFERS_ARB,
1,
WGL_SAMPLES_ARB,
msaa_samples as i32,
WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB,
if srgb { TRUE as i32 } else { FALSE as i32 },
0,
];
let mut pixel_format_id = 0;
let mut number_of_formats = 0;
error_if_false(wglChoosePixelFormatARB(
dummy_window_dc2,
pixel_attributes.as_ptr(),
null_mut(),
1,
&mut pixel_format_id,
&mut number_of_formats,
))?;
error_if_false(number_of_formats as i32)?; // error_if_false just errors if the argument is 0, which is what we need here
// PFD stands for 'pixel format descriptor'
// It's unclear why this call to DescribePixelFormat is needed?
// DescribePixelFormat fills the pfd with a description of the pixel format.
// But why does this window need the same pixel format as the previous one?
// Just it just need a valid pixel format?
let mut pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
DescribePixelFormat(
dummy_window_dc2,
pixel_format_id,
size_of::<PIXELFORMATDESCRIPTOR>() as u32,
&mut pfd,
);
SetPixelFormat(dummy_window_dc2, pixel_format_id, &pfd);
// Finally we can create the OpenGL context!
// Need to allow for choosing major and minor version.
let major_version_minimum = major_version as i32;
let minor_version_minimum = minor_version as i32;
let context_attributes = [
WGL_CONTEXT_MAJOR_VERSION_ARB,
major_version_minimum,
WGL_CONTEXT_MINOR_VERSION_ARB,
minor_version_minimum,
WGL_CONTEXT_PROFILE_MASK_ARB,
WGL_CONTEXT_CORE_PROFILE_BIT_ARB,
0,
];
let opengl_context = wglCreateContextAttribsARB(
dummy_window_dc2,
0 as HGLRC, // An existing OpenGL context to share resources with. 0 means none.
context_attributes.as_ptr(),
);
error_if_null(opengl_context)?;
// Clean up all of our resources
// It's bad that these calls only occur if all the prior steps were succesful.
// If a program were to recover from a failure to setup an OpenGL context these resources would be leaked.
wglMakeCurrent(dummy_window_dc, null_mut());
wglDeleteContext(dummy_opengl_context);
ReleaseDC(dummy_window, dummy_window_dc);
DestroyWindow(dummy_window);
error_if_false(wglMakeCurrent(dummy_window_dc2, opengl_context))?;
let opengl_module = LoadLibraryA("opengl32.dll\0".as_ptr() as *const i8);
// Load swap interval for Vsync
let function_pointer = wglGetProcAddress("wglSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
let function_pointer = wglGetProcAddress("wglGetSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglGetSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglGetSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
// Default to Vsync enabled
if !wglSwapIntervalEXT(1) {
return Err(Error::last_os_error());
}
// Will the dummy window be rendererd to if no other window is made current?
ReleaseDC(dummy_window2, dummy_window_dc2);
DestroyWindow(dummy_window2);
// Disconnects from current window
// Uncommenting this line can cause intermittment crashes
// It's unclear why, as this should just disconnect the dummy window context
// However leaving this commented should be harmless.
// Actually, it just improves the situation, but doesn't prevent it.
//wglMakeCurrent(dummy_window_dc2, null_mut());
Ok(GLContext {
context_ptr: opengl_context,
pixel_format_id,
_pixel_format_descriptor: pfd,
opengl_module,
current_window: None,
vsync: VSync::On,
device_context: None,
})
}
}
fn create_dummy_window(h_instance: HINSTANCE, class_name: &Vec<u16>) -> HWND {
let title = win32_string("kapp Placeholder");
unsafe {
// https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw
CreateWindowExW(
0, // extended style Is this ok?
class_name.as_ptr(), // A class created by RegisterClass
title.as_ptr(), // window title
WS_CLIPSIBLINGS | WS_CLIPCHILDREN, // style
0, // x position
0, // y position
1, // width
1, // height
null_mut(), // parent window
null_mut(), // menu
h_instance, // Module handle
null_mut(), // Data sent to window
)
}
}
pub unsafe extern "system" fn kapp_gl_window_callback(
hwnd: HWND,
u_msg: UINT,
w_param: WPARAM,
l_param: LPARAM,
) -> LRESULT {
// DefWindowProcW is the default Window event handler.
DefWindowProcW(hwnd, u_msg, w_param, l_param)
}
fn wgl_get_proc_address(name: &str) -> Result<*const c_void, Error> {
let name = std::ffi::CString::new(name).unwrap();
let result = unsafe { wglGetProcAddress(name.as_ptr() as *const i8) as *const c_void };
error_if_null(result)?;
Ok(result)
}
// These definitions are based on the wglext.h header available here:
// https://www.khronos.org/registry/OpenGL/api/GL/wglext.h
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglChoosePixelFormatARB_ptr: *const c_void = std::ptr::null();
#[allow(non_snake_case, non_upper_case_globals)]
fn wglChoosePixelFormatARB(
hdc: HDC,
piAttribIList: *const c_int,
pfAttribFList: *const c_float,
nMaxFormats: c_uint,
piFormats: *mut c_int,
nNumFormats: *mut c_uint,
) -> c_int {
unsafe {
std::mem::transmute::<
_,
extern "system" fn(
HDC,
*const c_int,
*const c_float,
c_uint,
*mut c_int,
*mut c_uint,
) -> c_int,
>(wglChoosePixelFormatARB_ptr)(
hdc,
piAttribIList,
pfAttribFList,
nMaxFormats,
piFormats,
nNumFormats,
)
}
}
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglCreateContextAttribsARB_ptr: *const c_void = std::ptr::null();
#[allow(non_snake_case, non_upper_case_globals)]
fn wglCreateContextAttribsARB(hdc: HDC, hShareContext: HGLRC, attribList: *const c_int) -> HGLRC {
unsafe {
std::mem::transmute::<_, extern "system" fn(HDC, HGLRC, *const c_int) -> HGLRC>(
wglCreateContextAttribsARB_ptr,
)(hdc, hShareContext, attribList)
}
}
// Once again these are all from here:
// https://www.khronos.org/registry/OpenGL/api/GL/wglext.h
// A few are commented out that may be useful later.
const WGL_DRAW_TO_WINDOW_ARB: c_int = 0x2001;
// const WGL_DRAW_TO_BITMAP_ARB: c_int = 0x2002;
const WGL_ACCELERATION_ARB: c_int = 0x2003;
const WGL_SUPPORT_OPENGL_ARB: c_int = 0x2010;
const WGL_DOUBLE_BUFFER_ARB: c_int = 0x2011;
const WGL_PIXEL_TYPE_ARB: c_int = 0x2013;
const WGL_COLOR_BITS_ARB: c_int = 0x2014;
// const WGL_RED_BITS_ARB: c_int = 0x2015;
// const WGL_GREEN_BITS_ARB: c_int = 0x2017;
// const WGL_BLUE_BITS_ARB: c_int = 0x2019;
const WGL_ALPHA_BITS_ARB: c_int = 0x201B;
const WGL_DEPTH_BITS_ARB: c_int = 0x2022;
const WGL_STENCIL_BITS_ARB: c_int = 0x2023;
const WGL_FULL_ACCELERATION_ARB: c_int = 0x2027;
const WGL_TYPE_RGBA_ARB: c_int = 0x202B;
const WGL_SAMPLE_BUFFERS_ARB: c_int = 0x2041;
const WGL_SAMPLES_ARB: c_int = 0x2042;
const WGL_CONTEXT_MAJOR_VERSION_ARB: c_int = 0x2091;
const WGL_CONTEXT_MINOR_VERSION_ARB: c_int = 0x2092;
const WGL_CONTEXT_PROFILE_MASK_ARB: c_int = 0x9126;
const WGL_CONTEXT_CORE_PROFILE_BIT_ARB: c_int = 0x00000001;
// const WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB: c_int = 0x00000002;
const WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB: c_int = 0x20A9;
// This is a C extension function requested on load.
#[allow(non_upper_case_globals)]
static mut wglSwapIntervalEXT_ptr: *const std::ffi::c_void = std::ptr::null();
#[allow(non_upper_case_globals)]
#[allow(non_snake_case)]
fn wglSwapIntervalEXT(i: std::os::raw::c_int) -> bool {
unsafe {
std::mem::transmute::<_, extern "system" fn(std::os::raw::c_int) -> bool>(
wglSwapIntervalEXT_ptr,
)(i)
}
}
// This is a C extension function requested on load.
#[allow(non_upper_case_globals)]
static mut wglGetSwapIntervalEXT_ptr: *const std::ffi::c_void = std::ptr::null();
#[allow(non_upper_case_globals)]
#[allow(non_snake_case)]
fn wglGetSwapIntervalEXT() -> std::os::raw::c_int {
unsafe {
std::mem::transmute::<_, extern "system" fn() -> std::os::raw::c_int>(
wglGetSwapIntervalEXT_ptr,
)()
}
}
| {
panic!("Failed to release device context");
} | conditional_block |
mod.rs | use std::io::Error;
use std::mem::size_of;
use std::os::raw::{c_float, c_int, c_uint, c_void};
use std::ptr::null_mut;
use lawrencium::*;
mod utils_windows;
use utils_windows::*;
use crate::common::*;
pub struct GLContext {
context_ptr: HGLRC,
pixel_format_id: i32,
_pixel_format_descriptor: PIXELFORMATDESCRIPTOR,
opengl_module: HMODULE,
current_window: Option<HWND>,
device_context: Option<HDC>,
vsync: VSync,
}
impl GLContext {
pub fn new() -> GLContextBuilder {
GLContextBuilder {
gl_attributes: GLContextAttributes {
major_version: 3,
minor_version: 3,
msaa_samples: 1,
color_bits: 24,
alpha_bits: 8,
depth_bits: 24,
stencil_bits: 8,
srgb: true,
webgl_version: WebGLVersion::None,
high_resolution_framebuffer: false,
},
}
}
}
impl GLContextTrait for GLContext {
fn get_attributes(&self) -> GLContextAttributes {
todo!()
}
// This does not correctly handle unsetting a window.
fn set_window(
&mut self,
window: Option<&impl raw_window_handle::HasRawWindowHandle>,
) -> Result<(), SetWindowError> {
use raw_window_handle::*;
unsafe {
let window_handle = window
.map(|w| match w.raw_window_handle() {
RawWindowHandle::Windows(handle) => handle.hwnd as HWND,
_ => unreachable!(),
})
.unwrap();
let window_device_context = if let Some(_window) = window {
if let Some(current_device_context) = self.device_context {
ReleaseDC(window_handle, current_device_context);
}
let device_context = GetDC(window_handle);
self.device_context = Some(device_context);
device_context
} else {
std::ptr::null_mut() as HDC
};
let pixel_format_descriptor: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
// This will error if the window was previously set with an incompatible
// pixel format.
if SetPixelFormat(
window_device_context,
self.pixel_format_id,
&pixel_format_descriptor,
) == 0
{
return Err(SetWindowError::MismatchedPixelFormat);
}
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr)).unwrap();
// self.set_vsync(self.vsync).unwrap(); // Everytime a device context is requested, vsync must be updated.
self.current_window = if let Some(_window) = window {
Some(window_handle)
} else {
None
};
self.set_vsync(self.vsync).unwrap();
}
Ok(())
}
// Is this behavior correct? Does it really work if called from another thread?
fn make_current(&mut self) -> Result<(), std::io::Error> {
unsafe {
let window_device_context = self.device_context.unwrap_or(std::ptr::null_mut());
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr))
}
}
fn swap_buffers(&mut self) {
if let Some(device_context) = self.device_context {
unsafe {
SwapBuffers(device_context);
}
}
}
fn resize(&mut self) {}
// wglSwapIntervalEXT sets VSync for the window bound to the current context.
// However here we treat Vsync as a setting on the GLContext,
// so whenever a window is bound we update the GL Context.
fn set_vsync(&mut self, vsync: VSync) -> Result<(), Error> {
if self.current_window.is_some() {
// This call to swap_buffers seems to prevent an issue on Macbooks
// where the setting wouldn't take effect.
// I suspect wglSwapIntervalEXT doesn't get set if a lock of some
// sort is held on back/front buffers, so rendering here ensures that's unlikely
// to happen.
self.swap_buffers();
if match vsync {
VSync::Off => wglSwapIntervalEXT(0),
VSync::On => wglSwapIntervalEXT(1),
VSync::Adaptive => wglSwapIntervalEXT(-1),
VSync::Other(i) => wglSwapIntervalEXT(i),
} == false
{
Err(Error::last_os_error())
} else {
self.vsync = vsync;
Ok(())
}
} else {
Ok(()) // Nothing happens, should an error be returned?
}
}
fn get_vsync(&self) -> VSync {
match wglGetSwapIntervalEXT() {
0 => VSync::Off,
1 => VSync::On,
-1 => VSync::Adaptive,
i => VSync::Other(i),
}
}
fn | (&self, address: &str) -> *const core::ffi::c_void {
get_proc_address_inner(self.opengl_module, address)
}
}
fn get_proc_address_inner(opengl_module: HMODULE, address: &str) -> *const core::ffi::c_void {
unsafe {
let name = std::ffi::CString::new(address).unwrap();
let mut result = wglGetProcAddress(name.as_ptr() as *const i8) as *const std::ffi::c_void;
if result.is_null() {
// Functions that were part of OpenGL1 need to be loaded differently.
result = GetProcAddress(opengl_module, name.as_ptr() as *const i8)
as *const std::ffi::c_void;
}
/*
if result.is_null() {
println!("FAILED TO LOAD: {}", address);
} else {
println!("Loaded: {} {:?}", address, result);
}
*/
result
}
}
impl Drop for GLContext {
fn drop(&mut self) {
unsafe {
if wglDeleteContext(self.context_ptr) == 0 {
panic!("Failed to delete OpenGL Context");
}
if let Some(hdc) = self.device_context {
if ReleaseDC(self.current_window.unwrap(), hdc) == 0 {
panic!("Failed to release device context");
}
}
}
}
}
impl GLContextBuilder {
pub fn build(&self) -> Result<GLContext, ()> {
Ok(new_opengl_context(
self.gl_attributes.color_bits,
self.gl_attributes.alpha_bits,
self.gl_attributes.depth_bits,
self.gl_attributes.stencil_bits,
self.gl_attributes.msaa_samples,
self.gl_attributes.major_version,
self.gl_attributes.minor_version,
self.gl_attributes.srgb,
)
.unwrap())
}
}
/// Creates an OpenGL context.
/// h_instance is the parent module's h_instance
/// class_name is the parent class's name
/// panic_if_fail will crash the program with a useful callstack if something goes wrong
/// color bits and alpha bits should add up to 32
pub fn new_opengl_context(
color_bits: u8,
alpha_bits: u8,
depth_bits: u8,
stencil_bits: u8,
msaa_samples: u8,
major_version: u8,
minor_version: u8,
srgb: bool,
) -> Result<GLContext, Error> {
// This function performs the following steps:
// * First register the window class.
// * Then create a dummy_window with that class ...
// * Which is used to setup a dummy OpenGL context ...
// * Which is used to load OpenGL extensions ...
// * Which are used to set more specific pixel formats and specify an OpenGL version ...
// * Which is used to create another dummy window ...
// * Which is used to create the final OpenGL context!
unsafe {
// Register the window class.
let window_class_name = win32_string("kapp_gl_window");
let h_instance = GetModuleHandleW(null_mut());
let window_class = WNDCLASSW {
style: 0,
lpfnWndProc: Some(kapp_gl_window_callback),
cbClsExtra: 0,
cbWndExtra: 0,
hInstance: h_instance,
hIcon: null_mut(),
hCursor: null_mut(), // This may not be what is desired. Potentially this makes it annoying to change the cursor later.
hbrBackground: null_mut(),
lpszMenuName: null_mut(),
lpszClassName: window_class_name.as_ptr(),
};
RegisterClassW(&window_class);
// Then create a dummy window
let h_instance = GetModuleHandleW(null_mut());
let dummy_window = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window)?;
// DC stands for 'device context'
// Definition of a device context:
// https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
let dummy_window_dc = GetDC(dummy_window);
error_if_null(dummy_window_dc)?;
// Create a dummy PIXELFORMATDESCRIPTOR (PFD).
// This PFD is based on the recommendations from here:
// https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Create_a_False_Context
let mut dummy_pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
dummy_pfd.nSize = size_of::<PIXELFORMATDESCRIPTOR>() as u16;
dummy_pfd.nVersion = 1;
dummy_pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
dummy_pfd.iPixelType = PFD_TYPE_RGBA as u8;
dummy_pfd.cColorBits = 32;
dummy_pfd.cAlphaBits = 8;
dummy_pfd.cDepthBits = 24;
let dummy_pixel_format_id = ChoosePixelFormat(dummy_window_dc, &dummy_pfd);
error_if_false(dummy_pixel_format_id)?;
error_if_false(SetPixelFormat(
dummy_window_dc,
dummy_pixel_format_id,
&dummy_pfd,
))?;
// Create the dummy OpenGL context.
let dummy_opengl_context = wglCreateContext(dummy_window_dc);
error_if_null(dummy_opengl_context)?;
error_if_false(wglMakeCurrent(dummy_window_dc, dummy_opengl_context))?;
// Load the function to choose a pixel format.
wglChoosePixelFormatARB_ptr = wgl_get_proc_address("wglChoosePixelFormatARB")?;
// Load the function to create an OpenGL context with extra attributes.
wglCreateContextAttribsARB_ptr = wgl_get_proc_address("wglCreateContextAttribsARB")?;
// Create the second dummy window.
let dummy_window2 = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window2)?;
// DC is 'device context'
let dummy_window_dc2 = GetDC(dummy_window2);
error_if_null(dummy_window_dc2)?;
// Setup the actual pixel format we'll use.
// Later this is where we'll specify pixel format parameters.
// Documentation about these flags here:
// https://www.khronos.org/registry/OpenGL/extensions/ARB/WGL_ARB_pixel_format.txt
let pixel_attributes = vec![
WGL_DRAW_TO_WINDOW_ARB,
TRUE as i32,
WGL_SUPPORT_OPENGL_ARB,
TRUE as i32,
WGL_DOUBLE_BUFFER_ARB,
TRUE as i32,
WGL_PIXEL_TYPE_ARB,
WGL_TYPE_RGBA_ARB,
WGL_ACCELERATION_ARB,
WGL_FULL_ACCELERATION_ARB,
WGL_COLOR_BITS_ARB,
color_bits as i32,
WGL_ALPHA_BITS_ARB,
alpha_bits as i32,
WGL_DEPTH_BITS_ARB,
depth_bits as i32,
WGL_STENCIL_BITS_ARB,
stencil_bits as i32,
WGL_SAMPLE_BUFFERS_ARB,
1,
WGL_SAMPLES_ARB,
msaa_samples as i32,
WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB,
if srgb { TRUE as i32 } else { FALSE as i32 },
0,
];
let mut pixel_format_id = 0;
let mut number_of_formats = 0;
error_if_false(wglChoosePixelFormatARB(
dummy_window_dc2,
pixel_attributes.as_ptr(),
null_mut(),
1,
&mut pixel_format_id,
&mut number_of_formats,
))?;
error_if_false(number_of_formats as i32)?; // error_if_false just errors if the argument is 0, which is what we need here
// PFD stands for 'pixel format descriptor'
// It's unclear why this call to DescribePixelFormat is needed?
// DescribePixelFormat fills the pfd with a description of the pixel format.
// But why does this window need the same pixel format as the previous one?
// Just it just need a valid pixel format?
let mut pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
DescribePixelFormat(
dummy_window_dc2,
pixel_format_id,
size_of::<PIXELFORMATDESCRIPTOR>() as u32,
&mut pfd,
);
SetPixelFormat(dummy_window_dc2, pixel_format_id, &pfd);
// Finally we can create the OpenGL context!
// Need to allow for choosing major and minor version.
let major_version_minimum = major_version as i32;
let minor_version_minimum = minor_version as i32;
let context_attributes = [
WGL_CONTEXT_MAJOR_VERSION_ARB,
major_version_minimum,
WGL_CONTEXT_MINOR_VERSION_ARB,
minor_version_minimum,
WGL_CONTEXT_PROFILE_MASK_ARB,
WGL_CONTEXT_CORE_PROFILE_BIT_ARB,
0,
];
let opengl_context = wglCreateContextAttribsARB(
dummy_window_dc2,
0 as HGLRC, // An existing OpenGL context to share resources with. 0 means none.
context_attributes.as_ptr(),
);
error_if_null(opengl_context)?;
// Clean up all of our resources
// It's bad that these calls only occur if all the prior steps were succesful.
// If a program were to recover from a failure to setup an OpenGL context these resources would be leaked.
wglMakeCurrent(dummy_window_dc, null_mut());
wglDeleteContext(dummy_opengl_context);
ReleaseDC(dummy_window, dummy_window_dc);
DestroyWindow(dummy_window);
error_if_false(wglMakeCurrent(dummy_window_dc2, opengl_context))?;
let opengl_module = LoadLibraryA("opengl32.dll\0".as_ptr() as *const i8);
// Load swap interval for Vsync
let function_pointer = wglGetProcAddress("wglSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
let function_pointer = wglGetProcAddress("wglGetSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglGetSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglGetSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
// Default to Vsync enabled
if !wglSwapIntervalEXT(1) {
return Err(Error::last_os_error());
}
// Will the dummy window be rendererd to if no other window is made current?
ReleaseDC(dummy_window2, dummy_window_dc2);
DestroyWindow(dummy_window2);
// Disconnects from current window
// Uncommenting this line can cause intermittment crashes
// It's unclear why, as this should just disconnect the dummy window context
// However leaving this commented should be harmless.
// Actually, it just improves the situation, but doesn't prevent it.
//wglMakeCurrent(dummy_window_dc2, null_mut());
Ok(GLContext {
context_ptr: opengl_context,
pixel_format_id,
_pixel_format_descriptor: pfd,
opengl_module,
current_window: None,
vsync: VSync::On,
device_context: None,
})
}
}
fn create_dummy_window(h_instance: HINSTANCE, class_name: &Vec<u16>) -> HWND {
let title = win32_string("kapp Placeholder");
unsafe {
// https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw
CreateWindowExW(
0, // extended style Is this ok?
class_name.as_ptr(), // A class created by RegisterClass
title.as_ptr(), // window title
WS_CLIPSIBLINGS | WS_CLIPCHILDREN, // style
0, // x position
0, // y position
1, // width
1, // height
null_mut(), // parent window
null_mut(), // menu
h_instance, // Module handle
null_mut(), // Data sent to window
)
}
}
pub unsafe extern "system" fn kapp_gl_window_callback(
hwnd: HWND,
u_msg: UINT,
w_param: WPARAM,
l_param: LPARAM,
) -> LRESULT {
// DefWindowProcW is the default Window event handler.
DefWindowProcW(hwnd, u_msg, w_param, l_param)
}
fn wgl_get_proc_address(name: &str) -> Result<*const c_void, Error> {
let name = std::ffi::CString::new(name).unwrap();
let result = unsafe { wglGetProcAddress(name.as_ptr() as *const i8) as *const c_void };
error_if_null(result)?;
Ok(result)
}
// These definitions are based on the wglext.h header available here:
// https://www.khronos.org/registry/OpenGL/api/GL/wglext.h
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglChoosePixelFormatARB_ptr: *const c_void = std::ptr::null();
#[allow(non_snake_case, non_upper_case_globals)]
fn wglChoosePixelFormatARB(
hdc: HDC,
piAttribIList: *const c_int,
pfAttribFList: *const c_float,
nMaxFormats: c_uint,
piFormats: *mut c_int,
nNumFormats: *mut c_uint,
) -> c_int {
unsafe {
std::mem::transmute::<
_,
extern "system" fn(
HDC,
*const c_int,
*const c_float,
c_uint,
*mut c_int,
*mut c_uint,
) -> c_int,
>(wglChoosePixelFormatARB_ptr)(
hdc,
piAttribIList,
pfAttribFList,
nMaxFormats,
piFormats,
nNumFormats,
)
}
}
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglCreateContextAttribsARB_ptr: *const c_void = std::ptr::null();
#[allow(non_snake_case, non_upper_case_globals)]
fn wglCreateContextAttribsARB(hdc: HDC, hShareContext: HGLRC, attribList: *const c_int) -> HGLRC {
unsafe {
std::mem::transmute::<_, extern "system" fn(HDC, HGLRC, *const c_int) -> HGLRC>(
wglCreateContextAttribsARB_ptr,
)(hdc, hShareContext, attribList)
}
}
// Once again these are all from here:
// https://www.khronos.org/registry/OpenGL/api/GL/wglext.h
// A few are commented out that may be useful later.
const WGL_DRAW_TO_WINDOW_ARB: c_int = 0x2001;
// const WGL_DRAW_TO_BITMAP_ARB: c_int = 0x2002;
const WGL_ACCELERATION_ARB: c_int = 0x2003;
const WGL_SUPPORT_OPENGL_ARB: c_int = 0x2010;
const WGL_DOUBLE_BUFFER_ARB: c_int = 0x2011;
const WGL_PIXEL_TYPE_ARB: c_int = 0x2013;
const WGL_COLOR_BITS_ARB: c_int = 0x2014;
// const WGL_RED_BITS_ARB: c_int = 0x2015;
// const WGL_GREEN_BITS_ARB: c_int = 0x2017;
// const WGL_BLUE_BITS_ARB: c_int = 0x2019;
const WGL_ALPHA_BITS_ARB: c_int = 0x201B;
const WGL_DEPTH_BITS_ARB: c_int = 0x2022;
const WGL_STENCIL_BITS_ARB: c_int = 0x2023;
const WGL_FULL_ACCELERATION_ARB: c_int = 0x2027;
const WGL_TYPE_RGBA_ARB: c_int = 0x202B;
const WGL_SAMPLE_BUFFERS_ARB: c_int = 0x2041;
const WGL_SAMPLES_ARB: c_int = 0x2042;
const WGL_CONTEXT_MAJOR_VERSION_ARB: c_int = 0x2091;
const WGL_CONTEXT_MINOR_VERSION_ARB: c_int = 0x2092;
const WGL_CONTEXT_PROFILE_MASK_ARB: c_int = 0x9126;
const WGL_CONTEXT_CORE_PROFILE_BIT_ARB: c_int = 0x00000001;
// const WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB: c_int = 0x00000002;
const WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB: c_int = 0x20A9;
// This is a C extension function requested on load.
#[allow(non_upper_case_globals)]
static mut wglSwapIntervalEXT_ptr: *const std::ffi::c_void = std::ptr::null();
#[allow(non_upper_case_globals)]
#[allow(non_snake_case)]
fn wglSwapIntervalEXT(i: std::os::raw::c_int) -> bool {
unsafe {
std::mem::transmute::<_, extern "system" fn(std::os::raw::c_int) -> bool>(
wglSwapIntervalEXT_ptr,
)(i)
}
}
// This is a C extension function requested on load.
#[allow(non_upper_case_globals)]
static mut wglGetSwapIntervalEXT_ptr: *const std::ffi::c_void = std::ptr::null();
#[allow(non_upper_case_globals)]
#[allow(non_snake_case)]
fn wglGetSwapIntervalEXT() -> std::os::raw::c_int {
unsafe {
std::mem::transmute::<_, extern "system" fn() -> std::os::raw::c_int>(
wglGetSwapIntervalEXT_ptr,
)()
}
}
| get_proc_address | identifier_name |
mod.rs | use std::io::Error;
use std::mem::size_of;
use std::os::raw::{c_float, c_int, c_uint, c_void};
use std::ptr::null_mut;
use lawrencium::*;
mod utils_windows;
use utils_windows::*;
use crate::common::*;
pub struct GLContext {
context_ptr: HGLRC,
pixel_format_id: i32,
_pixel_format_descriptor: PIXELFORMATDESCRIPTOR,
opengl_module: HMODULE,
current_window: Option<HWND>,
device_context: Option<HDC>,
vsync: VSync,
}
impl GLContext {
pub fn new() -> GLContextBuilder {
GLContextBuilder {
gl_attributes: GLContextAttributes {
major_version: 3,
minor_version: 3,
msaa_samples: 1,
color_bits: 24,
alpha_bits: 8,
depth_bits: 24,
stencil_bits: 8,
srgb: true,
webgl_version: WebGLVersion::None,
high_resolution_framebuffer: false,
},
}
}
}
impl GLContextTrait for GLContext {
fn get_attributes(&self) -> GLContextAttributes {
todo!()
}
// This does not correctly handle unsetting a window.
fn set_window(
&mut self,
window: Option<&impl raw_window_handle::HasRawWindowHandle>,
) -> Result<(), SetWindowError> {
use raw_window_handle::*;
unsafe {
let window_handle = window
.map(|w| match w.raw_window_handle() { | _ => unreachable!(),
})
.unwrap();
let window_device_context = if let Some(_window) = window {
if let Some(current_device_context) = self.device_context {
ReleaseDC(window_handle, current_device_context);
}
let device_context = GetDC(window_handle);
self.device_context = Some(device_context);
device_context
} else {
std::ptr::null_mut() as HDC
};
let pixel_format_descriptor: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
// This will error if the window was previously set with an incompatible
// pixel format.
if SetPixelFormat(
window_device_context,
self.pixel_format_id,
&pixel_format_descriptor,
) == 0
{
return Err(SetWindowError::MismatchedPixelFormat);
}
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr)).unwrap();
// self.set_vsync(self.vsync).unwrap(); // Everytime a device context is requested, vsync must be updated.
self.current_window = if let Some(_window) = window {
Some(window_handle)
} else {
None
};
self.set_vsync(self.vsync).unwrap();
}
Ok(())
}
// Is this behavior correct? Does it really work if called from another thread?
fn make_current(&mut self) -> Result<(), std::io::Error> {
unsafe {
let window_device_context = self.device_context.unwrap_or(std::ptr::null_mut());
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr))
}
}
fn swap_buffers(&mut self) {
if let Some(device_context) = self.device_context {
unsafe {
SwapBuffers(device_context);
}
}
}
fn resize(&mut self) {}
// wglSwapIntervalEXT sets VSync for the window bound to the current context.
// However here we treat Vsync as a setting on the GLContext,
// so whenever a window is bound we update the GL Context.
fn set_vsync(&mut self, vsync: VSync) -> Result<(), Error> {
if self.current_window.is_some() {
// This call to swap_buffers seems to prevent an issue on Macbooks
// where the setting wouldn't take effect.
// I suspect wglSwapIntervalEXT doesn't get set if a lock of some
// sort is held on back/front buffers, so rendering here ensures that's unlikely
// to happen.
self.swap_buffers();
if match vsync {
VSync::Off => wglSwapIntervalEXT(0),
VSync::On => wglSwapIntervalEXT(1),
VSync::Adaptive => wglSwapIntervalEXT(-1),
VSync::Other(i) => wglSwapIntervalEXT(i),
} == false
{
Err(Error::last_os_error())
} else {
self.vsync = vsync;
Ok(())
}
} else {
Ok(()) // Nothing happens, should an error be returned?
}
}
fn get_vsync(&self) -> VSync {
match wglGetSwapIntervalEXT() {
0 => VSync::Off,
1 => VSync::On,
-1 => VSync::Adaptive,
i => VSync::Other(i),
}
}
fn get_proc_address(&self, address: &str) -> *const core::ffi::c_void {
get_proc_address_inner(self.opengl_module, address)
}
}
fn get_proc_address_inner(opengl_module: HMODULE, address: &str) -> *const core::ffi::c_void {
unsafe {
let name = std::ffi::CString::new(address).unwrap();
let mut result = wglGetProcAddress(name.as_ptr() as *const i8) as *const std::ffi::c_void;
if result.is_null() {
// Functions that were part of OpenGL1 need to be loaded differently.
result = GetProcAddress(opengl_module, name.as_ptr() as *const i8)
as *const std::ffi::c_void;
}
/*
if result.is_null() {
println!("FAILED TO LOAD: {}", address);
} else {
println!("Loaded: {} {:?}", address, result);
}
*/
result
}
}
impl Drop for GLContext {
fn drop(&mut self) {
unsafe {
if wglDeleteContext(self.context_ptr) == 0 {
panic!("Failed to delete OpenGL Context");
}
if let Some(hdc) = self.device_context {
if ReleaseDC(self.current_window.unwrap(), hdc) == 0 {
panic!("Failed to release device context");
}
}
}
}
}
impl GLContextBuilder {
pub fn build(&self) -> Result<GLContext, ()> {
Ok(new_opengl_context(
self.gl_attributes.color_bits,
self.gl_attributes.alpha_bits,
self.gl_attributes.depth_bits,
self.gl_attributes.stencil_bits,
self.gl_attributes.msaa_samples,
self.gl_attributes.major_version,
self.gl_attributes.minor_version,
self.gl_attributes.srgb,
)
.unwrap())
}
}
/// Creates an OpenGL context.
/// h_instance is the parent module's h_instance
/// class_name is the parent class's name
/// panic_if_fail will crash the program with a useful callstack if something goes wrong
/// color bits and alpha bits should add up to 32
pub fn new_opengl_context(
color_bits: u8,
alpha_bits: u8,
depth_bits: u8,
stencil_bits: u8,
msaa_samples: u8,
major_version: u8,
minor_version: u8,
srgb: bool,
) -> Result<GLContext, Error> {
// This function performs the following steps:
// * First register the window class.
// * Then create a dummy_window with that class ...
// * Which is used to setup a dummy OpenGL context ...
// * Which is used to load OpenGL extensions ...
// * Which are used to set more specific pixel formats and specify an OpenGL version ...
// * Which is used to create another dummy window ...
// * Which is used to create the final OpenGL context!
unsafe {
// Register the window class.
let window_class_name = win32_string("kapp_gl_window");
let h_instance = GetModuleHandleW(null_mut());
let window_class = WNDCLASSW {
style: 0,
lpfnWndProc: Some(kapp_gl_window_callback),
cbClsExtra: 0,
cbWndExtra: 0,
hInstance: h_instance,
hIcon: null_mut(),
hCursor: null_mut(), // This may not be what is desired. Potentially this makes it annoying to change the cursor later.
hbrBackground: null_mut(),
lpszMenuName: null_mut(),
lpszClassName: window_class_name.as_ptr(),
};
RegisterClassW(&window_class);
// Then create a dummy window
let h_instance = GetModuleHandleW(null_mut());
let dummy_window = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window)?;
// DC stands for 'device context'
// Definition of a device context:
// https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
let dummy_window_dc = GetDC(dummy_window);
error_if_null(dummy_window_dc)?;
// Create a dummy PIXELFORMATDESCRIPTOR (PFD).
// This PFD is based on the recommendations from here:
// https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Create_a_False_Context
let mut dummy_pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
dummy_pfd.nSize = size_of::<PIXELFORMATDESCRIPTOR>() as u16;
dummy_pfd.nVersion = 1;
dummy_pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
dummy_pfd.iPixelType = PFD_TYPE_RGBA as u8;
dummy_pfd.cColorBits = 32;
dummy_pfd.cAlphaBits = 8;
dummy_pfd.cDepthBits = 24;
let dummy_pixel_format_id = ChoosePixelFormat(dummy_window_dc, &dummy_pfd);
error_if_false(dummy_pixel_format_id)?;
error_if_false(SetPixelFormat(
dummy_window_dc,
dummy_pixel_format_id,
&dummy_pfd,
))?;
// Create the dummy OpenGL context.
let dummy_opengl_context = wglCreateContext(dummy_window_dc);
error_if_null(dummy_opengl_context)?;
error_if_false(wglMakeCurrent(dummy_window_dc, dummy_opengl_context))?;
// Load the function to choose a pixel format.
wglChoosePixelFormatARB_ptr = wgl_get_proc_address("wglChoosePixelFormatARB")?;
// Load the function to create an OpenGL context with extra attributes.
wglCreateContextAttribsARB_ptr = wgl_get_proc_address("wglCreateContextAttribsARB")?;
// Create the second dummy window.
let dummy_window2 = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window2)?;
// DC is 'device context'
let dummy_window_dc2 = GetDC(dummy_window2);
error_if_null(dummy_window_dc2)?;
// Setup the actual pixel format we'll use.
// Later this is where we'll specify pixel format parameters.
// Documentation about these flags here:
// https://www.khronos.org/registry/OpenGL/extensions/ARB/WGL_ARB_pixel_format.txt
let pixel_attributes = vec![
WGL_DRAW_TO_WINDOW_ARB,
TRUE as i32,
WGL_SUPPORT_OPENGL_ARB,
TRUE as i32,
WGL_DOUBLE_BUFFER_ARB,
TRUE as i32,
WGL_PIXEL_TYPE_ARB,
WGL_TYPE_RGBA_ARB,
WGL_ACCELERATION_ARB,
WGL_FULL_ACCELERATION_ARB,
WGL_COLOR_BITS_ARB,
color_bits as i32,
WGL_ALPHA_BITS_ARB,
alpha_bits as i32,
WGL_DEPTH_BITS_ARB,
depth_bits as i32,
WGL_STENCIL_BITS_ARB,
stencil_bits as i32,
WGL_SAMPLE_BUFFERS_ARB,
1,
WGL_SAMPLES_ARB,
msaa_samples as i32,
WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB,
if srgb { TRUE as i32 } else { FALSE as i32 },
0,
];
let mut pixel_format_id = 0;
let mut number_of_formats = 0;
error_if_false(wglChoosePixelFormatARB(
dummy_window_dc2,
pixel_attributes.as_ptr(),
null_mut(),
1,
&mut pixel_format_id,
&mut number_of_formats,
))?;
error_if_false(number_of_formats as i32)?; // error_if_false just errors if the argument is 0, which is what we need here
// PFD stands for 'pixel format descriptor'
// It's unclear why this call to DescribePixelFormat is needed?
// DescribePixelFormat fills the pfd with a description of the pixel format.
// But why does this window need the same pixel format as the previous one?
// Just it just need a valid pixel format?
let mut pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
DescribePixelFormat(
dummy_window_dc2,
pixel_format_id,
size_of::<PIXELFORMATDESCRIPTOR>() as u32,
&mut pfd,
);
SetPixelFormat(dummy_window_dc2, pixel_format_id, &pfd);
// Finally we can create the OpenGL context!
// Need to allow for choosing major and minor version.
let major_version_minimum = major_version as i32;
let minor_version_minimum = minor_version as i32;
let context_attributes = [
WGL_CONTEXT_MAJOR_VERSION_ARB,
major_version_minimum,
WGL_CONTEXT_MINOR_VERSION_ARB,
minor_version_minimum,
WGL_CONTEXT_PROFILE_MASK_ARB,
WGL_CONTEXT_CORE_PROFILE_BIT_ARB,
0,
];
let opengl_context = wglCreateContextAttribsARB(
dummy_window_dc2,
0 as HGLRC, // An existing OpenGL context to share resources with. 0 means none.
context_attributes.as_ptr(),
);
error_if_null(opengl_context)?;
// Clean up all of our resources
// It's bad that these calls only occur if all the prior steps were succesful.
// If a program were to recover from a failure to setup an OpenGL context these resources would be leaked.
wglMakeCurrent(dummy_window_dc, null_mut());
wglDeleteContext(dummy_opengl_context);
ReleaseDC(dummy_window, dummy_window_dc);
DestroyWindow(dummy_window);
error_if_false(wglMakeCurrent(dummy_window_dc2, opengl_context))?;
let opengl_module = LoadLibraryA("opengl32.dll\0".as_ptr() as *const i8);
// Load swap interval for Vsync
let function_pointer = wglGetProcAddress("wglSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
let function_pointer = wglGetProcAddress("wglGetSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglGetSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglGetSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
// Default to Vsync enabled
if !wglSwapIntervalEXT(1) {
return Err(Error::last_os_error());
}
// Will the dummy window be rendererd to if no other window is made current?
ReleaseDC(dummy_window2, dummy_window_dc2);
DestroyWindow(dummy_window2);
// Disconnects from current window
// Uncommenting this line can cause intermittment crashes
// It's unclear why, as this should just disconnect the dummy window context
// However leaving this commented should be harmless.
// Actually, it just improves the situation, but doesn't prevent it.
//wglMakeCurrent(dummy_window_dc2, null_mut());
Ok(GLContext {
context_ptr: opengl_context,
pixel_format_id,
_pixel_format_descriptor: pfd,
opengl_module,
current_window: None,
vsync: VSync::On,
device_context: None,
})
}
}
fn create_dummy_window(h_instance: HINSTANCE, class_name: &Vec<u16>) -> HWND {
let title = win32_string("kapp Placeholder");
unsafe {
// https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw
CreateWindowExW(
0, // extended style Is this ok?
class_name.as_ptr(), // A class created by RegisterClass
title.as_ptr(), // window title
WS_CLIPSIBLINGS | WS_CLIPCHILDREN, // style
0, // x position
0, // y position
1, // width
1, // height
null_mut(), // parent window
null_mut(), // menu
h_instance, // Module handle
null_mut(), // Data sent to window
)
}
}
pub unsafe extern "system" fn kapp_gl_window_callback(
hwnd: HWND,
u_msg: UINT,
w_param: WPARAM,
l_param: LPARAM,
) -> LRESULT {
// DefWindowProcW is the default Window event handler.
DefWindowProcW(hwnd, u_msg, w_param, l_param)
}
fn wgl_get_proc_address(name: &str) -> Result<*const c_void, Error> {
let name = std::ffi::CString::new(name).unwrap();
let result = unsafe { wglGetProcAddress(name.as_ptr() as *const i8) as *const c_void };
error_if_null(result)?;
Ok(result)
}
// These definitions are based on the wglext.h header available here:
// https://www.khronos.org/registry/OpenGL/api/GL/wglext.h
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglChoosePixelFormatARB_ptr: *const c_void = std::ptr::null();
#[allow(non_snake_case, non_upper_case_globals)]
fn wglChoosePixelFormatARB(
hdc: HDC,
piAttribIList: *const c_int,
pfAttribFList: *const c_float,
nMaxFormats: c_uint,
piFormats: *mut c_int,
nNumFormats: *mut c_uint,
) -> c_int {
unsafe {
std::mem::transmute::<
_,
extern "system" fn(
HDC,
*const c_int,
*const c_float,
c_uint,
*mut c_int,
*mut c_uint,
) -> c_int,
>(wglChoosePixelFormatARB_ptr)(
hdc,
piAttribIList,
pfAttribFList,
nMaxFormats,
piFormats,
nNumFormats,
)
}
}
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglCreateContextAttribsARB_ptr: *const c_void = std::ptr::null();
#[allow(non_snake_case, non_upper_case_globals)]
fn wglCreateContextAttribsARB(hdc: HDC, hShareContext: HGLRC, attribList: *const c_int) -> HGLRC {
unsafe {
std::mem::transmute::<_, extern "system" fn(HDC, HGLRC, *const c_int) -> HGLRC>(
wglCreateContextAttribsARB_ptr,
)(hdc, hShareContext, attribList)
}
}
// Once again these are all from here:
// https://www.khronos.org/registry/OpenGL/api/GL/wglext.h
// A few are commented out that may be useful later.
const WGL_DRAW_TO_WINDOW_ARB: c_int = 0x2001;
// const WGL_DRAW_TO_BITMAP_ARB: c_int = 0x2002;
const WGL_ACCELERATION_ARB: c_int = 0x2003;
const WGL_SUPPORT_OPENGL_ARB: c_int = 0x2010;
const WGL_DOUBLE_BUFFER_ARB: c_int = 0x2011;
const WGL_PIXEL_TYPE_ARB: c_int = 0x2013;
const WGL_COLOR_BITS_ARB: c_int = 0x2014;
// const WGL_RED_BITS_ARB: c_int = 0x2015;
// const WGL_GREEN_BITS_ARB: c_int = 0x2017;
// const WGL_BLUE_BITS_ARB: c_int = 0x2019;
const WGL_ALPHA_BITS_ARB: c_int = 0x201B;
const WGL_DEPTH_BITS_ARB: c_int = 0x2022;
const WGL_STENCIL_BITS_ARB: c_int = 0x2023;
const WGL_FULL_ACCELERATION_ARB: c_int = 0x2027;
const WGL_TYPE_RGBA_ARB: c_int = 0x202B;
const WGL_SAMPLE_BUFFERS_ARB: c_int = 0x2041;
const WGL_SAMPLES_ARB: c_int = 0x2042;
const WGL_CONTEXT_MAJOR_VERSION_ARB: c_int = 0x2091;
const WGL_CONTEXT_MINOR_VERSION_ARB: c_int = 0x2092;
const WGL_CONTEXT_PROFILE_MASK_ARB: c_int = 0x9126;
const WGL_CONTEXT_CORE_PROFILE_BIT_ARB: c_int = 0x00000001;
// const WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB: c_int = 0x00000002;
const WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB: c_int = 0x20A9;
// This is a C extension function requested on load.
#[allow(non_upper_case_globals)]
static mut wglSwapIntervalEXT_ptr: *const std::ffi::c_void = std::ptr::null();
#[allow(non_upper_case_globals)]
#[allow(non_snake_case)]
fn wglSwapIntervalEXT(i: std::os::raw::c_int) -> bool {
unsafe {
std::mem::transmute::<_, extern "system" fn(std::os::raw::c_int) -> bool>(
wglSwapIntervalEXT_ptr,
)(i)
}
}
// This is a C extension function requested on load.
#[allow(non_upper_case_globals)]
static mut wglGetSwapIntervalEXT_ptr: *const std::ffi::c_void = std::ptr::null();
#[allow(non_upper_case_globals)]
#[allow(non_snake_case)]
fn wglGetSwapIntervalEXT() -> std::os::raw::c_int {
unsafe {
std::mem::transmute::<_, extern "system" fn() -> std::os::raw::c_int>(
wglGetSwapIntervalEXT_ptr,
)()
}
} | RawWindowHandle::Windows(handle) => handle.hwnd as HWND, | random_line_split |
appconfig.go | // Application configuration data structures
package erconfig
import (
"fmt"
"regexp"
"strings"
"time"
"github.com/function61/edgerouter/pkg/turbocharger"
)
// can be used to fetch the current state of configuration - the apps Edgerouter knows *right now*,
// based on all the discovery mechanisms used
type CurrentConfigAccessor interface {
Apps() []Application
LastUpdated() time.Time
}
// loosely modeled after https://doc.traefik.io/traefik/v1.7/basics/#matchers
type FrontendKind string
const (
FrontendKindHostname FrontendKind = "hostname"
FrontendKindHostnameRegexp FrontendKind = "hostname_regexp"
FrontendKindPathPrefix FrontendKind = "path_prefix"
)
// https://docs.traefik.io/v1.7/basics/#matchers
type Frontend struct {
Kind FrontendKind `json:"kind"`
Hostname string `json:"hostname,omitempty"`
HostnameRegexp string `json:"hostname_regexp,omitempty"`
PathPrefix string `json:"path_prefix"` // applies with both kinds
StripPathPrefix bool `json:"strip_path_prefix,omitempty"`
AllowInsecureHTTP bool `json:"allow_insecure_http,omitempty"`
}
func (f *Frontend) Validate() error {
switch f.Kind {
case FrontendKindHostname:
return ErrorIfUnset(f.Hostname == "", "Hostname")
case FrontendKindHostnameRegexp:
if err := ErrorIfUnset(f.HostnameRegexp == "", "HostnameRegexp"); err != nil {
return err
}
_, err := regexp.Compile(f.HostnameRegexp)
if err != nil {
return fmt.Errorf("HostnameRegexp: %v", err)
}
case FrontendKindPathPrefix:
return ErrorIfUnset(f.PathPrefix == "", "PathPrefix")
default:
return fmt.Errorf("unknown frontend kind: %s", f.Kind)
}
return nil
}
type Application struct {
Id string `json:"id"` // ACLs can reference this, so keep stable (i.e. service replicas/restarts should not affect this)
Frontends []Frontend `json:"frontends"`
Backend Backend `json:"backend"`
}
func (a *Application) Validate() error {
if err := ErrorIfUnset(a.Id == "", "Id"); err != nil {
return err
}
if err := ErrorIfUnset(len(a.Frontends) == 0, "Frontends"); err != nil {
return err
}
for _, frontend := range a.Frontends {
if err := frontend.Validate(); err != nil {
return fmt.Errorf("app %s frontend: %v", a.Id, err)
}
}
switch a.Backend.Kind {
case BackendKindS3StaticWebsite:
return a.Backend.S3StaticWebsiteOpts.Validate()
case BackendKindReverseProxy:
return a.Backend.ReverseProxyOpts.Validate()
case BackendKindAwsLambda:
return a.Backend.AwsLambdaOpts.Validate()
case BackendKindEdgerouterAdmin, BackendKindPromMetrics:
return nil // nothing to validate
case BackendKindAuthV0:
return a.Backend.AuthV0Opts.Validate()
case BackendKindAuthSso:
return a.Backend.AuthSsoOpts.Validate()
case BackendKindRedirect:
return a.Backend.RedirectOpts.Validate()
case BackendKindTurbocharger:
return a.Backend.TurbochargerOpts.Validate()
default:
return fmt.Errorf("app %s backend unkown kind: %s", a.Id, a.Backend.Kind)
}
}
// when adding new kind, remember to update:
// - Application.Validate()
// - Backend.Describe()
// - factory in backendfactory
type BackendKind string
const (
BackendKindS3StaticWebsite BackendKind = "s3_static_website"
BackendKindReverseProxy BackendKind = "reverse_proxy"
BackendKindAwsLambda BackendKind = "aws_lambda"
BackendKindEdgerouterAdmin BackendKind = "edgerouter_admin"
BackendKindAuthV0 BackendKind = "auth_v0"
BackendKindAuthSso BackendKind = "auth_sso"
BackendKindRedirect BackendKind = "redirect"
BackendKindPromMetrics BackendKind = "prom_metrics"
BackendKindTurbocharger BackendKind = "turbocharger"
)
type Backend struct {
Kind BackendKind `json:"kind"`
S3StaticWebsiteOpts *BackendOptsS3StaticWebsite `json:"s3_static_website_opts,omitempty"`
ReverseProxyOpts *BackendOptsReverseProxy `json:"reverse_proxy_opts,omitempty"`
AwsLambdaOpts *BackendOptsAwsLambda `json:"aws_lambda_opts,omitempty"`
AuthV0Opts *BackendOptsAuthV0 `json:"auth_v0_opts,omitempty"`
AuthSsoOpts *BackendOptsAuthSso `json:"auth_sso_opts,omitempty"`
RedirectOpts *BackendOptsRedirect `json:"redirect_opts,omitempty"`
TurbochargerOpts *BackendOptsTurbocharger `json:"turbocharger_opts,omitempty"`
}
type BackendOptsS3StaticWebsite struct {
BucketName string `json:"bucket_name"`
RegionId string `json:"region_id"`
DeployedVersion string `json:"deployed_version"` // can be empty before first deployed version
NotFoundPage string `json:"404_page,omitempty"` // (optional) ex: "404.html", relative to root of deployed site
}
func (b *BackendOptsS3StaticWebsite) Validate() error {
return FirstError(
ErrorIfUnset(b.BucketName == "", "BucketName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsReverseProxy struct {
Origins []string `json:"origins"`
TlsConfig *TlsConfig `json:"tls_config,omitempty"`
Caching bool `json:"caching,omitempty"` // turn on response caching?
PassHostHeader bool `json:"pass_host_header,omitempty"` // use client-sent Host (=true) or origin's hostname? (=false) https://doc.traefik.io/traefik/routing/services/#pass-host-header
IndexDocument string `json:"index_document,omitempty"` // if request path ends in /foo/ ("directory"), rewrite it into /foo/index.html
RemoveQueryString bool `json:"remove_query_string,omitempty"` // reduces cache misses if responses don't vary on qs
HeadersToOrigin map[string]string `json:"headers_to_origin,omitempty"` // force-add headers to be sent to origin
}
func (b *BackendOptsReverseProxy) Validate() error {
return ErrorIfUnset(len(b.Origins) == 0, "Origins")
}
type BackendOptsAwsLambda struct {
FunctionName string `json:"function_name"`
RegionId string `json:"region_id"`
}
func (b *BackendOptsAwsLambda) Validate() error {
return FirstError(
ErrorIfUnset(b.FunctionName == "", "FunctionName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsAuthV0 struct {
BearerToken string `json:"bearer_token"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthV0) Validate() error {
return FirstError(
ErrorIfUnset(b.BearerToken == "", "BearerToken"),
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
)
}
type BackendOptsAuthSso struct {
IdServerUrl string `json:"id_server_url,omitempty"`
AllowedUserIds []string `json:"allowed_user_ids"`
Audience string `json:"audience"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthSso) Validate() error {
return FirstError(
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
ErrorIfUnset(b.Audience == "", "Audience"),
)
}
type BackendOptsRedirect struct {
To string `json:"to"`
}
func (b *BackendOptsRedirect) Validate() error {
return ErrorIfUnset(b.To == "", "To")
}
type BackendOptsTurbocharger struct {
Manifest turbocharger.ObjectID `json:"manifest"`
}
func (b *BackendOptsTurbocharger) Validate() error {
return nil
}
// factories
func SimpleApplication(id string, frontend Frontend, backend Backend) Application {
return Application{
Id: id,
Frontends: []Frontend{
frontend,
},
Backend: backend,
}
}
func SimpleHostnameFrontend(hostname string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostname,
Hostname: hostname,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func | (hostnameRegexp string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostnameRegexp,
HostnameRegexp: hostnameRegexp,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
// catches all requests irregardless of hostname
func PathPrefixFrontend(pathPrefix string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(append([]FrontendOpt{PathPrefix(pathPrefix)}, options...))
return Frontend{
Kind: FrontendKindPathPrefix,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func S3Backend(bucketName string, regionId string, deployedVersion string) Backend {
return Backend{
Kind: BackendKindS3StaticWebsite,
S3StaticWebsiteOpts: &BackendOptsS3StaticWebsite{
BucketName: bucketName,
RegionId: regionId,
DeployedVersion: deployedVersion,
},
}
}
func ReverseProxyBackend(addrs []string, tlsConfig *TlsConfig, passHostHeader bool) Backend {
return Backend{
Kind: BackendKindReverseProxy,
ReverseProxyOpts: &BackendOptsReverseProxy{
Origins: addrs,
TlsConfig: tlsConfig,
PassHostHeader: passHostHeader,
},
}
}
func RedirectBackend(to string) Backend {
return Backend{
Kind: BackendKindRedirect,
RedirectOpts: &BackendOptsRedirect{
To: to,
},
}
}
func TurbochargerBackend(manifestID turbocharger.ObjectID) Backend {
return Backend{
Kind: BackendKindTurbocharger,
TurbochargerOpts: &BackendOptsTurbocharger{
Manifest: manifestID,
},
}
}
func LambdaBackend(functionName string, regionId string) Backend {
return Backend{
Kind: BackendKindAwsLambda,
AwsLambdaOpts: &BackendOptsAwsLambda{
FunctionName: functionName,
RegionId: regionId,
},
}
}
func EdgerouterAdminBackend() Backend {
return Backend{
Kind: BackendKindEdgerouterAdmin,
}
}
func PromMetricsBackend() Backend {
return Backend{
Kind: BackendKindPromMetrics,
}
}
func AuthV0Backend(bearerToken string, authorizedBackend Backend) Backend {
return Backend{
Kind: BackendKindAuthV0,
AuthV0Opts: &BackendOptsAuthV0{
BearerToken: bearerToken,
AuthorizedBackend: &authorizedBackend,
},
}
}
func AuthSsoBackend(
idServerUrl string,
allowedUserIds []string,
audience string,
authorizedBackend Backend,
) Backend {
return Backend{
Kind: BackendKindAuthSso,
AuthSsoOpts: &BackendOptsAuthSso{
IdServerUrl: idServerUrl,
AllowedUserIds: allowedUserIds,
Audience: audience,
AuthorizedBackend: &authorizedBackend,
},
}
}
// describers
func (a *Application) Describe() string {
lines := []string{
a.Id,
" backend = " + a.Backend.Describe(),
}
for _, frontend := range a.Frontends {
lines = append(lines, " frontend = "+frontend.Describe())
}
return strings.Join(lines, "\n")
}
func (f *Frontend) Describe() string {
switch f.Kind {
case FrontendKindHostname:
return string(f.Kind) + ":" + f.Hostname + f.PathPrefix
case FrontendKindHostnameRegexp:
return string(f.Kind) + ":" + f.HostnameRegexp + f.PathPrefix
case FrontendKindPathPrefix:
return string(f.Kind) + ":" + f.PathPrefix
default:
return string(f.Kind)
}
}
func (b *Backend) Describe() string {
switch b.Kind {
case BackendKindS3StaticWebsite:
return string(b.Kind) + ":" + b.S3StaticWebsiteOpts.DeployedVersion
case BackendKindReverseProxy:
return string(b.Kind) + ":" + strings.Join(b.ReverseProxyOpts.Origins, ", ")
case BackendKindAwsLambda:
return string(b.Kind) + ":" + fmt.Sprintf("%s@%s", b.AwsLambdaOpts.FunctionName, b.AwsLambdaOpts.RegionId)
case BackendKindAuthV0:
return string(b.Kind) + ":" + fmt.Sprintf("[bearerToken=...] -> %s", b.AuthV0Opts.AuthorizedBackend.Describe())
case BackendKindRedirect:
return string(b.Kind) + ":" + b.RedirectOpts.To
case BackendKindTurbocharger:
return string(b.Kind) + ":" + b.TurbochargerOpts.Manifest.String()
case BackendKindAuthSso:
return string(b.Kind) + ":" + fmt.Sprintf("[audience=%s] -> %s", b.AuthSsoOpts.Audience, b.AuthSsoOpts.AuthorizedBackend.Describe())
case BackendKindEdgerouterAdmin, BackendKindPromMetrics: // to please exhaustive lint
return string(b.Kind)
default: // should never actually arrive here
return string(b.Kind)
}
}
type TlsConfig struct {
InsecureSkipVerify bool `json:"insecure_skip_verify,omitempty"`
ServerName string `json:"server_name,omitempty"` // used to verify the hostname on the server cert. also sent via SNI
}
func (t *TlsConfig) HasMeaningfulContent() bool {
if t.InsecureSkipVerify || t.ServerName != "" {
return true
} else {
return false
}
}
func (t *TlsConfig) SelfOrNilIfNoMeaningfulContent() *TlsConfig {
if t.HasMeaningfulContent() {
return t
} else {
return nil
}
}
// TODO: gokit/builtin
func ErrorIfUnset(isUnset bool, fieldName string) error {
if isUnset {
return fmt.Errorf("'%s' is required but not set", fieldName)
} else {
return nil
}
}
// TODO: gokit/builtin
func FirstError(errs ...error) error {
for _, err := range errs {
if err != nil {
return err
}
}
return nil
}
// frontend options builder
type frontendOptions struct {
pathPrefix string
stripPathPrefix bool
allowInsecureHTTP bool
}
func getFrontendOptions(fns []FrontendOpt) frontendOptions {
opts := &frontendOptions{
pathPrefix: "/",
}
for _, fn := range fns {
fn(opts)
}
return *opts
}
type FrontendOpt func(opts *frontendOptions)
func AllowInsecureHTTP(opts *frontendOptions) { opts.allowInsecureHTTP = true }
func PathPrefix(pathPrefix string) FrontendOpt {
return func(opts *frontendOptions) {
opts.pathPrefix = pathPrefix
}
}
func StripPathPrefix(opts *frontendOptions) { opts.stripPathPrefix = true }
| RegexpHostnameFrontend | identifier_name |
appconfig.go | // Application configuration data structures
package erconfig
import (
"fmt"
"regexp"
"strings"
"time"
"github.com/function61/edgerouter/pkg/turbocharger"
)
// can be used to fetch the current state of configuration - the apps Edgerouter knows *right now*,
// based on all the discovery mechanisms used
type CurrentConfigAccessor interface {
Apps() []Application
LastUpdated() time.Time
}
// loosely modeled after https://doc.traefik.io/traefik/v1.7/basics/#matchers
type FrontendKind string
const (
FrontendKindHostname FrontendKind = "hostname"
FrontendKindHostnameRegexp FrontendKind = "hostname_regexp"
FrontendKindPathPrefix FrontendKind = "path_prefix"
)
// https://docs.traefik.io/v1.7/basics/#matchers
type Frontend struct {
Kind FrontendKind `json:"kind"`
Hostname string `json:"hostname,omitempty"`
HostnameRegexp string `json:"hostname_regexp,omitempty"`
PathPrefix string `json:"path_prefix"` // applies with both kinds
StripPathPrefix bool `json:"strip_path_prefix,omitempty"`
AllowInsecureHTTP bool `json:"allow_insecure_http,omitempty"`
}
func (f *Frontend) Validate() error {
switch f.Kind {
case FrontendKindHostname:
return ErrorIfUnset(f.Hostname == "", "Hostname")
case FrontendKindHostnameRegexp:
if err := ErrorIfUnset(f.HostnameRegexp == "", "HostnameRegexp"); err != nil {
return err
}
_, err := regexp.Compile(f.HostnameRegexp)
if err != nil {
return fmt.Errorf("HostnameRegexp: %v", err)
}
case FrontendKindPathPrefix:
return ErrorIfUnset(f.PathPrefix == "", "PathPrefix")
default:
return fmt.Errorf("unknown frontend kind: %s", f.Kind)
}
return nil
}
type Application struct {
Id string `json:"id"` // ACLs can reference this, so keep stable (i.e. service replicas/restarts should not affect this)
Frontends []Frontend `json:"frontends"`
Backend Backend `json:"backend"`
}
func (a *Application) Validate() error {
if err := ErrorIfUnset(a.Id == "", "Id"); err != nil {
return err
}
if err := ErrorIfUnset(len(a.Frontends) == 0, "Frontends"); err != nil {
return err
}
for _, frontend := range a.Frontends {
if err := frontend.Validate(); err != nil {
return fmt.Errorf("app %s frontend: %v", a.Id, err)
}
}
switch a.Backend.Kind {
case BackendKindS3StaticWebsite:
return a.Backend.S3StaticWebsiteOpts.Validate()
case BackendKindReverseProxy:
return a.Backend.ReverseProxyOpts.Validate()
case BackendKindAwsLambda:
return a.Backend.AwsLambdaOpts.Validate()
case BackendKindEdgerouterAdmin, BackendKindPromMetrics:
return nil // nothing to validate
case BackendKindAuthV0:
return a.Backend.AuthV0Opts.Validate()
case BackendKindAuthSso:
return a.Backend.AuthSsoOpts.Validate()
case BackendKindRedirect:
return a.Backend.RedirectOpts.Validate()
case BackendKindTurbocharger:
return a.Backend.TurbochargerOpts.Validate()
default:
return fmt.Errorf("app %s backend unkown kind: %s", a.Id, a.Backend.Kind)
}
}
// when adding new kind, remember to update:
// - Application.Validate()
// - Backend.Describe()
// - factory in backendfactory
type BackendKind string
const (
BackendKindS3StaticWebsite BackendKind = "s3_static_website"
BackendKindReverseProxy BackendKind = "reverse_proxy"
BackendKindAwsLambda BackendKind = "aws_lambda"
BackendKindEdgerouterAdmin BackendKind = "edgerouter_admin"
BackendKindAuthV0 BackendKind = "auth_v0"
BackendKindAuthSso BackendKind = "auth_sso"
BackendKindRedirect BackendKind = "redirect"
BackendKindPromMetrics BackendKind = "prom_metrics"
BackendKindTurbocharger BackendKind = "turbocharger"
)
type Backend struct {
Kind BackendKind `json:"kind"`
S3StaticWebsiteOpts *BackendOptsS3StaticWebsite `json:"s3_static_website_opts,omitempty"`
ReverseProxyOpts *BackendOptsReverseProxy `json:"reverse_proxy_opts,omitempty"`
AwsLambdaOpts *BackendOptsAwsLambda `json:"aws_lambda_opts,omitempty"`
AuthV0Opts *BackendOptsAuthV0 `json:"auth_v0_opts,omitempty"`
AuthSsoOpts *BackendOptsAuthSso `json:"auth_sso_opts,omitempty"`
RedirectOpts *BackendOptsRedirect `json:"redirect_opts,omitempty"`
TurbochargerOpts *BackendOptsTurbocharger `json:"turbocharger_opts,omitempty"`
}
type BackendOptsS3StaticWebsite struct {
BucketName string `json:"bucket_name"`
RegionId string `json:"region_id"`
DeployedVersion string `json:"deployed_version"` // can be empty before first deployed version
NotFoundPage string `json:"404_page,omitempty"` // (optional) ex: "404.html", relative to root of deployed site
}
func (b *BackendOptsS3StaticWebsite) Validate() error {
return FirstError(
ErrorIfUnset(b.BucketName == "", "BucketName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsReverseProxy struct {
Origins []string `json:"origins"`
TlsConfig *TlsConfig `json:"tls_config,omitempty"`
Caching bool `json:"caching,omitempty"` // turn on response caching?
PassHostHeader bool `json:"pass_host_header,omitempty"` // use client-sent Host (=true) or origin's hostname? (=false) https://doc.traefik.io/traefik/routing/services/#pass-host-header
IndexDocument string `json:"index_document,omitempty"` // if request path ends in /foo/ ("directory"), rewrite it into /foo/index.html
RemoveQueryString bool `json:"remove_query_string,omitempty"` // reduces cache misses if responses don't vary on qs
HeadersToOrigin map[string]string `json:"headers_to_origin,omitempty"` // force-add headers to be sent to origin
}
func (b *BackendOptsReverseProxy) Validate() error {
return ErrorIfUnset(len(b.Origins) == 0, "Origins")
}
type BackendOptsAwsLambda struct {
FunctionName string `json:"function_name"`
RegionId string `json:"region_id"`
}
func (b *BackendOptsAwsLambda) Validate() error {
return FirstError(
ErrorIfUnset(b.FunctionName == "", "FunctionName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsAuthV0 struct {
BearerToken string `json:"bearer_token"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthV0) Validate() error { | ErrorIfUnset(b.BearerToken == "", "BearerToken"),
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
)
}
type BackendOptsAuthSso struct {
IdServerUrl string `json:"id_server_url,omitempty"`
AllowedUserIds []string `json:"allowed_user_ids"`
Audience string `json:"audience"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthSso) Validate() error {
return FirstError(
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
ErrorIfUnset(b.Audience == "", "Audience"),
)
}
type BackendOptsRedirect struct {
To string `json:"to"`
}
func (b *BackendOptsRedirect) Validate() error {
return ErrorIfUnset(b.To == "", "To")
}
type BackendOptsTurbocharger struct {
Manifest turbocharger.ObjectID `json:"manifest"`
}
func (b *BackendOptsTurbocharger) Validate() error {
return nil
}
// factories
func SimpleApplication(id string, frontend Frontend, backend Backend) Application {
return Application{
Id: id,
Frontends: []Frontend{
frontend,
},
Backend: backend,
}
}
func SimpleHostnameFrontend(hostname string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostname,
Hostname: hostname,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func RegexpHostnameFrontend(hostnameRegexp string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostnameRegexp,
HostnameRegexp: hostnameRegexp,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
// catches all requests irregardless of hostname
func PathPrefixFrontend(pathPrefix string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(append([]FrontendOpt{PathPrefix(pathPrefix)}, options...))
return Frontend{
Kind: FrontendKindPathPrefix,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func S3Backend(bucketName string, regionId string, deployedVersion string) Backend {
return Backend{
Kind: BackendKindS3StaticWebsite,
S3StaticWebsiteOpts: &BackendOptsS3StaticWebsite{
BucketName: bucketName,
RegionId: regionId,
DeployedVersion: deployedVersion,
},
}
}
func ReverseProxyBackend(addrs []string, tlsConfig *TlsConfig, passHostHeader bool) Backend {
return Backend{
Kind: BackendKindReverseProxy,
ReverseProxyOpts: &BackendOptsReverseProxy{
Origins: addrs,
TlsConfig: tlsConfig,
PassHostHeader: passHostHeader,
},
}
}
func RedirectBackend(to string) Backend {
return Backend{
Kind: BackendKindRedirect,
RedirectOpts: &BackendOptsRedirect{
To: to,
},
}
}
func TurbochargerBackend(manifestID turbocharger.ObjectID) Backend {
return Backend{
Kind: BackendKindTurbocharger,
TurbochargerOpts: &BackendOptsTurbocharger{
Manifest: manifestID,
},
}
}
func LambdaBackend(functionName string, regionId string) Backend {
return Backend{
Kind: BackendKindAwsLambda,
AwsLambdaOpts: &BackendOptsAwsLambda{
FunctionName: functionName,
RegionId: regionId,
},
}
}
func EdgerouterAdminBackend() Backend {
return Backend{
Kind: BackendKindEdgerouterAdmin,
}
}
func PromMetricsBackend() Backend {
return Backend{
Kind: BackendKindPromMetrics,
}
}
func AuthV0Backend(bearerToken string, authorizedBackend Backend) Backend {
return Backend{
Kind: BackendKindAuthV0,
AuthV0Opts: &BackendOptsAuthV0{
BearerToken: bearerToken,
AuthorizedBackend: &authorizedBackend,
},
}
}
func AuthSsoBackend(
idServerUrl string,
allowedUserIds []string,
audience string,
authorizedBackend Backend,
) Backend {
return Backend{
Kind: BackendKindAuthSso,
AuthSsoOpts: &BackendOptsAuthSso{
IdServerUrl: idServerUrl,
AllowedUserIds: allowedUserIds,
Audience: audience,
AuthorizedBackend: &authorizedBackend,
},
}
}
// describers
func (a *Application) Describe() string {
lines := []string{
a.Id,
" backend = " + a.Backend.Describe(),
}
for _, frontend := range a.Frontends {
lines = append(lines, " frontend = "+frontend.Describe())
}
return strings.Join(lines, "\n")
}
func (f *Frontend) Describe() string {
switch f.Kind {
case FrontendKindHostname:
return string(f.Kind) + ":" + f.Hostname + f.PathPrefix
case FrontendKindHostnameRegexp:
return string(f.Kind) + ":" + f.HostnameRegexp + f.PathPrefix
case FrontendKindPathPrefix:
return string(f.Kind) + ":" + f.PathPrefix
default:
return string(f.Kind)
}
}
func (b *Backend) Describe() string {
switch b.Kind {
case BackendKindS3StaticWebsite:
return string(b.Kind) + ":" + b.S3StaticWebsiteOpts.DeployedVersion
case BackendKindReverseProxy:
return string(b.Kind) + ":" + strings.Join(b.ReverseProxyOpts.Origins, ", ")
case BackendKindAwsLambda:
return string(b.Kind) + ":" + fmt.Sprintf("%s@%s", b.AwsLambdaOpts.FunctionName, b.AwsLambdaOpts.RegionId)
case BackendKindAuthV0:
return string(b.Kind) + ":" + fmt.Sprintf("[bearerToken=...] -> %s", b.AuthV0Opts.AuthorizedBackend.Describe())
case BackendKindRedirect:
return string(b.Kind) + ":" + b.RedirectOpts.To
case BackendKindTurbocharger:
return string(b.Kind) + ":" + b.TurbochargerOpts.Manifest.String()
case BackendKindAuthSso:
return string(b.Kind) + ":" + fmt.Sprintf("[audience=%s] -> %s", b.AuthSsoOpts.Audience, b.AuthSsoOpts.AuthorizedBackend.Describe())
case BackendKindEdgerouterAdmin, BackendKindPromMetrics: // to please exhaustive lint
return string(b.Kind)
default: // should never actually arrive here
return string(b.Kind)
}
}
type TlsConfig struct {
InsecureSkipVerify bool `json:"insecure_skip_verify,omitempty"`
ServerName string `json:"server_name,omitempty"` // used to verify the hostname on the server cert. also sent via SNI
}
func (t *TlsConfig) HasMeaningfulContent() bool {
if t.InsecureSkipVerify || t.ServerName != "" {
return true
} else {
return false
}
}
func (t *TlsConfig) SelfOrNilIfNoMeaningfulContent() *TlsConfig {
if t.HasMeaningfulContent() {
return t
} else {
return nil
}
}
// TODO: gokit/builtin
func ErrorIfUnset(isUnset bool, fieldName string) error {
if isUnset {
return fmt.Errorf("'%s' is required but not set", fieldName)
} else {
return nil
}
}
// TODO: gokit/builtin
func FirstError(errs ...error) error {
for _, err := range errs {
if err != nil {
return err
}
}
return nil
}
// frontend options builder
type frontendOptions struct {
pathPrefix string
stripPathPrefix bool
allowInsecureHTTP bool
}
func getFrontendOptions(fns []FrontendOpt) frontendOptions {
opts := &frontendOptions{
pathPrefix: "/",
}
for _, fn := range fns {
fn(opts)
}
return *opts
}
type FrontendOpt func(opts *frontendOptions)
func AllowInsecureHTTP(opts *frontendOptions) { opts.allowInsecureHTTP = true }
func PathPrefix(pathPrefix string) FrontendOpt {
return func(opts *frontendOptions) {
opts.pathPrefix = pathPrefix
}
}
func StripPathPrefix(opts *frontendOptions) { opts.stripPathPrefix = true } | return FirstError( | random_line_split |
appconfig.go | // Application configuration data structures
package erconfig
import (
"fmt"
"regexp"
"strings"
"time"
"github.com/function61/edgerouter/pkg/turbocharger"
)
// can be used to fetch the current state of configuration - the apps Edgerouter knows *right now*,
// based on all the discovery mechanisms used
type CurrentConfigAccessor interface {
Apps() []Application
LastUpdated() time.Time
}
// loosely modeled after https://doc.traefik.io/traefik/v1.7/basics/#matchers
type FrontendKind string
const (
FrontendKindHostname FrontendKind = "hostname"
FrontendKindHostnameRegexp FrontendKind = "hostname_regexp"
FrontendKindPathPrefix FrontendKind = "path_prefix"
)
// https://docs.traefik.io/v1.7/basics/#matchers
type Frontend struct {
Kind FrontendKind `json:"kind"`
Hostname string `json:"hostname,omitempty"`
HostnameRegexp string `json:"hostname_regexp,omitempty"`
PathPrefix string `json:"path_prefix"` // applies with both kinds
StripPathPrefix bool `json:"strip_path_prefix,omitempty"`
AllowInsecureHTTP bool `json:"allow_insecure_http,omitempty"`
}
func (f *Frontend) Validate() error {
switch f.Kind {
case FrontendKindHostname:
return ErrorIfUnset(f.Hostname == "", "Hostname")
case FrontendKindHostnameRegexp:
if err := ErrorIfUnset(f.HostnameRegexp == "", "HostnameRegexp"); err != nil {
return err
}
_, err := regexp.Compile(f.HostnameRegexp)
if err != nil {
return fmt.Errorf("HostnameRegexp: %v", err)
}
case FrontendKindPathPrefix:
return ErrorIfUnset(f.PathPrefix == "", "PathPrefix")
default:
return fmt.Errorf("unknown frontend kind: %s", f.Kind)
}
return nil
}
type Application struct {
Id string `json:"id"` // ACLs can reference this, so keep stable (i.e. service replicas/restarts should not affect this)
Frontends []Frontend `json:"frontends"`
Backend Backend `json:"backend"`
}
func (a *Application) Validate() error {
if err := ErrorIfUnset(a.Id == "", "Id"); err != nil {
return err
}
if err := ErrorIfUnset(len(a.Frontends) == 0, "Frontends"); err != nil {
return err
}
for _, frontend := range a.Frontends {
if err := frontend.Validate(); err != nil {
return fmt.Errorf("app %s frontend: %v", a.Id, err)
}
}
switch a.Backend.Kind {
case BackendKindS3StaticWebsite:
return a.Backend.S3StaticWebsiteOpts.Validate()
case BackendKindReverseProxy:
return a.Backend.ReverseProxyOpts.Validate()
case BackendKindAwsLambda:
return a.Backend.AwsLambdaOpts.Validate()
case BackendKindEdgerouterAdmin, BackendKindPromMetrics:
return nil // nothing to validate
case BackendKindAuthV0:
return a.Backend.AuthV0Opts.Validate()
case BackendKindAuthSso:
return a.Backend.AuthSsoOpts.Validate()
case BackendKindRedirect:
return a.Backend.RedirectOpts.Validate()
case BackendKindTurbocharger:
return a.Backend.TurbochargerOpts.Validate()
default:
return fmt.Errorf("app %s backend unkown kind: %s", a.Id, a.Backend.Kind)
}
}
// when adding new kind, remember to update:
// - Application.Validate()
// - Backend.Describe()
// - factory in backendfactory
type BackendKind string
const (
BackendKindS3StaticWebsite BackendKind = "s3_static_website"
BackendKindReverseProxy BackendKind = "reverse_proxy"
BackendKindAwsLambda BackendKind = "aws_lambda"
BackendKindEdgerouterAdmin BackendKind = "edgerouter_admin"
BackendKindAuthV0 BackendKind = "auth_v0"
BackendKindAuthSso BackendKind = "auth_sso"
BackendKindRedirect BackendKind = "redirect"
BackendKindPromMetrics BackendKind = "prom_metrics"
BackendKindTurbocharger BackendKind = "turbocharger"
)
type Backend struct {
Kind BackendKind `json:"kind"`
S3StaticWebsiteOpts *BackendOptsS3StaticWebsite `json:"s3_static_website_opts,omitempty"`
ReverseProxyOpts *BackendOptsReverseProxy `json:"reverse_proxy_opts,omitempty"`
AwsLambdaOpts *BackendOptsAwsLambda `json:"aws_lambda_opts,omitempty"`
AuthV0Opts *BackendOptsAuthV0 `json:"auth_v0_opts,omitempty"`
AuthSsoOpts *BackendOptsAuthSso `json:"auth_sso_opts,omitempty"`
RedirectOpts *BackendOptsRedirect `json:"redirect_opts,omitempty"`
TurbochargerOpts *BackendOptsTurbocharger `json:"turbocharger_opts,omitempty"`
}
type BackendOptsS3StaticWebsite struct {
BucketName string `json:"bucket_name"`
RegionId string `json:"region_id"`
DeployedVersion string `json:"deployed_version"` // can be empty before first deployed version
NotFoundPage string `json:"404_page,omitempty"` // (optional) ex: "404.html", relative to root of deployed site
}
func (b *BackendOptsS3StaticWebsite) Validate() error {
return FirstError(
ErrorIfUnset(b.BucketName == "", "BucketName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsReverseProxy struct {
Origins []string `json:"origins"`
TlsConfig *TlsConfig `json:"tls_config,omitempty"`
Caching bool `json:"caching,omitempty"` // turn on response caching?
PassHostHeader bool `json:"pass_host_header,omitempty"` // use client-sent Host (=true) or origin's hostname? (=false) https://doc.traefik.io/traefik/routing/services/#pass-host-header
IndexDocument string `json:"index_document,omitempty"` // if request path ends in /foo/ ("directory"), rewrite it into /foo/index.html
RemoveQueryString bool `json:"remove_query_string,omitempty"` // reduces cache misses if responses don't vary on qs
HeadersToOrigin map[string]string `json:"headers_to_origin,omitempty"` // force-add headers to be sent to origin
}
func (b *BackendOptsReverseProxy) Validate() error {
return ErrorIfUnset(len(b.Origins) == 0, "Origins")
}
type BackendOptsAwsLambda struct {
FunctionName string `json:"function_name"`
RegionId string `json:"region_id"`
}
func (b *BackendOptsAwsLambda) Validate() error {
return FirstError(
ErrorIfUnset(b.FunctionName == "", "FunctionName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsAuthV0 struct {
BearerToken string `json:"bearer_token"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthV0) Validate() error {
return FirstError(
ErrorIfUnset(b.BearerToken == "", "BearerToken"),
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
)
}
type BackendOptsAuthSso struct {
IdServerUrl string `json:"id_server_url,omitempty"`
AllowedUserIds []string `json:"allowed_user_ids"`
Audience string `json:"audience"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthSso) Validate() error {
return FirstError(
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
ErrorIfUnset(b.Audience == "", "Audience"),
)
}
type BackendOptsRedirect struct {
To string `json:"to"`
}
func (b *BackendOptsRedirect) Validate() error {
return ErrorIfUnset(b.To == "", "To")
}
type BackendOptsTurbocharger struct {
Manifest turbocharger.ObjectID `json:"manifest"`
}
func (b *BackendOptsTurbocharger) Validate() error {
return nil
}
// factories
func SimpleApplication(id string, frontend Frontend, backend Backend) Application {
return Application{
Id: id,
Frontends: []Frontend{
frontend,
},
Backend: backend,
}
}
func SimpleHostnameFrontend(hostname string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostname,
Hostname: hostname,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func RegexpHostnameFrontend(hostnameRegexp string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostnameRegexp,
HostnameRegexp: hostnameRegexp,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
// catches all requests irregardless of hostname
func PathPrefixFrontend(pathPrefix string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(append([]FrontendOpt{PathPrefix(pathPrefix)}, options...))
return Frontend{
Kind: FrontendKindPathPrefix,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func S3Backend(bucketName string, regionId string, deployedVersion string) Backend {
return Backend{
Kind: BackendKindS3StaticWebsite,
S3StaticWebsiteOpts: &BackendOptsS3StaticWebsite{
BucketName: bucketName,
RegionId: regionId,
DeployedVersion: deployedVersion,
},
}
}
func ReverseProxyBackend(addrs []string, tlsConfig *TlsConfig, passHostHeader bool) Backend {
return Backend{
Kind: BackendKindReverseProxy,
ReverseProxyOpts: &BackendOptsReverseProxy{
Origins: addrs,
TlsConfig: tlsConfig,
PassHostHeader: passHostHeader,
},
}
}
func RedirectBackend(to string) Backend {
return Backend{
Kind: BackendKindRedirect,
RedirectOpts: &BackendOptsRedirect{
To: to,
},
}
}
func TurbochargerBackend(manifestID turbocharger.ObjectID) Backend {
return Backend{
Kind: BackendKindTurbocharger,
TurbochargerOpts: &BackendOptsTurbocharger{
Manifest: manifestID,
},
}
}
func LambdaBackend(functionName string, regionId string) Backend {
return Backend{
Kind: BackendKindAwsLambda,
AwsLambdaOpts: &BackendOptsAwsLambda{
FunctionName: functionName,
RegionId: regionId,
},
}
}
func EdgerouterAdminBackend() Backend {
return Backend{
Kind: BackendKindEdgerouterAdmin,
}
}
func PromMetricsBackend() Backend {
return Backend{
Kind: BackendKindPromMetrics,
}
}
func AuthV0Backend(bearerToken string, authorizedBackend Backend) Backend {
return Backend{
Kind: BackendKindAuthV0,
AuthV0Opts: &BackendOptsAuthV0{
BearerToken: bearerToken,
AuthorizedBackend: &authorizedBackend,
},
}
}
func AuthSsoBackend(
idServerUrl string,
allowedUserIds []string,
audience string,
authorizedBackend Backend,
) Backend {
return Backend{
Kind: BackendKindAuthSso,
AuthSsoOpts: &BackendOptsAuthSso{
IdServerUrl: idServerUrl,
AllowedUserIds: allowedUserIds,
Audience: audience,
AuthorizedBackend: &authorizedBackend,
},
}
}
// describers
func (a *Application) Describe() string {
lines := []string{
a.Id,
" backend = " + a.Backend.Describe(),
}
for _, frontend := range a.Frontends {
lines = append(lines, " frontend = "+frontend.Describe())
}
return strings.Join(lines, "\n")
}
func (f *Frontend) Describe() string {
switch f.Kind {
case FrontendKindHostname:
return string(f.Kind) + ":" + f.Hostname + f.PathPrefix
case FrontendKindHostnameRegexp:
return string(f.Kind) + ":" + f.HostnameRegexp + f.PathPrefix
case FrontendKindPathPrefix:
return string(f.Kind) + ":" + f.PathPrefix
default:
return string(f.Kind)
}
}
func (b *Backend) Describe() string {
switch b.Kind {
case BackendKindS3StaticWebsite:
return string(b.Kind) + ":" + b.S3StaticWebsiteOpts.DeployedVersion
case BackendKindReverseProxy:
return string(b.Kind) + ":" + strings.Join(b.ReverseProxyOpts.Origins, ", ")
case BackendKindAwsLambda:
return string(b.Kind) + ":" + fmt.Sprintf("%s@%s", b.AwsLambdaOpts.FunctionName, b.AwsLambdaOpts.RegionId)
case BackendKindAuthV0:
return string(b.Kind) + ":" + fmt.Sprintf("[bearerToken=...] -> %s", b.AuthV0Opts.AuthorizedBackend.Describe())
case BackendKindRedirect:
return string(b.Kind) + ":" + b.RedirectOpts.To
case BackendKindTurbocharger:
return string(b.Kind) + ":" + b.TurbochargerOpts.Manifest.String()
case BackendKindAuthSso:
return string(b.Kind) + ":" + fmt.Sprintf("[audience=%s] -> %s", b.AuthSsoOpts.Audience, b.AuthSsoOpts.AuthorizedBackend.Describe())
case BackendKindEdgerouterAdmin, BackendKindPromMetrics: // to please exhaustive lint
return string(b.Kind)
default: // should never actually arrive here
return string(b.Kind)
}
}
type TlsConfig struct {
InsecureSkipVerify bool `json:"insecure_skip_verify,omitempty"`
ServerName string `json:"server_name,omitempty"` // used to verify the hostname on the server cert. also sent via SNI
}
func (t *TlsConfig) HasMeaningfulContent() bool {
if t.InsecureSkipVerify || t.ServerName != "" {
return true
} else {
return false
}
}
func (t *TlsConfig) SelfOrNilIfNoMeaningfulContent() *TlsConfig {
if t.HasMeaningfulContent() {
return t
} else {
return nil
}
}
// TODO: gokit/builtin
func ErrorIfUnset(isUnset bool, fieldName string) error {
if isUnset {
return fmt.Errorf("'%s' is required but not set", fieldName)
} else {
return nil
}
}
// TODO: gokit/builtin
func FirstError(errs ...error) error {
for _, err := range errs |
return nil
}
// frontend options builder
type frontendOptions struct {
pathPrefix string
stripPathPrefix bool
allowInsecureHTTP bool
}
func getFrontendOptions(fns []FrontendOpt) frontendOptions {
opts := &frontendOptions{
pathPrefix: "/",
}
for _, fn := range fns {
fn(opts)
}
return *opts
}
type FrontendOpt func(opts *frontendOptions)
func AllowInsecureHTTP(opts *frontendOptions) { opts.allowInsecureHTTP = true }
func PathPrefix(pathPrefix string) FrontendOpt {
return func(opts *frontendOptions) {
opts.pathPrefix = pathPrefix
}
}
func StripPathPrefix(opts *frontendOptions) { opts.stripPathPrefix = true }
| {
if err != nil {
return err
}
} | conditional_block |
appconfig.go | // Application configuration data structures
package erconfig
import (
"fmt"
"regexp"
"strings"
"time"
"github.com/function61/edgerouter/pkg/turbocharger"
)
// can be used to fetch the current state of configuration - the apps Edgerouter knows *right now*,
// based on all the discovery mechanisms used
type CurrentConfigAccessor interface {
Apps() []Application
LastUpdated() time.Time
}
// loosely modeled after https://doc.traefik.io/traefik/v1.7/basics/#matchers
type FrontendKind string
const (
FrontendKindHostname FrontendKind = "hostname"
FrontendKindHostnameRegexp FrontendKind = "hostname_regexp"
FrontendKindPathPrefix FrontendKind = "path_prefix"
)
// https://docs.traefik.io/v1.7/basics/#matchers
type Frontend struct {
Kind FrontendKind `json:"kind"`
Hostname string `json:"hostname,omitempty"`
HostnameRegexp string `json:"hostname_regexp,omitempty"`
PathPrefix string `json:"path_prefix"` // applies with both kinds
StripPathPrefix bool `json:"strip_path_prefix,omitempty"`
AllowInsecureHTTP bool `json:"allow_insecure_http,omitempty"`
}
func (f *Frontend) Validate() error {
switch f.Kind {
case FrontendKindHostname:
return ErrorIfUnset(f.Hostname == "", "Hostname")
case FrontendKindHostnameRegexp:
if err := ErrorIfUnset(f.HostnameRegexp == "", "HostnameRegexp"); err != nil {
return err
}
_, err := regexp.Compile(f.HostnameRegexp)
if err != nil {
return fmt.Errorf("HostnameRegexp: %v", err)
}
case FrontendKindPathPrefix:
return ErrorIfUnset(f.PathPrefix == "", "PathPrefix")
default:
return fmt.Errorf("unknown frontend kind: %s", f.Kind)
}
return nil
}
type Application struct {
Id string `json:"id"` // ACLs can reference this, so keep stable (i.e. service replicas/restarts should not affect this)
Frontends []Frontend `json:"frontends"`
Backend Backend `json:"backend"`
}
func (a *Application) Validate() error {
if err := ErrorIfUnset(a.Id == "", "Id"); err != nil {
return err
}
if err := ErrorIfUnset(len(a.Frontends) == 0, "Frontends"); err != nil {
return err
}
for _, frontend := range a.Frontends {
if err := frontend.Validate(); err != nil {
return fmt.Errorf("app %s frontend: %v", a.Id, err)
}
}
switch a.Backend.Kind {
case BackendKindS3StaticWebsite:
return a.Backend.S3StaticWebsiteOpts.Validate()
case BackendKindReverseProxy:
return a.Backend.ReverseProxyOpts.Validate()
case BackendKindAwsLambda:
return a.Backend.AwsLambdaOpts.Validate()
case BackendKindEdgerouterAdmin, BackendKindPromMetrics:
return nil // nothing to validate
case BackendKindAuthV0:
return a.Backend.AuthV0Opts.Validate()
case BackendKindAuthSso:
return a.Backend.AuthSsoOpts.Validate()
case BackendKindRedirect:
return a.Backend.RedirectOpts.Validate()
case BackendKindTurbocharger:
return a.Backend.TurbochargerOpts.Validate()
default:
return fmt.Errorf("app %s backend unkown kind: %s", a.Id, a.Backend.Kind)
}
}
// when adding new kind, remember to update:
// - Application.Validate()
// - Backend.Describe()
// - factory in backendfactory
type BackendKind string
const (
BackendKindS3StaticWebsite BackendKind = "s3_static_website"
BackendKindReverseProxy BackendKind = "reverse_proxy"
BackendKindAwsLambda BackendKind = "aws_lambda"
BackendKindEdgerouterAdmin BackendKind = "edgerouter_admin"
BackendKindAuthV0 BackendKind = "auth_v0"
BackendKindAuthSso BackendKind = "auth_sso"
BackendKindRedirect BackendKind = "redirect"
BackendKindPromMetrics BackendKind = "prom_metrics"
BackendKindTurbocharger BackendKind = "turbocharger"
)
type Backend struct {
Kind BackendKind `json:"kind"`
S3StaticWebsiteOpts *BackendOptsS3StaticWebsite `json:"s3_static_website_opts,omitempty"`
ReverseProxyOpts *BackendOptsReverseProxy `json:"reverse_proxy_opts,omitempty"`
AwsLambdaOpts *BackendOptsAwsLambda `json:"aws_lambda_opts,omitempty"`
AuthV0Opts *BackendOptsAuthV0 `json:"auth_v0_opts,omitempty"`
AuthSsoOpts *BackendOptsAuthSso `json:"auth_sso_opts,omitempty"`
RedirectOpts *BackendOptsRedirect `json:"redirect_opts,omitempty"`
TurbochargerOpts *BackendOptsTurbocharger `json:"turbocharger_opts,omitempty"`
}
type BackendOptsS3StaticWebsite struct {
BucketName string `json:"bucket_name"`
RegionId string `json:"region_id"`
DeployedVersion string `json:"deployed_version"` // can be empty before first deployed version
NotFoundPage string `json:"404_page,omitempty"` // (optional) ex: "404.html", relative to root of deployed site
}
func (b *BackendOptsS3StaticWebsite) Validate() error {
return FirstError(
ErrorIfUnset(b.BucketName == "", "BucketName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsReverseProxy struct {
Origins []string `json:"origins"`
TlsConfig *TlsConfig `json:"tls_config,omitempty"`
Caching bool `json:"caching,omitempty"` // turn on response caching?
PassHostHeader bool `json:"pass_host_header,omitempty"` // use client-sent Host (=true) or origin's hostname? (=false) https://doc.traefik.io/traefik/routing/services/#pass-host-header
IndexDocument string `json:"index_document,omitempty"` // if request path ends in /foo/ ("directory"), rewrite it into /foo/index.html
RemoveQueryString bool `json:"remove_query_string,omitempty"` // reduces cache misses if responses don't vary on qs
HeadersToOrigin map[string]string `json:"headers_to_origin,omitempty"` // force-add headers to be sent to origin
}
func (b *BackendOptsReverseProxy) Validate() error |
type BackendOptsAwsLambda struct {
FunctionName string `json:"function_name"`
RegionId string `json:"region_id"`
}
func (b *BackendOptsAwsLambda) Validate() error {
return FirstError(
ErrorIfUnset(b.FunctionName == "", "FunctionName"),
ErrorIfUnset(b.RegionId == "", "RegionId"),
)
}
type BackendOptsAuthV0 struct {
BearerToken string `json:"bearer_token"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthV0) Validate() error {
return FirstError(
ErrorIfUnset(b.BearerToken == "", "BearerToken"),
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
)
}
type BackendOptsAuthSso struct {
IdServerUrl string `json:"id_server_url,omitempty"`
AllowedUserIds []string `json:"allowed_user_ids"`
Audience string `json:"audience"`
AuthorizedBackend *Backend `json:"authorized_backend"` // ptr for validation
}
func (b *BackendOptsAuthSso) Validate() error {
return FirstError(
ErrorIfUnset(b.AuthorizedBackend == nil, "AuthorizedBackend"),
ErrorIfUnset(b.Audience == "", "Audience"),
)
}
type BackendOptsRedirect struct {
To string `json:"to"`
}
func (b *BackendOptsRedirect) Validate() error {
return ErrorIfUnset(b.To == "", "To")
}
type BackendOptsTurbocharger struct {
Manifest turbocharger.ObjectID `json:"manifest"`
}
func (b *BackendOptsTurbocharger) Validate() error {
return nil
}
// factories
func SimpleApplication(id string, frontend Frontend, backend Backend) Application {
return Application{
Id: id,
Frontends: []Frontend{
frontend,
},
Backend: backend,
}
}
func SimpleHostnameFrontend(hostname string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostname,
Hostname: hostname,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func RegexpHostnameFrontend(hostnameRegexp string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(options)
return Frontend{
Kind: FrontendKindHostnameRegexp,
HostnameRegexp: hostnameRegexp,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
// catches all requests irregardless of hostname
func PathPrefixFrontend(pathPrefix string, options ...FrontendOpt) Frontend {
opts := getFrontendOptions(append([]FrontendOpt{PathPrefix(pathPrefix)}, options...))
return Frontend{
Kind: FrontendKindPathPrefix,
PathPrefix: opts.pathPrefix,
StripPathPrefix: opts.stripPathPrefix,
AllowInsecureHTTP: opts.allowInsecureHTTP,
}
}
func S3Backend(bucketName string, regionId string, deployedVersion string) Backend {
return Backend{
Kind: BackendKindS3StaticWebsite,
S3StaticWebsiteOpts: &BackendOptsS3StaticWebsite{
BucketName: bucketName,
RegionId: regionId,
DeployedVersion: deployedVersion,
},
}
}
func ReverseProxyBackend(addrs []string, tlsConfig *TlsConfig, passHostHeader bool) Backend {
return Backend{
Kind: BackendKindReverseProxy,
ReverseProxyOpts: &BackendOptsReverseProxy{
Origins: addrs,
TlsConfig: tlsConfig,
PassHostHeader: passHostHeader,
},
}
}
func RedirectBackend(to string) Backend {
return Backend{
Kind: BackendKindRedirect,
RedirectOpts: &BackendOptsRedirect{
To: to,
},
}
}
func TurbochargerBackend(manifestID turbocharger.ObjectID) Backend {
return Backend{
Kind: BackendKindTurbocharger,
TurbochargerOpts: &BackendOptsTurbocharger{
Manifest: manifestID,
},
}
}
func LambdaBackend(functionName string, regionId string) Backend {
return Backend{
Kind: BackendKindAwsLambda,
AwsLambdaOpts: &BackendOptsAwsLambda{
FunctionName: functionName,
RegionId: regionId,
},
}
}
func EdgerouterAdminBackend() Backend {
return Backend{
Kind: BackendKindEdgerouterAdmin,
}
}
func PromMetricsBackend() Backend {
return Backend{
Kind: BackendKindPromMetrics,
}
}
func AuthV0Backend(bearerToken string, authorizedBackend Backend) Backend {
return Backend{
Kind: BackendKindAuthV0,
AuthV0Opts: &BackendOptsAuthV0{
BearerToken: bearerToken,
AuthorizedBackend: &authorizedBackend,
},
}
}
func AuthSsoBackend(
idServerUrl string,
allowedUserIds []string,
audience string,
authorizedBackend Backend,
) Backend {
return Backend{
Kind: BackendKindAuthSso,
AuthSsoOpts: &BackendOptsAuthSso{
IdServerUrl: idServerUrl,
AllowedUserIds: allowedUserIds,
Audience: audience,
AuthorizedBackend: &authorizedBackend,
},
}
}
// describers
func (a *Application) Describe() string {
lines := []string{
a.Id,
" backend = " + a.Backend.Describe(),
}
for _, frontend := range a.Frontends {
lines = append(lines, " frontend = "+frontend.Describe())
}
return strings.Join(lines, "\n")
}
func (f *Frontend) Describe() string {
switch f.Kind {
case FrontendKindHostname:
return string(f.Kind) + ":" + f.Hostname + f.PathPrefix
case FrontendKindHostnameRegexp:
return string(f.Kind) + ":" + f.HostnameRegexp + f.PathPrefix
case FrontendKindPathPrefix:
return string(f.Kind) + ":" + f.PathPrefix
default:
return string(f.Kind)
}
}
func (b *Backend) Describe() string {
switch b.Kind {
case BackendKindS3StaticWebsite:
return string(b.Kind) + ":" + b.S3StaticWebsiteOpts.DeployedVersion
case BackendKindReverseProxy:
return string(b.Kind) + ":" + strings.Join(b.ReverseProxyOpts.Origins, ", ")
case BackendKindAwsLambda:
return string(b.Kind) + ":" + fmt.Sprintf("%s@%s", b.AwsLambdaOpts.FunctionName, b.AwsLambdaOpts.RegionId)
case BackendKindAuthV0:
return string(b.Kind) + ":" + fmt.Sprintf("[bearerToken=...] -> %s", b.AuthV0Opts.AuthorizedBackend.Describe())
case BackendKindRedirect:
return string(b.Kind) + ":" + b.RedirectOpts.To
case BackendKindTurbocharger:
return string(b.Kind) + ":" + b.TurbochargerOpts.Manifest.String()
case BackendKindAuthSso:
return string(b.Kind) + ":" + fmt.Sprintf("[audience=%s] -> %s", b.AuthSsoOpts.Audience, b.AuthSsoOpts.AuthorizedBackend.Describe())
case BackendKindEdgerouterAdmin, BackendKindPromMetrics: // to please exhaustive lint
return string(b.Kind)
default: // should never actually arrive here
return string(b.Kind)
}
}
type TlsConfig struct {
InsecureSkipVerify bool `json:"insecure_skip_verify,omitempty"`
ServerName string `json:"server_name,omitempty"` // used to verify the hostname on the server cert. also sent via SNI
}
func (t *TlsConfig) HasMeaningfulContent() bool {
if t.InsecureSkipVerify || t.ServerName != "" {
return true
} else {
return false
}
}
func (t *TlsConfig) SelfOrNilIfNoMeaningfulContent() *TlsConfig {
if t.HasMeaningfulContent() {
return t
} else {
return nil
}
}
// TODO: gokit/builtin
func ErrorIfUnset(isUnset bool, fieldName string) error {
if isUnset {
return fmt.Errorf("'%s' is required but not set", fieldName)
} else {
return nil
}
}
// TODO: gokit/builtin
func FirstError(errs ...error) error {
for _, err := range errs {
if err != nil {
return err
}
}
return nil
}
// frontend options builder
type frontendOptions struct {
pathPrefix string
stripPathPrefix bool
allowInsecureHTTP bool
}
func getFrontendOptions(fns []FrontendOpt) frontendOptions {
opts := &frontendOptions{
pathPrefix: "/",
}
for _, fn := range fns {
fn(opts)
}
return *opts
}
type FrontendOpt func(opts *frontendOptions)
func AllowInsecureHTTP(opts *frontendOptions) { opts.allowInsecureHTTP = true }
func PathPrefix(pathPrefix string) FrontendOpt {
return func(opts *frontendOptions) {
opts.pathPrefix = pathPrefix
}
}
func StripPathPrefix(opts *frontendOptions) { opts.stripPathPrefix = true }
| {
return ErrorIfUnset(len(b.Origins) == 0, "Origins")
} | identifier_body |
flutter_service_worker.js | 'use strict';
const MANIFEST = 'flutter-app-manifest';
const TEMP = 'flutter-temp-cache';
const CACHE_NAME = 'flutter-app-cache';
const RESOURCES = {
"assets/AssetManifest.json": "9b877279b82fcd9d9c6f92ca3c999525",
"assets/assets/sounds/Index10Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index10Length1.wav": "e3876e80d51303ca86beedab3f8ef10b",
"assets/assets/sounds/Index10Length2.wav": "06402c9c967ec183ee951b3786fc8c74",
"assets/assets/sounds/Index10Length3.wav": "024dcd98260d9cc4751ac6264f0fde0c",
"assets/assets/sounds/Index10Length4.wav": "c4a6f3bd2e0195e77f5f38b4191b288e",
"assets/assets/sounds/Index10Length6.wav": "0d76e4e436b4b43a0c2fbac65307964d",
"assets/assets/sounds/Index11Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index11Length1.wav": "7bdbcff632a39166b9fe9d74a788636e",
"assets/assets/sounds/Index11Length2.wav": "42e8f17a8f797493edc44bcc479d60fa",
"assets/assets/sounds/Index11Length3.wav": "a05e66d8ff1c998d9638567727e49645",
"assets/assets/sounds/Index11Length4.wav": "584125ca091d3be316c08ee797719d14",
"assets/assets/sounds/Index11Length6.wav": "6cb646a92523094c596756be14171dda",
"assets/assets/sounds/Index12Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index12Length1.wav": "11247e748acdc0679524b3160b840563",
"assets/assets/sounds/Index12Length2.wav": "29e138b05d825e94cb21b1a09a229d6e",
"assets/assets/sounds/Index12Length3.wav": "a43d31bab7e6ba732333e914b21366ca",
"assets/assets/sounds/Index12Length4.wav": "249220af94595507cf2b8be63ae06b53",
"assets/assets/sounds/Index13Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index13Length1.wav": "12d17bffd3558c90721779474d2db81b", | "assets/assets/sounds/Index13Length4.wav": "4eac2adb92f81fe26c37e268f703fa2c",
"assets/assets/sounds/Index14Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index14Length1.wav": "aa7777a5e53a9514c89a01beedf05a13",
"assets/assets/sounds/Index14Length2.wav": "85f14e073d1698e24709436f91eae42c",
"assets/assets/sounds/Index14Length3.wav": "cdf2a57e901376c7655ac9b90728cabd",
"assets/assets/sounds/Index15Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index15Length1.wav": "51de52e5be0125a54c637a7a736b3055",
"assets/assets/sounds/Index15Length2.wav": "7db7b43cab2c9294dd458e1d0b57dfae",
"assets/assets/sounds/Index16Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index16Length1.wav": "099e44307f3bdf7174b4f44836ecc658",
"assets/assets/sounds/Index1Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index1Length1.wav": "772615684d0f2489bf9166a569356866",
"assets/assets/sounds/Index1Length12.wav": "4887a12550b4952f0f6e189ddbe323c7",
"assets/assets/sounds/Index1Length16.wav": "b119c78b320e927c6f3ffae5a9f30a04",
"assets/assets/sounds/Index1Length2.wav": "39eb8faadc824e8fed8d18866ce1a2d4",
"assets/assets/sounds/Index1Length3.wav": "3999d2770500a785fa6d10e571e16920",
"assets/assets/sounds/Index1Length4.wav": "01ae3bf9420f02a7ae3542ec8d898485",
"assets/assets/sounds/Index1Length6.wav": "1edf1340657f9bdc3d048a1409c3b791",
"assets/assets/sounds/Index1Length8.wav": "23571245b6b38b08e7938f26dade427a",
"assets/assets/sounds/Index2Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index2Length1.wav": "2b209a60499653a81c82b2e34cadee05",
"assets/assets/sounds/Index2Length12.wav": "b77500c1be01fc23fc87df1d02a88584",
"assets/assets/sounds/Index2Length2.wav": "d3b4a0fbcc215c7c9b398b481b646e69",
"assets/assets/sounds/Index2Length3.wav": "9d4681203fa79fda8a799f8abb2274d0",
"assets/assets/sounds/Index2Length4.wav": "9bc307dfc2810972d7fb851780bd4856",
"assets/assets/sounds/Index2Length6.wav": "12256e9827dd704044361b6bea633697",
"assets/assets/sounds/Index2Length8.wav": "236cc6760ff8709f610a5df29f7ee914",
"assets/assets/sounds/Index3Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index3Length1.wav": "25501e35dab4ac090f3d393961c71469",
"assets/assets/sounds/Index3Length12.wav": "6b153fdd906699556ac9c48fe901f824",
"assets/assets/sounds/Index3Length2.wav": "3edf9bea4df56bd4e8298b080258e7ed",
"assets/assets/sounds/Index3Length3.wav": "46134b6c36782d084478b5e9e6b762fe",
"assets/assets/sounds/Index3Length4.wav": "6f107aa30809dc0acc671da9d57e0c98",
"assets/assets/sounds/Index3Length6.wav": "665578c3526000a6bb0979a8b71826af",
"assets/assets/sounds/Index3Length8.wav": "3d806f30a2daba348cc6b946cbcccdf7",
"assets/assets/sounds/Index4Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index4Length1.wav": "3743e7eb1b51781664c79375b942c6da",
"assets/assets/sounds/Index4Length12.wav": "c43ada913dc18cad452a60dd182ed75e",
"assets/assets/sounds/Index4Length2.wav": "6cf0776fffa186c2c2a12558d83768a7",
"assets/assets/sounds/Index4Length3.wav": "f849bddc85b2e7c6fd1f63bcbbd40a6d",
"assets/assets/sounds/Index4Length4.wav": "6b24a2da10a2f1c6a0b04191473341a1",
"assets/assets/sounds/Index4Length6.wav": "c458131ab9551df4f184a25f4e11b263",
"assets/assets/sounds/Index4Length8.wav": "81464c9774dad9c6a3580647ed9d3666",
"assets/assets/sounds/Index5Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index5Length1.wav": "806ab8337ca57173787a4e702a1344ef",
"assets/assets/sounds/Index5Length12.wav": "c1fe4a3b8bc188a2815d18aabb8cef2d",
"assets/assets/sounds/Index5Length2.wav": "87db89ddecceb04ed380c9c0d4ff5859",
"assets/assets/sounds/Index5Length3.wav": "b05ee87ff7b682b9297c55dcb98d93a1",
"assets/assets/sounds/Index5Length4.wav": "3978dc493ca597ce2b1686c187b7de0c",
"assets/assets/sounds/Index5Length6.wav": "94340e0274991073a0cc1edbf4af7a3b",
"assets/assets/sounds/Index5Length8.wav": "4ae7798d014cc138b73f42b33898b042",
"assets/assets/sounds/Index6Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index6Length1.wav": "35f2f15487f285b565a6d84042e0d74f",
"assets/assets/sounds/Index6Length2.wav": "396cfd69805ac29ee24ab5fd58364887",
"assets/assets/sounds/Index6Length3.wav": "4dcac0661072a743e9a60d37be96c83f",
"assets/assets/sounds/Index6Length4.wav": "199f2803267b1bd86cf2aea43bd9e0e9",
"assets/assets/sounds/Index6Length6.wav": "7db149b1a69a631b7ac7d6eada0fe29b",
"assets/assets/sounds/Index6Length8.wav": "3c989280c19fa8396aaf3d5cd400ee52",
"assets/assets/sounds/Index7Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index7Length1.wav": "4ab63c3dcdbf66c8e8684e4571afdb31",
"assets/assets/sounds/Index7Length2.wav": "bda7c70ac50680c753f48c31c94abebd",
"assets/assets/sounds/Index7Length3.wav": "2e5c9f44fd79ead45bed0bf55a3be14d",
"assets/assets/sounds/Index7Length4.wav": "1db2d77c01871ee551dd68f75f66c107",
"assets/assets/sounds/Index7Length6.wav": "e4e82234ff5db0537f85be08df8cac2a",
"assets/assets/sounds/Index7Length8.wav": "5a58689b15708f9b02869612ab96a54b",
"assets/assets/sounds/Index8Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index8Length1.wav": "72aed5bbbe74c1d238c625940216f407",
"assets/assets/sounds/Index8Length2.wav": "8a27589e239f08d83a6eeb4b3a7f02e5",
"assets/assets/sounds/Index8Length3.wav": "fe792670c83be5650b0a692e008c68e9",
"assets/assets/sounds/Index8Length4.wav": "c2c338ea17f946e033e63e740e84853e",
"assets/assets/sounds/Index8Length6.wav": "bf359945806a4c1a1af022d3ab7c8388",
"assets/assets/sounds/Index8Length8.wav": "7685dc4811b1713a82800e28d25e4165",
"assets/assets/sounds/Index9Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index9Length1.wav": "ef3438e19d91637ec197ce7a0b5bfd0d",
"assets/assets/sounds/Index9Length2.wav": "0b870a0d1df07e11fc413c41e70b5d53",
"assets/assets/sounds/Index9Length3.wav": "f2cdebe3e1ce45cd4263368a22fe94ec",
"assets/assets/sounds/Index9Length4.wav": "dc5120192375369d8e2e447894b356b1",
"assets/assets/sounds/Index9Length6.wav": "590571d21627f180ab3f970abf145585",
"assets/assets/sounds/Index9Length8.wav": "1d3b950fd37d932669e0dac2054aefc1",
"assets/assets/sounds/metronome.wav": "fe5ef28c9c447aef8116393c4921a937",
"assets/FontManifest.json": "a5aee76623c8e3df7689a29833438d1a",
"assets/fonts/MaterialIcons-Regular.otf": "1288c9e28052e028aba623321f7826ac",
"assets/fonts/Musisync-KVLZ.ttf": "21c3d12f3e3ccbae6bd5441f77538930",
"assets/fonts/Musisync-qYy6.ttf": "7663fd6b156c6882e4b66bd72556e0ee",
"assets/NOTICES": "2d7d1132aa7574db1cc58b232ff796a0",
"assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "115e937bb829a890521f72d2e664b632",
"favicon.png": "5dcef449791fa27946b3d35ad8803796",
"icons/Icon-192.png": "ac9a721a12bbc803b44f645561ecb1e1",
"icons/Icon-512.png": "96e752610906ba2a93c65f8abe1645f1",
"index.html": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"/": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"main.dart.js": "0f32e2c67bc6400a2dd8d2bd7cadd89b",
"manifest.json": "0f93fc4cca58b0f1e83b1b53b384cc49",
"version.json": "dfe1ec7b18cea6bb77c40e3c6a6958b3"
};
// The application shell files that are downloaded before a service worker can
// start.
const CORE = [
"/",
"main.dart.js",
"index.html",
"assets/NOTICES",
"assets/AssetManifest.json",
"assets/FontManifest.json"];
// During install, the TEMP cache is populated with the application shell files.
self.addEventListener("install", (event) => {
self.skipWaiting();
return event.waitUntil(
caches.open(TEMP).then((cache) => {
return cache.addAll(
CORE.map((value) => new Request(value + '?revision=' + RESOURCES[value], {'cache': 'reload'})));
})
);
});
// During activate, the cache is populated with the temp files downloaded in
// install. If this service worker is upgrading from one with a saved
// MANIFEST, then use this to retain unchanged resource files.
self.addEventListener("activate", function(event) {
return event.waitUntil(async function() {
try {
var contentCache = await caches.open(CACHE_NAME);
var tempCache = await caches.open(TEMP);
var manifestCache = await caches.open(MANIFEST);
var manifest = await manifestCache.match('manifest');
// When there is no prior manifest, clear the entire cache.
if (!manifest) {
await caches.delete(CACHE_NAME);
contentCache = await caches.open(CACHE_NAME);
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
}
var oldManifest = await manifest.json();
var origin = self.location.origin;
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
// If a resource from the old manifest is not in the new cache, or if
// the MD5 sum has changed, delete it. Otherwise the resource is left
// in the cache and can be reused by the new service worker.
if (!RESOURCES[key] || RESOURCES[key] != oldManifest[key]) {
await contentCache.delete(request);
}
}
// Populate the cache with the app shell TEMP files, potentially overwriting
// cache files preserved above.
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
} catch (err) {
// On an unhandled exception the state of the cache cannot be guaranteed.
console.error('Failed to upgrade service worker: ' + err);
await caches.delete(CACHE_NAME);
await caches.delete(TEMP);
await caches.delete(MANIFEST);
}
}());
});
// The fetch handler redirects requests for RESOURCE files to the service
// worker cache.
self.addEventListener("fetch", (event) => {
if (event.request.method !== 'GET') {
return;
}
var origin = self.location.origin;
var key = event.request.url.substring(origin.length + 1);
// Redirect URLs to the index.html
if (key.indexOf('?v=') != -1) {
key = key.split('?v=')[0];
}
if (event.request.url == origin || event.request.url.startsWith(origin + '/#') || key == '') {
key = '/';
}
// If the URL is not the RESOURCE list then return to signal that the
// browser should take over.
if (!RESOURCES[key]) {
return;
}
// If the URL is the index.html, perform an online-first request.
if (key == '/') {
return onlineFirst(event);
}
event.respondWith(caches.open(CACHE_NAME)
.then((cache) => {
return cache.match(event.request).then((response) => {
// Either respond with the cached resource, or perform a fetch and
// lazily populate the cache.
return response || fetch(event.request).then((response) => {
cache.put(event.request, response.clone());
return response;
});
})
})
);
});
self.addEventListener('message', (event) => {
// SkipWaiting can be used to immediately activate a waiting service worker.
// This will also require a page refresh triggered by the main worker.
if (event.data === 'skipWaiting') {
self.skipWaiting();
return;
}
if (event.data === 'downloadOffline') {
downloadOffline();
return;
}
});
// Download offline will check the RESOURCES for all files not in the cache
// and populate them.
async function downloadOffline() {
var resources = [];
var contentCache = await caches.open(CACHE_NAME);
var currentContent = {};
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
currentContent[key] = true;
}
for (var resourceKey of Object.keys(RESOURCES)) {
if (!currentContent[resourceKey]) {
resources.push(resourceKey);
}
}
return contentCache.addAll(resources);
}
// Attempt to download the resource online before falling back to
// the offline cache.
function onlineFirst(event) {
return event.respondWith(
fetch(event.request).then((response) => {
return caches.open(CACHE_NAME).then((cache) => {
cache.put(event.request, response.clone());
return response;
});
}).catch((error) => {
return caches.open(CACHE_NAME).then((cache) => {
return cache.match(event.request).then((response) => {
if (response != null) {
return response;
}
throw error;
});
});
})
);
} | "assets/assets/sounds/Index13Length2.wav": "746f509a12e5f7f2ece7209e7722f3f9",
"assets/assets/sounds/Index13Length3.wav": "90665e5c989f7e7379e2a7e4d54f3aac", | random_line_split |
flutter_service_worker.js | 'use strict';
const MANIFEST = 'flutter-app-manifest';
const TEMP = 'flutter-temp-cache';
const CACHE_NAME = 'flutter-app-cache';
const RESOURCES = {
"assets/AssetManifest.json": "9b877279b82fcd9d9c6f92ca3c999525",
"assets/assets/sounds/Index10Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index10Length1.wav": "e3876e80d51303ca86beedab3f8ef10b",
"assets/assets/sounds/Index10Length2.wav": "06402c9c967ec183ee951b3786fc8c74",
"assets/assets/sounds/Index10Length3.wav": "024dcd98260d9cc4751ac6264f0fde0c",
"assets/assets/sounds/Index10Length4.wav": "c4a6f3bd2e0195e77f5f38b4191b288e",
"assets/assets/sounds/Index10Length6.wav": "0d76e4e436b4b43a0c2fbac65307964d",
"assets/assets/sounds/Index11Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index11Length1.wav": "7bdbcff632a39166b9fe9d74a788636e",
"assets/assets/sounds/Index11Length2.wav": "42e8f17a8f797493edc44bcc479d60fa",
"assets/assets/sounds/Index11Length3.wav": "a05e66d8ff1c998d9638567727e49645",
"assets/assets/sounds/Index11Length4.wav": "584125ca091d3be316c08ee797719d14",
"assets/assets/sounds/Index11Length6.wav": "6cb646a92523094c596756be14171dda",
"assets/assets/sounds/Index12Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index12Length1.wav": "11247e748acdc0679524b3160b840563",
"assets/assets/sounds/Index12Length2.wav": "29e138b05d825e94cb21b1a09a229d6e",
"assets/assets/sounds/Index12Length3.wav": "a43d31bab7e6ba732333e914b21366ca",
"assets/assets/sounds/Index12Length4.wav": "249220af94595507cf2b8be63ae06b53",
"assets/assets/sounds/Index13Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index13Length1.wav": "12d17bffd3558c90721779474d2db81b",
"assets/assets/sounds/Index13Length2.wav": "746f509a12e5f7f2ece7209e7722f3f9",
"assets/assets/sounds/Index13Length3.wav": "90665e5c989f7e7379e2a7e4d54f3aac",
"assets/assets/sounds/Index13Length4.wav": "4eac2adb92f81fe26c37e268f703fa2c",
"assets/assets/sounds/Index14Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index14Length1.wav": "aa7777a5e53a9514c89a01beedf05a13",
"assets/assets/sounds/Index14Length2.wav": "85f14e073d1698e24709436f91eae42c",
"assets/assets/sounds/Index14Length3.wav": "cdf2a57e901376c7655ac9b90728cabd",
"assets/assets/sounds/Index15Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index15Length1.wav": "51de52e5be0125a54c637a7a736b3055",
"assets/assets/sounds/Index15Length2.wav": "7db7b43cab2c9294dd458e1d0b57dfae",
"assets/assets/sounds/Index16Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index16Length1.wav": "099e44307f3bdf7174b4f44836ecc658",
"assets/assets/sounds/Index1Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index1Length1.wav": "772615684d0f2489bf9166a569356866",
"assets/assets/sounds/Index1Length12.wav": "4887a12550b4952f0f6e189ddbe323c7",
"assets/assets/sounds/Index1Length16.wav": "b119c78b320e927c6f3ffae5a9f30a04",
"assets/assets/sounds/Index1Length2.wav": "39eb8faadc824e8fed8d18866ce1a2d4",
"assets/assets/sounds/Index1Length3.wav": "3999d2770500a785fa6d10e571e16920",
"assets/assets/sounds/Index1Length4.wav": "01ae3bf9420f02a7ae3542ec8d898485",
"assets/assets/sounds/Index1Length6.wav": "1edf1340657f9bdc3d048a1409c3b791",
"assets/assets/sounds/Index1Length8.wav": "23571245b6b38b08e7938f26dade427a",
"assets/assets/sounds/Index2Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index2Length1.wav": "2b209a60499653a81c82b2e34cadee05",
"assets/assets/sounds/Index2Length12.wav": "b77500c1be01fc23fc87df1d02a88584",
"assets/assets/sounds/Index2Length2.wav": "d3b4a0fbcc215c7c9b398b481b646e69",
"assets/assets/sounds/Index2Length3.wav": "9d4681203fa79fda8a799f8abb2274d0",
"assets/assets/sounds/Index2Length4.wav": "9bc307dfc2810972d7fb851780bd4856",
"assets/assets/sounds/Index2Length6.wav": "12256e9827dd704044361b6bea633697",
"assets/assets/sounds/Index2Length8.wav": "236cc6760ff8709f610a5df29f7ee914",
"assets/assets/sounds/Index3Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index3Length1.wav": "25501e35dab4ac090f3d393961c71469",
"assets/assets/sounds/Index3Length12.wav": "6b153fdd906699556ac9c48fe901f824",
"assets/assets/sounds/Index3Length2.wav": "3edf9bea4df56bd4e8298b080258e7ed",
"assets/assets/sounds/Index3Length3.wav": "46134b6c36782d084478b5e9e6b762fe",
"assets/assets/sounds/Index3Length4.wav": "6f107aa30809dc0acc671da9d57e0c98",
"assets/assets/sounds/Index3Length6.wav": "665578c3526000a6bb0979a8b71826af",
"assets/assets/sounds/Index3Length8.wav": "3d806f30a2daba348cc6b946cbcccdf7",
"assets/assets/sounds/Index4Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index4Length1.wav": "3743e7eb1b51781664c79375b942c6da",
"assets/assets/sounds/Index4Length12.wav": "c43ada913dc18cad452a60dd182ed75e",
"assets/assets/sounds/Index4Length2.wav": "6cf0776fffa186c2c2a12558d83768a7",
"assets/assets/sounds/Index4Length3.wav": "f849bddc85b2e7c6fd1f63bcbbd40a6d",
"assets/assets/sounds/Index4Length4.wav": "6b24a2da10a2f1c6a0b04191473341a1",
"assets/assets/sounds/Index4Length6.wav": "c458131ab9551df4f184a25f4e11b263",
"assets/assets/sounds/Index4Length8.wav": "81464c9774dad9c6a3580647ed9d3666",
"assets/assets/sounds/Index5Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index5Length1.wav": "806ab8337ca57173787a4e702a1344ef",
"assets/assets/sounds/Index5Length12.wav": "c1fe4a3b8bc188a2815d18aabb8cef2d",
"assets/assets/sounds/Index5Length2.wav": "87db89ddecceb04ed380c9c0d4ff5859",
"assets/assets/sounds/Index5Length3.wav": "b05ee87ff7b682b9297c55dcb98d93a1",
"assets/assets/sounds/Index5Length4.wav": "3978dc493ca597ce2b1686c187b7de0c",
"assets/assets/sounds/Index5Length6.wav": "94340e0274991073a0cc1edbf4af7a3b",
"assets/assets/sounds/Index5Length8.wav": "4ae7798d014cc138b73f42b33898b042",
"assets/assets/sounds/Index6Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index6Length1.wav": "35f2f15487f285b565a6d84042e0d74f",
"assets/assets/sounds/Index6Length2.wav": "396cfd69805ac29ee24ab5fd58364887",
"assets/assets/sounds/Index6Length3.wav": "4dcac0661072a743e9a60d37be96c83f",
"assets/assets/sounds/Index6Length4.wav": "199f2803267b1bd86cf2aea43bd9e0e9",
"assets/assets/sounds/Index6Length6.wav": "7db149b1a69a631b7ac7d6eada0fe29b",
"assets/assets/sounds/Index6Length8.wav": "3c989280c19fa8396aaf3d5cd400ee52",
"assets/assets/sounds/Index7Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index7Length1.wav": "4ab63c3dcdbf66c8e8684e4571afdb31",
"assets/assets/sounds/Index7Length2.wav": "bda7c70ac50680c753f48c31c94abebd",
"assets/assets/sounds/Index7Length3.wav": "2e5c9f44fd79ead45bed0bf55a3be14d",
"assets/assets/sounds/Index7Length4.wav": "1db2d77c01871ee551dd68f75f66c107",
"assets/assets/sounds/Index7Length6.wav": "e4e82234ff5db0537f85be08df8cac2a",
"assets/assets/sounds/Index7Length8.wav": "5a58689b15708f9b02869612ab96a54b",
"assets/assets/sounds/Index8Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index8Length1.wav": "72aed5bbbe74c1d238c625940216f407",
"assets/assets/sounds/Index8Length2.wav": "8a27589e239f08d83a6eeb4b3a7f02e5",
"assets/assets/sounds/Index8Length3.wav": "fe792670c83be5650b0a692e008c68e9",
"assets/assets/sounds/Index8Length4.wav": "c2c338ea17f946e033e63e740e84853e",
"assets/assets/sounds/Index8Length6.wav": "bf359945806a4c1a1af022d3ab7c8388",
"assets/assets/sounds/Index8Length8.wav": "7685dc4811b1713a82800e28d25e4165",
"assets/assets/sounds/Index9Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index9Length1.wav": "ef3438e19d91637ec197ce7a0b5bfd0d",
"assets/assets/sounds/Index9Length2.wav": "0b870a0d1df07e11fc413c41e70b5d53",
"assets/assets/sounds/Index9Length3.wav": "f2cdebe3e1ce45cd4263368a22fe94ec",
"assets/assets/sounds/Index9Length4.wav": "dc5120192375369d8e2e447894b356b1",
"assets/assets/sounds/Index9Length6.wav": "590571d21627f180ab3f970abf145585",
"assets/assets/sounds/Index9Length8.wav": "1d3b950fd37d932669e0dac2054aefc1",
"assets/assets/sounds/metronome.wav": "fe5ef28c9c447aef8116393c4921a937",
"assets/FontManifest.json": "a5aee76623c8e3df7689a29833438d1a",
"assets/fonts/MaterialIcons-Regular.otf": "1288c9e28052e028aba623321f7826ac",
"assets/fonts/Musisync-KVLZ.ttf": "21c3d12f3e3ccbae6bd5441f77538930",
"assets/fonts/Musisync-qYy6.ttf": "7663fd6b156c6882e4b66bd72556e0ee",
"assets/NOTICES": "2d7d1132aa7574db1cc58b232ff796a0",
"assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "115e937bb829a890521f72d2e664b632",
"favicon.png": "5dcef449791fa27946b3d35ad8803796",
"icons/Icon-192.png": "ac9a721a12bbc803b44f645561ecb1e1",
"icons/Icon-512.png": "96e752610906ba2a93c65f8abe1645f1",
"index.html": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"/": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"main.dart.js": "0f32e2c67bc6400a2dd8d2bd7cadd89b",
"manifest.json": "0f93fc4cca58b0f1e83b1b53b384cc49",
"version.json": "dfe1ec7b18cea6bb77c40e3c6a6958b3"
};
// The application shell files that are downloaded before a service worker can
// start.
const CORE = [
"/",
"main.dart.js",
"index.html",
"assets/NOTICES",
"assets/AssetManifest.json",
"assets/FontManifest.json"];
// During install, the TEMP cache is populated with the application shell files.
self.addEventListener("install", (event) => {
self.skipWaiting();
return event.waitUntil(
caches.open(TEMP).then((cache) => {
return cache.addAll(
CORE.map((value) => new Request(value + '?revision=' + RESOURCES[value], {'cache': 'reload'})));
})
);
});
// During activate, the cache is populated with the temp files downloaded in
// install. If this service worker is upgrading from one with a saved
// MANIFEST, then use this to retain unchanged resource files.
self.addEventListener("activate", function(event) {
return event.waitUntil(async function() {
try {
var contentCache = await caches.open(CACHE_NAME);
var tempCache = await caches.open(TEMP);
var manifestCache = await caches.open(MANIFEST);
var manifest = await manifestCache.match('manifest');
// When there is no prior manifest, clear the entire cache.
if (!manifest) {
await caches.delete(CACHE_NAME);
contentCache = await caches.open(CACHE_NAME);
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
}
var oldManifest = await manifest.json();
var origin = self.location.origin;
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
// If a resource from the old manifest is not in the new cache, or if
// the MD5 sum has changed, delete it. Otherwise the resource is left
// in the cache and can be reused by the new service worker.
if (!RESOURCES[key] || RESOURCES[key] != oldManifest[key]) {
await contentCache.delete(request);
}
}
// Populate the cache with the app shell TEMP files, potentially overwriting
// cache files preserved above.
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
} catch (err) {
// On an unhandled exception the state of the cache cannot be guaranteed.
console.error('Failed to upgrade service worker: ' + err);
await caches.delete(CACHE_NAME);
await caches.delete(TEMP);
await caches.delete(MANIFEST);
}
}());
});
// The fetch handler redirects requests for RESOURCE files to the service
// worker cache.
self.addEventListener("fetch", (event) => {
if (event.request.method !== 'GET') {
return;
}
var origin = self.location.origin;
var key = event.request.url.substring(origin.length + 1);
// Redirect URLs to the index.html
if (key.indexOf('?v=') != -1) {
key = key.split('?v=')[0];
}
if (event.request.url == origin || event.request.url.startsWith(origin + '/#') || key == '') {
key = '/';
}
// If the URL is not the RESOURCE list then return to signal that the
// browser should take over.
if (!RESOURCES[key]) {
return;
}
// If the URL is the index.html, perform an online-first request.
if (key == '/') {
return onlineFirst(event);
}
event.respondWith(caches.open(CACHE_NAME)
.then((cache) => {
return cache.match(event.request).then((response) => {
// Either respond with the cached resource, or perform a fetch and
// lazily populate the cache.
return response || fetch(event.request).then((response) => {
cache.put(event.request, response.clone());
return response;
});
})
})
);
});
self.addEventListener('message', (event) => {
// SkipWaiting can be used to immediately activate a waiting service worker.
// This will also require a page refresh triggered by the main worker.
if (event.data === 'skipWaiting') {
self.skipWaiting();
return;
}
if (event.data === 'downloadOffline') {
downloadOffline();
return;
}
});
// Download offline will check the RESOURCES for all files not in the cache
// and populate them.
async function downloadOffline() |
// Attempt to download the resource online before falling back to
// the offline cache.
function onlineFirst(event) {
return event.respondWith(
fetch(event.request).then((response) => {
return caches.open(CACHE_NAME).then((cache) => {
cache.put(event.request, response.clone());
return response;
});
}).catch((error) => {
return caches.open(CACHE_NAME).then((cache) => {
return cache.match(event.request).then((response) => {
if (response != null) {
return response;
}
throw error;
});
});
})
);
}
| {
var resources = [];
var contentCache = await caches.open(CACHE_NAME);
var currentContent = {};
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
currentContent[key] = true;
}
for (var resourceKey of Object.keys(RESOURCES)) {
if (!currentContent[resourceKey]) {
resources.push(resourceKey);
}
}
return contentCache.addAll(resources);
} | identifier_body |
flutter_service_worker.js | 'use strict';
const MANIFEST = 'flutter-app-manifest';
const TEMP = 'flutter-temp-cache';
const CACHE_NAME = 'flutter-app-cache';
const RESOURCES = {
"assets/AssetManifest.json": "9b877279b82fcd9d9c6f92ca3c999525",
"assets/assets/sounds/Index10Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index10Length1.wav": "e3876e80d51303ca86beedab3f8ef10b",
"assets/assets/sounds/Index10Length2.wav": "06402c9c967ec183ee951b3786fc8c74",
"assets/assets/sounds/Index10Length3.wav": "024dcd98260d9cc4751ac6264f0fde0c",
"assets/assets/sounds/Index10Length4.wav": "c4a6f3bd2e0195e77f5f38b4191b288e",
"assets/assets/sounds/Index10Length6.wav": "0d76e4e436b4b43a0c2fbac65307964d",
"assets/assets/sounds/Index11Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index11Length1.wav": "7bdbcff632a39166b9fe9d74a788636e",
"assets/assets/sounds/Index11Length2.wav": "42e8f17a8f797493edc44bcc479d60fa",
"assets/assets/sounds/Index11Length3.wav": "a05e66d8ff1c998d9638567727e49645",
"assets/assets/sounds/Index11Length4.wav": "584125ca091d3be316c08ee797719d14",
"assets/assets/sounds/Index11Length6.wav": "6cb646a92523094c596756be14171dda",
"assets/assets/sounds/Index12Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index12Length1.wav": "11247e748acdc0679524b3160b840563",
"assets/assets/sounds/Index12Length2.wav": "29e138b05d825e94cb21b1a09a229d6e",
"assets/assets/sounds/Index12Length3.wav": "a43d31bab7e6ba732333e914b21366ca",
"assets/assets/sounds/Index12Length4.wav": "249220af94595507cf2b8be63ae06b53",
"assets/assets/sounds/Index13Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index13Length1.wav": "12d17bffd3558c90721779474d2db81b",
"assets/assets/sounds/Index13Length2.wav": "746f509a12e5f7f2ece7209e7722f3f9",
"assets/assets/sounds/Index13Length3.wav": "90665e5c989f7e7379e2a7e4d54f3aac",
"assets/assets/sounds/Index13Length4.wav": "4eac2adb92f81fe26c37e268f703fa2c",
"assets/assets/sounds/Index14Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index14Length1.wav": "aa7777a5e53a9514c89a01beedf05a13",
"assets/assets/sounds/Index14Length2.wav": "85f14e073d1698e24709436f91eae42c",
"assets/assets/sounds/Index14Length3.wav": "cdf2a57e901376c7655ac9b90728cabd",
"assets/assets/sounds/Index15Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index15Length1.wav": "51de52e5be0125a54c637a7a736b3055",
"assets/assets/sounds/Index15Length2.wav": "7db7b43cab2c9294dd458e1d0b57dfae",
"assets/assets/sounds/Index16Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index16Length1.wav": "099e44307f3bdf7174b4f44836ecc658",
"assets/assets/sounds/Index1Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index1Length1.wav": "772615684d0f2489bf9166a569356866",
"assets/assets/sounds/Index1Length12.wav": "4887a12550b4952f0f6e189ddbe323c7",
"assets/assets/sounds/Index1Length16.wav": "b119c78b320e927c6f3ffae5a9f30a04",
"assets/assets/sounds/Index1Length2.wav": "39eb8faadc824e8fed8d18866ce1a2d4",
"assets/assets/sounds/Index1Length3.wav": "3999d2770500a785fa6d10e571e16920",
"assets/assets/sounds/Index1Length4.wav": "01ae3bf9420f02a7ae3542ec8d898485",
"assets/assets/sounds/Index1Length6.wav": "1edf1340657f9bdc3d048a1409c3b791",
"assets/assets/sounds/Index1Length8.wav": "23571245b6b38b08e7938f26dade427a",
"assets/assets/sounds/Index2Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index2Length1.wav": "2b209a60499653a81c82b2e34cadee05",
"assets/assets/sounds/Index2Length12.wav": "b77500c1be01fc23fc87df1d02a88584",
"assets/assets/sounds/Index2Length2.wav": "d3b4a0fbcc215c7c9b398b481b646e69",
"assets/assets/sounds/Index2Length3.wav": "9d4681203fa79fda8a799f8abb2274d0",
"assets/assets/sounds/Index2Length4.wav": "9bc307dfc2810972d7fb851780bd4856",
"assets/assets/sounds/Index2Length6.wav": "12256e9827dd704044361b6bea633697",
"assets/assets/sounds/Index2Length8.wav": "236cc6760ff8709f610a5df29f7ee914",
"assets/assets/sounds/Index3Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index3Length1.wav": "25501e35dab4ac090f3d393961c71469",
"assets/assets/sounds/Index3Length12.wav": "6b153fdd906699556ac9c48fe901f824",
"assets/assets/sounds/Index3Length2.wav": "3edf9bea4df56bd4e8298b080258e7ed",
"assets/assets/sounds/Index3Length3.wav": "46134b6c36782d084478b5e9e6b762fe",
"assets/assets/sounds/Index3Length4.wav": "6f107aa30809dc0acc671da9d57e0c98",
"assets/assets/sounds/Index3Length6.wav": "665578c3526000a6bb0979a8b71826af",
"assets/assets/sounds/Index3Length8.wav": "3d806f30a2daba348cc6b946cbcccdf7",
"assets/assets/sounds/Index4Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index4Length1.wav": "3743e7eb1b51781664c79375b942c6da",
"assets/assets/sounds/Index4Length12.wav": "c43ada913dc18cad452a60dd182ed75e",
"assets/assets/sounds/Index4Length2.wav": "6cf0776fffa186c2c2a12558d83768a7",
"assets/assets/sounds/Index4Length3.wav": "f849bddc85b2e7c6fd1f63bcbbd40a6d",
"assets/assets/sounds/Index4Length4.wav": "6b24a2da10a2f1c6a0b04191473341a1",
"assets/assets/sounds/Index4Length6.wav": "c458131ab9551df4f184a25f4e11b263",
"assets/assets/sounds/Index4Length8.wav": "81464c9774dad9c6a3580647ed9d3666",
"assets/assets/sounds/Index5Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index5Length1.wav": "806ab8337ca57173787a4e702a1344ef",
"assets/assets/sounds/Index5Length12.wav": "c1fe4a3b8bc188a2815d18aabb8cef2d",
"assets/assets/sounds/Index5Length2.wav": "87db89ddecceb04ed380c9c0d4ff5859",
"assets/assets/sounds/Index5Length3.wav": "b05ee87ff7b682b9297c55dcb98d93a1",
"assets/assets/sounds/Index5Length4.wav": "3978dc493ca597ce2b1686c187b7de0c",
"assets/assets/sounds/Index5Length6.wav": "94340e0274991073a0cc1edbf4af7a3b",
"assets/assets/sounds/Index5Length8.wav": "4ae7798d014cc138b73f42b33898b042",
"assets/assets/sounds/Index6Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index6Length1.wav": "35f2f15487f285b565a6d84042e0d74f",
"assets/assets/sounds/Index6Length2.wav": "396cfd69805ac29ee24ab5fd58364887",
"assets/assets/sounds/Index6Length3.wav": "4dcac0661072a743e9a60d37be96c83f",
"assets/assets/sounds/Index6Length4.wav": "199f2803267b1bd86cf2aea43bd9e0e9",
"assets/assets/sounds/Index6Length6.wav": "7db149b1a69a631b7ac7d6eada0fe29b",
"assets/assets/sounds/Index6Length8.wav": "3c989280c19fa8396aaf3d5cd400ee52",
"assets/assets/sounds/Index7Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index7Length1.wav": "4ab63c3dcdbf66c8e8684e4571afdb31",
"assets/assets/sounds/Index7Length2.wav": "bda7c70ac50680c753f48c31c94abebd",
"assets/assets/sounds/Index7Length3.wav": "2e5c9f44fd79ead45bed0bf55a3be14d",
"assets/assets/sounds/Index7Length4.wav": "1db2d77c01871ee551dd68f75f66c107",
"assets/assets/sounds/Index7Length6.wav": "e4e82234ff5db0537f85be08df8cac2a",
"assets/assets/sounds/Index7Length8.wav": "5a58689b15708f9b02869612ab96a54b",
"assets/assets/sounds/Index8Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index8Length1.wav": "72aed5bbbe74c1d238c625940216f407",
"assets/assets/sounds/Index8Length2.wav": "8a27589e239f08d83a6eeb4b3a7f02e5",
"assets/assets/sounds/Index8Length3.wav": "fe792670c83be5650b0a692e008c68e9",
"assets/assets/sounds/Index8Length4.wav": "c2c338ea17f946e033e63e740e84853e",
"assets/assets/sounds/Index8Length6.wav": "bf359945806a4c1a1af022d3ab7c8388",
"assets/assets/sounds/Index8Length8.wav": "7685dc4811b1713a82800e28d25e4165",
"assets/assets/sounds/Index9Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index9Length1.wav": "ef3438e19d91637ec197ce7a0b5bfd0d",
"assets/assets/sounds/Index9Length2.wav": "0b870a0d1df07e11fc413c41e70b5d53",
"assets/assets/sounds/Index9Length3.wav": "f2cdebe3e1ce45cd4263368a22fe94ec",
"assets/assets/sounds/Index9Length4.wav": "dc5120192375369d8e2e447894b356b1",
"assets/assets/sounds/Index9Length6.wav": "590571d21627f180ab3f970abf145585",
"assets/assets/sounds/Index9Length8.wav": "1d3b950fd37d932669e0dac2054aefc1",
"assets/assets/sounds/metronome.wav": "fe5ef28c9c447aef8116393c4921a937",
"assets/FontManifest.json": "a5aee76623c8e3df7689a29833438d1a",
"assets/fonts/MaterialIcons-Regular.otf": "1288c9e28052e028aba623321f7826ac",
"assets/fonts/Musisync-KVLZ.ttf": "21c3d12f3e3ccbae6bd5441f77538930",
"assets/fonts/Musisync-qYy6.ttf": "7663fd6b156c6882e4b66bd72556e0ee",
"assets/NOTICES": "2d7d1132aa7574db1cc58b232ff796a0",
"assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "115e937bb829a890521f72d2e664b632",
"favicon.png": "5dcef449791fa27946b3d35ad8803796",
"icons/Icon-192.png": "ac9a721a12bbc803b44f645561ecb1e1",
"icons/Icon-512.png": "96e752610906ba2a93c65f8abe1645f1",
"index.html": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"/": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"main.dart.js": "0f32e2c67bc6400a2dd8d2bd7cadd89b",
"manifest.json": "0f93fc4cca58b0f1e83b1b53b384cc49",
"version.json": "dfe1ec7b18cea6bb77c40e3c6a6958b3"
};
// The application shell files that are downloaded before a service worker can
// start.
const CORE = [
"/",
"main.dart.js",
"index.html",
"assets/NOTICES",
"assets/AssetManifest.json",
"assets/FontManifest.json"];
// During install, the TEMP cache is populated with the application shell files.
self.addEventListener("install", (event) => {
self.skipWaiting();
return event.waitUntil(
caches.open(TEMP).then((cache) => {
return cache.addAll(
CORE.map((value) => new Request(value + '?revision=' + RESOURCES[value], {'cache': 'reload'})));
})
);
});
// During activate, the cache is populated with the temp files downloaded in
// install. If this service worker is upgrading from one with a saved
// MANIFEST, then use this to retain unchanged resource files.
self.addEventListener("activate", function(event) {
return event.waitUntil(async function() {
try {
var contentCache = await caches.open(CACHE_NAME);
var tempCache = await caches.open(TEMP);
var manifestCache = await caches.open(MANIFEST);
var manifest = await manifestCache.match('manifest');
// When there is no prior manifest, clear the entire cache.
if (!manifest) {
await caches.delete(CACHE_NAME);
contentCache = await caches.open(CACHE_NAME);
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
}
var oldManifest = await manifest.json();
var origin = self.location.origin;
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
// If a resource from the old manifest is not in the new cache, or if
// the MD5 sum has changed, delete it. Otherwise the resource is left
// in the cache and can be reused by the new service worker.
if (!RESOURCES[key] || RESOURCES[key] != oldManifest[key]) {
await contentCache.delete(request);
}
}
// Populate the cache with the app shell TEMP files, potentially overwriting
// cache files preserved above.
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
} catch (err) {
// On an unhandled exception the state of the cache cannot be guaranteed.
console.error('Failed to upgrade service worker: ' + err);
await caches.delete(CACHE_NAME);
await caches.delete(TEMP);
await caches.delete(MANIFEST);
}
}());
});
// The fetch handler redirects requests for RESOURCE files to the service
// worker cache.
self.addEventListener("fetch", (event) => {
if (event.request.method !== 'GET') {
return;
}
var origin = self.location.origin;
var key = event.request.url.substring(origin.length + 1);
// Redirect URLs to the index.html
if (key.indexOf('?v=') != -1) {
key = key.split('?v=')[0];
}
if (event.request.url == origin || event.request.url.startsWith(origin + '/#') || key == '') {
key = '/';
}
// If the URL is not the RESOURCE list then return to signal that the
// browser should take over.
if (!RESOURCES[key]) {
return;
}
// If the URL is the index.html, perform an online-first request.
if (key == '/') {
return onlineFirst(event);
}
event.respondWith(caches.open(CACHE_NAME)
.then((cache) => {
return cache.match(event.request).then((response) => {
// Either respond with the cached resource, or perform a fetch and
// lazily populate the cache.
return response || fetch(event.request).then((response) => {
cache.put(event.request, response.clone());
return response;
});
})
})
);
});
self.addEventListener('message', (event) => {
// SkipWaiting can be used to immediately activate a waiting service worker.
// This will also require a page refresh triggered by the main worker.
if (event.data === 'skipWaiting') {
self.skipWaiting();
return;
}
if (event.data === 'downloadOffline') {
downloadOffline();
return;
}
});
// Download offline will check the RESOURCES for all files not in the cache
// and populate them.
async function downloadOffline() {
var resources = [];
var contentCache = await caches.open(CACHE_NAME);
var currentContent = {};
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
currentContent[key] = true;
}
for (var resourceKey of Object.keys(RESOURCES)) {
if (!currentContent[resourceKey]) {
resources.push(resourceKey);
}
}
return contentCache.addAll(resources);
}
// Attempt to download the resource online before falling back to
// the offline cache.
function | (event) {
return event.respondWith(
fetch(event.request).then((response) => {
return caches.open(CACHE_NAME).then((cache) => {
cache.put(event.request, response.clone());
return response;
});
}).catch((error) => {
return caches.open(CACHE_NAME).then((cache) => {
return cache.match(event.request).then((response) => {
if (response != null) {
return response;
}
throw error;
});
});
})
);
}
| onlineFirst | identifier_name |
flutter_service_worker.js | 'use strict';
const MANIFEST = 'flutter-app-manifest';
const TEMP = 'flutter-temp-cache';
const CACHE_NAME = 'flutter-app-cache';
const RESOURCES = {
"assets/AssetManifest.json": "9b877279b82fcd9d9c6f92ca3c999525",
"assets/assets/sounds/Index10Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index10Length1.wav": "e3876e80d51303ca86beedab3f8ef10b",
"assets/assets/sounds/Index10Length2.wav": "06402c9c967ec183ee951b3786fc8c74",
"assets/assets/sounds/Index10Length3.wav": "024dcd98260d9cc4751ac6264f0fde0c",
"assets/assets/sounds/Index10Length4.wav": "c4a6f3bd2e0195e77f5f38b4191b288e",
"assets/assets/sounds/Index10Length6.wav": "0d76e4e436b4b43a0c2fbac65307964d",
"assets/assets/sounds/Index11Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index11Length1.wav": "7bdbcff632a39166b9fe9d74a788636e",
"assets/assets/sounds/Index11Length2.wav": "42e8f17a8f797493edc44bcc479d60fa",
"assets/assets/sounds/Index11Length3.wav": "a05e66d8ff1c998d9638567727e49645",
"assets/assets/sounds/Index11Length4.wav": "584125ca091d3be316c08ee797719d14",
"assets/assets/sounds/Index11Length6.wav": "6cb646a92523094c596756be14171dda",
"assets/assets/sounds/Index12Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index12Length1.wav": "11247e748acdc0679524b3160b840563",
"assets/assets/sounds/Index12Length2.wav": "29e138b05d825e94cb21b1a09a229d6e",
"assets/assets/sounds/Index12Length3.wav": "a43d31bab7e6ba732333e914b21366ca",
"assets/assets/sounds/Index12Length4.wav": "249220af94595507cf2b8be63ae06b53",
"assets/assets/sounds/Index13Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index13Length1.wav": "12d17bffd3558c90721779474d2db81b",
"assets/assets/sounds/Index13Length2.wav": "746f509a12e5f7f2ece7209e7722f3f9",
"assets/assets/sounds/Index13Length3.wav": "90665e5c989f7e7379e2a7e4d54f3aac",
"assets/assets/sounds/Index13Length4.wav": "4eac2adb92f81fe26c37e268f703fa2c",
"assets/assets/sounds/Index14Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index14Length1.wav": "aa7777a5e53a9514c89a01beedf05a13",
"assets/assets/sounds/Index14Length2.wav": "85f14e073d1698e24709436f91eae42c",
"assets/assets/sounds/Index14Length3.wav": "cdf2a57e901376c7655ac9b90728cabd",
"assets/assets/sounds/Index15Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index15Length1.wav": "51de52e5be0125a54c637a7a736b3055",
"assets/assets/sounds/Index15Length2.wav": "7db7b43cab2c9294dd458e1d0b57dfae",
"assets/assets/sounds/Index16Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index16Length1.wav": "099e44307f3bdf7174b4f44836ecc658",
"assets/assets/sounds/Index1Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index1Length1.wav": "772615684d0f2489bf9166a569356866",
"assets/assets/sounds/Index1Length12.wav": "4887a12550b4952f0f6e189ddbe323c7",
"assets/assets/sounds/Index1Length16.wav": "b119c78b320e927c6f3ffae5a9f30a04",
"assets/assets/sounds/Index1Length2.wav": "39eb8faadc824e8fed8d18866ce1a2d4",
"assets/assets/sounds/Index1Length3.wav": "3999d2770500a785fa6d10e571e16920",
"assets/assets/sounds/Index1Length4.wav": "01ae3bf9420f02a7ae3542ec8d898485",
"assets/assets/sounds/Index1Length6.wav": "1edf1340657f9bdc3d048a1409c3b791",
"assets/assets/sounds/Index1Length8.wav": "23571245b6b38b08e7938f26dade427a",
"assets/assets/sounds/Index2Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index2Length1.wav": "2b209a60499653a81c82b2e34cadee05",
"assets/assets/sounds/Index2Length12.wav": "b77500c1be01fc23fc87df1d02a88584",
"assets/assets/sounds/Index2Length2.wav": "d3b4a0fbcc215c7c9b398b481b646e69",
"assets/assets/sounds/Index2Length3.wav": "9d4681203fa79fda8a799f8abb2274d0",
"assets/assets/sounds/Index2Length4.wav": "9bc307dfc2810972d7fb851780bd4856",
"assets/assets/sounds/Index2Length6.wav": "12256e9827dd704044361b6bea633697",
"assets/assets/sounds/Index2Length8.wav": "236cc6760ff8709f610a5df29f7ee914",
"assets/assets/sounds/Index3Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index3Length1.wav": "25501e35dab4ac090f3d393961c71469",
"assets/assets/sounds/Index3Length12.wav": "6b153fdd906699556ac9c48fe901f824",
"assets/assets/sounds/Index3Length2.wav": "3edf9bea4df56bd4e8298b080258e7ed",
"assets/assets/sounds/Index3Length3.wav": "46134b6c36782d084478b5e9e6b762fe",
"assets/assets/sounds/Index3Length4.wav": "6f107aa30809dc0acc671da9d57e0c98",
"assets/assets/sounds/Index3Length6.wav": "665578c3526000a6bb0979a8b71826af",
"assets/assets/sounds/Index3Length8.wav": "3d806f30a2daba348cc6b946cbcccdf7",
"assets/assets/sounds/Index4Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index4Length1.wav": "3743e7eb1b51781664c79375b942c6da",
"assets/assets/sounds/Index4Length12.wav": "c43ada913dc18cad452a60dd182ed75e",
"assets/assets/sounds/Index4Length2.wav": "6cf0776fffa186c2c2a12558d83768a7",
"assets/assets/sounds/Index4Length3.wav": "f849bddc85b2e7c6fd1f63bcbbd40a6d",
"assets/assets/sounds/Index4Length4.wav": "6b24a2da10a2f1c6a0b04191473341a1",
"assets/assets/sounds/Index4Length6.wav": "c458131ab9551df4f184a25f4e11b263",
"assets/assets/sounds/Index4Length8.wav": "81464c9774dad9c6a3580647ed9d3666",
"assets/assets/sounds/Index5Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index5Length1.wav": "806ab8337ca57173787a4e702a1344ef",
"assets/assets/sounds/Index5Length12.wav": "c1fe4a3b8bc188a2815d18aabb8cef2d",
"assets/assets/sounds/Index5Length2.wav": "87db89ddecceb04ed380c9c0d4ff5859",
"assets/assets/sounds/Index5Length3.wav": "b05ee87ff7b682b9297c55dcb98d93a1",
"assets/assets/sounds/Index5Length4.wav": "3978dc493ca597ce2b1686c187b7de0c",
"assets/assets/sounds/Index5Length6.wav": "94340e0274991073a0cc1edbf4af7a3b",
"assets/assets/sounds/Index5Length8.wav": "4ae7798d014cc138b73f42b33898b042",
"assets/assets/sounds/Index6Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index6Length1.wav": "35f2f15487f285b565a6d84042e0d74f",
"assets/assets/sounds/Index6Length2.wav": "396cfd69805ac29ee24ab5fd58364887",
"assets/assets/sounds/Index6Length3.wav": "4dcac0661072a743e9a60d37be96c83f",
"assets/assets/sounds/Index6Length4.wav": "199f2803267b1bd86cf2aea43bd9e0e9",
"assets/assets/sounds/Index6Length6.wav": "7db149b1a69a631b7ac7d6eada0fe29b",
"assets/assets/sounds/Index6Length8.wav": "3c989280c19fa8396aaf3d5cd400ee52",
"assets/assets/sounds/Index7Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index7Length1.wav": "4ab63c3dcdbf66c8e8684e4571afdb31",
"assets/assets/sounds/Index7Length2.wav": "bda7c70ac50680c753f48c31c94abebd",
"assets/assets/sounds/Index7Length3.wav": "2e5c9f44fd79ead45bed0bf55a3be14d",
"assets/assets/sounds/Index7Length4.wav": "1db2d77c01871ee551dd68f75f66c107",
"assets/assets/sounds/Index7Length6.wav": "e4e82234ff5db0537f85be08df8cac2a",
"assets/assets/sounds/Index7Length8.wav": "5a58689b15708f9b02869612ab96a54b",
"assets/assets/sounds/Index8Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index8Length1.wav": "72aed5bbbe74c1d238c625940216f407",
"assets/assets/sounds/Index8Length2.wav": "8a27589e239f08d83a6eeb4b3a7f02e5",
"assets/assets/sounds/Index8Length3.wav": "fe792670c83be5650b0a692e008c68e9",
"assets/assets/sounds/Index8Length4.wav": "c2c338ea17f946e033e63e740e84853e",
"assets/assets/sounds/Index8Length6.wav": "bf359945806a4c1a1af022d3ab7c8388",
"assets/assets/sounds/Index8Length8.wav": "7685dc4811b1713a82800e28d25e4165",
"assets/assets/sounds/Index9Length0.wav": "959c53595dba4a30fec8715149f2947a",
"assets/assets/sounds/Index9Length1.wav": "ef3438e19d91637ec197ce7a0b5bfd0d",
"assets/assets/sounds/Index9Length2.wav": "0b870a0d1df07e11fc413c41e70b5d53",
"assets/assets/sounds/Index9Length3.wav": "f2cdebe3e1ce45cd4263368a22fe94ec",
"assets/assets/sounds/Index9Length4.wav": "dc5120192375369d8e2e447894b356b1",
"assets/assets/sounds/Index9Length6.wav": "590571d21627f180ab3f970abf145585",
"assets/assets/sounds/Index9Length8.wav": "1d3b950fd37d932669e0dac2054aefc1",
"assets/assets/sounds/metronome.wav": "fe5ef28c9c447aef8116393c4921a937",
"assets/FontManifest.json": "a5aee76623c8e3df7689a29833438d1a",
"assets/fonts/MaterialIcons-Regular.otf": "1288c9e28052e028aba623321f7826ac",
"assets/fonts/Musisync-KVLZ.ttf": "21c3d12f3e3ccbae6bd5441f77538930",
"assets/fonts/Musisync-qYy6.ttf": "7663fd6b156c6882e4b66bd72556e0ee",
"assets/NOTICES": "2d7d1132aa7574db1cc58b232ff796a0",
"assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "115e937bb829a890521f72d2e664b632",
"favicon.png": "5dcef449791fa27946b3d35ad8803796",
"icons/Icon-192.png": "ac9a721a12bbc803b44f645561ecb1e1",
"icons/Icon-512.png": "96e752610906ba2a93c65f8abe1645f1",
"index.html": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"/": "0ffe178a82a6fdf5e724aae8d3d1d70e",
"main.dart.js": "0f32e2c67bc6400a2dd8d2bd7cadd89b",
"manifest.json": "0f93fc4cca58b0f1e83b1b53b384cc49",
"version.json": "dfe1ec7b18cea6bb77c40e3c6a6958b3"
};
// The application shell files that are downloaded before a service worker can
// start.
const CORE = [
"/",
"main.dart.js",
"index.html",
"assets/NOTICES",
"assets/AssetManifest.json",
"assets/FontManifest.json"];
// During install, the TEMP cache is populated with the application shell files.
self.addEventListener("install", (event) => {
self.skipWaiting();
return event.waitUntil(
caches.open(TEMP).then((cache) => {
return cache.addAll(
CORE.map((value) => new Request(value + '?revision=' + RESOURCES[value], {'cache': 'reload'})));
})
);
});
// During activate, the cache is populated with the temp files downloaded in
// install. If this service worker is upgrading from one with a saved
// MANIFEST, then use this to retain unchanged resource files.
self.addEventListener("activate", function(event) {
return event.waitUntil(async function() {
try {
var contentCache = await caches.open(CACHE_NAME);
var tempCache = await caches.open(TEMP);
var manifestCache = await caches.open(MANIFEST);
var manifest = await manifestCache.match('manifest');
// When there is no prior manifest, clear the entire cache.
if (!manifest) {
await caches.delete(CACHE_NAME);
contentCache = await caches.open(CACHE_NAME);
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
}
var oldManifest = await manifest.json();
var origin = self.location.origin;
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
// If a resource from the old manifest is not in the new cache, or if
// the MD5 sum has changed, delete it. Otherwise the resource is left
// in the cache and can be reused by the new service worker.
if (!RESOURCES[key] || RESOURCES[key] != oldManifest[key]) {
await contentCache.delete(request);
}
}
// Populate the cache with the app shell TEMP files, potentially overwriting
// cache files preserved above.
for (var request of await tempCache.keys()) {
var response = await tempCache.match(request);
await contentCache.put(request, response);
}
await caches.delete(TEMP);
// Save the manifest to make future upgrades efficient.
await manifestCache.put('manifest', new Response(JSON.stringify(RESOURCES)));
return;
} catch (err) {
// On an unhandled exception the state of the cache cannot be guaranteed.
console.error('Failed to upgrade service worker: ' + err);
await caches.delete(CACHE_NAME);
await caches.delete(TEMP);
await caches.delete(MANIFEST);
}
}());
});
// The fetch handler redirects requests for RESOURCE files to the service
// worker cache.
self.addEventListener("fetch", (event) => {
if (event.request.method !== 'GET') {
return;
}
var origin = self.location.origin;
var key = event.request.url.substring(origin.length + 1);
// Redirect URLs to the index.html
if (key.indexOf('?v=') != -1) {
key = key.split('?v=')[0];
}
if (event.request.url == origin || event.request.url.startsWith(origin + '/#') || key == '') {
key = '/';
}
// If the URL is not the RESOURCE list then return to signal that the
// browser should take over.
if (!RESOURCES[key]) {
return;
}
// If the URL is the index.html, perform an online-first request.
if (key == '/') {
return onlineFirst(event);
}
event.respondWith(caches.open(CACHE_NAME)
.then((cache) => {
return cache.match(event.request).then((response) => {
// Either respond with the cached resource, or perform a fetch and
// lazily populate the cache.
return response || fetch(event.request).then((response) => {
cache.put(event.request, response.clone());
return response;
});
})
})
);
});
self.addEventListener('message', (event) => {
// SkipWaiting can be used to immediately activate a waiting service worker.
// This will also require a page refresh triggered by the main worker.
if (event.data === 'skipWaiting') {
self.skipWaiting();
return;
}
if (event.data === 'downloadOffline') |
});
// Download offline will check the RESOURCES for all files not in the cache
// and populate them.
async function downloadOffline() {
var resources = [];
var contentCache = await caches.open(CACHE_NAME);
var currentContent = {};
for (var request of await contentCache.keys()) {
var key = request.url.substring(origin.length + 1);
if (key == "") {
key = "/";
}
currentContent[key] = true;
}
for (var resourceKey of Object.keys(RESOURCES)) {
if (!currentContent[resourceKey]) {
resources.push(resourceKey);
}
}
return contentCache.addAll(resources);
}
// Attempt to download the resource online before falling back to
// the offline cache.
function onlineFirst(event) {
return event.respondWith(
fetch(event.request).then((response) => {
return caches.open(CACHE_NAME).then((cache) => {
cache.put(event.request, response.clone());
return response;
});
}).catch((error) => {
return caches.open(CACHE_NAME).then((cache) => {
return cache.match(event.request).then((response) => {
if (response != null) {
return response;
}
throw error;
});
});
})
);
}
| {
downloadOffline();
return;
} | conditional_block |
facade.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common_utils::common::get_proxy_or_connect;
use crate::repository_manager::types::RepositoryOutput;
use anyhow::{format_err, Error};
use fidl_fuchsia_pkg::{RepositoryManagerMarker, RepositoryManagerProxy};
use fidl_fuchsia_pkg_ext::RepositoryConfig;
use fuchsia_syslog::macros::fx_log_info;
use fuchsia_zircon as zx;
use parking_lot::RwLock;
use serde_json::{from_value, to_value, Value};
use std::convert::TryFrom;
/// Facade providing access to RepositoryManager interfaces.
#[derive(Debug)]
pub struct RepositoryManagerFacade {
proxy: RwLock<Option<RepositoryManagerProxy>>,
}
impl RepositoryManagerFacade {
pub fn new() -> Self {
Self { proxy: RwLock::new(None) }
}
#[cfg(test)]
fn new_with_proxy(proxy: RepositoryManagerProxy) -> Self {
Self { proxy: RwLock::new(Some(proxy)) }
}
fn proxy(&self) -> Result<RepositoryManagerProxy, Error> {
get_proxy_or_connect::<RepositoryManagerMarker>(&self.proxy)
}
/// Lists repositories using the repository_manager fidl service.
///
/// Returns a list containing repository info in the format of
/// RepositoryConfig.
pub async fn list_repo(&self) -> Result<Value, Error> {
match self.fetch_repos().await {
Ok(repos) => |
Err(err) => {
return Err(format_err!("Listing Repositories failed with error {:?}", err))
}
};
}
/// Add a new source to an existing repository.
///
/// params format uses RepositoryConfig, example:
/// {
/// "repo_url": "fuchsia-pkg://example.com",
/// "root_keys":[
/// {
/// "type":"ed25519",
/// "value":"00"
/// }],
/// "mirrors": [
/// {
/// "mirror_url": "http://example.org/",
/// "subscribe": true
/// }],
/// "update_package_url": "fuchsia-pkg://update.example.com/update",
/// "root_version": 1,
/// "root_threshold": 1,
/// }
pub async fn add(&self, args: Value) -> Result<Value, Error> {
let add_request: RepositoryConfig = from_value(args)?;
fx_log_info!("Add Repo request received {:?}", add_request);
let res = self.proxy()?.add(add_request.into()).await?;
match res.map_err(zx::Status::from_raw) {
Ok(()) => Ok(to_value(RepositoryOutput::Success)?),
_ => Err(format_err!("Add repo errored with code {:?}", res)),
}
}
/// Fetches repositories using repository_manager.list FIDL service.
async fn fetch_repos(&self) -> Result<Vec<RepositoryConfig>, anyhow::Error> {
let (iter, server_end) = fidl::endpoints::create_proxy()?;
self.proxy()?.list(server_end)?;
let mut repos = vec![];
loop {
let chunk = iter.next().await?;
if chunk.is_empty() {
break;
}
repos.extend(chunk);
}
repos
.into_iter()
.map(|repo| RepositoryConfig::try_from(repo).map_err(|e| anyhow::Error::from(e)))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common_utils::test::assert_value_round_trips_as;
use fidl_fuchsia_pkg::{RepositoryIteratorRequest, RepositoryManagerRequest};
use fidl_fuchsia_pkg_ext::{
MirrorConfigBuilder, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_syslog::macros::{fx_log_err, fx_log_info};
use fuchsia_url::pkg_url::{PkgUrl, RepoUrl};
use futures::{future::Future, join, StreamExt, TryFutureExt, TryStreamExt};
use http::Uri;
use matches::assert_matches;
use parking_lot::Mutex;
use serde_json::json;
use std::iter::FusedIterator;
fn make_test_repo_config() -> RepositoryConfig {
RepositoryConfigBuilder::new(RepoUrl::new("example.com".to_string()).expect("valid url"))
.add_root_key(RepositoryKey::Ed25519(vec![0u8]))
.add_mirror(
MirrorConfigBuilder::new("http://example.org".parse::<Uri>().unwrap())
.unwrap()
.subscribe(true)
.build(),
)
.update_package_url(
PkgUrl::parse("fuchsia-pkg://update.example.com/update").expect("valid PkgUrl"),
)
.build()
}
struct MockRepositoryManagerBuilder {
expected: Vec<Box<dyn FnOnce(RepositoryManagerRequest) + Send + 'static>>,
repos: Mutex<Vec<RepositoryConfig>>,
}
impl MockRepositoryManagerBuilder {
fn new() -> Self {
Self { expected: vec![], repos: Mutex::new(vec![]) }
}
fn push(mut self, request: impl FnOnce(RepositoryManagerRequest) + Send + 'static) -> Self {
self.expected.push(Box::new(request));
self
}
fn add_repository(self, repo_config: RepositoryConfig) -> Self {
self.repos.lock().push(repo_config);
self
}
fn expect_list_repository(self) -> Self {
let mut repos = self.repos.lock().clone().into_iter().map(|r| r.into());
self.push(move |req| match req {
RepositoryManagerRequest::List { iterator, .. } => {
let mut stream = iterator.into_stream().expect("list iterator into_stream");
// repos must be fused b/c the Next() fidl method should return an empty vector
// forever after iteration is complete
let _: &dyn FusedIterator<Item = _> = &repos;
fuchsia_async::Task::spawn(
async move {
while let Some(RepositoryIteratorRequest::Next { responder }) =
stream.try_next().await?
{
responder.send(&mut repos.by_ref().take(5)).expect("next send")
}
Ok(())
}
.unwrap_or_else(|e: anyhow::Error| {
fx_log_err!("error running list protocol: {:#}", e)
}),
)
.detach();
}
req => panic!("unexpected request: {:?}", req),
})
}
fn expect_add_repository(self, repo_add: RepositoryConfig) -> Self {
self.push(move |req| match req {
RepositoryManagerRequest::Add { repo, responder } => {
let new_repo = RepositoryConfig::try_from(repo).expect("valid repo config");
assert_eq!(new_repo, repo_add);
responder.send(&mut Ok(())).expect("send ok");
}
req => panic!("unexpected request: {:?}", req),
})
}
fn build(self) -> (RepositoryManagerFacade, impl Future<Output = ()>) {
let (proxy, mut stream) =
fidl::endpoints::create_proxy_and_stream::<RepositoryManagerMarker>().unwrap();
let fut = async move {
for expected in self.expected {
expected(stream.next().await.unwrap().unwrap());
}
assert_matches!(stream.next().await, None);
};
(RepositoryManagerFacade::new_with_proxy(proxy), fut)
}
}
#[test]
fn serde_repo_configuration() {
let repo_config = make_test_repo_config();
assert_value_round_trips_as(
repo_config,
json!(
{
"repo_url": "fuchsia-pkg://example.com",
"root_keys":[
{
"type":"ed25519",
"value":"00"
}],
"mirrors": [
{
"mirror_url": "http://example.org/",
"subscribe": true
}],
"update_package_url": "fuchsia-pkg://update.example.com/update",
"root_version": 1,
"root_threshold": 1,
}),
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn list_repository_ok() {
let (facade, repository_manager) = MockRepositoryManagerBuilder::new()
.add_repository(make_test_repo_config())
.expect_list_repository()
.build();
let test = async move {
let config = facade.list_repo().await.unwrap();
fx_log_info!("Repo listed: {:?}", config);
let mut repo_config: Vec<RepositoryConfig> = from_value(config).unwrap();
assert_eq!(repo_config.len(), 1);
let received_repo = repo_config.pop().unwrap();
let expected_pkg_url =
PkgUrl::parse("fuchsia-pkg://update.example.com/update").unwrap();
match received_repo.update_package_url() {
Some(u) => assert_eq!(u.to_string(), expected_pkg_url.to_string()),
None => fx_log_err!("update_package_url is empty."),
}
};
join!(repository_manager, test);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn add_repository_ok() {
let repo_test = make_test_repo_config();
let (facade, repository_manager) =
MockRepositoryManagerBuilder::new().expect_add_repository(repo_test.clone()).build();
let test = async move {
let status = facade.add(to_value(repo_test.clone()).unwrap()).await.unwrap();
assert_matches!(from_value(status).unwrap(), RepositoryOutput::Success)
};
join!(repository_manager, test);
}
}
| {
let return_value = to_value(&repos)?;
return Ok(return_value);
} | conditional_block |
facade.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common_utils::common::get_proxy_or_connect;
use crate::repository_manager::types::RepositoryOutput;
use anyhow::{format_err, Error};
use fidl_fuchsia_pkg::{RepositoryManagerMarker, RepositoryManagerProxy};
use fidl_fuchsia_pkg_ext::RepositoryConfig;
use fuchsia_syslog::macros::fx_log_info;
use fuchsia_zircon as zx;
use parking_lot::RwLock;
use serde_json::{from_value, to_value, Value};
use std::convert::TryFrom;
/// Facade providing access to RepositoryManager interfaces.
#[derive(Debug)]
pub struct RepositoryManagerFacade {
proxy: RwLock<Option<RepositoryManagerProxy>>,
}
impl RepositoryManagerFacade {
pub fn new() -> Self {
Self { proxy: RwLock::new(None) }
} | }
fn proxy(&self) -> Result<RepositoryManagerProxy, Error> {
get_proxy_or_connect::<RepositoryManagerMarker>(&self.proxy)
}
/// Lists repositories using the repository_manager fidl service.
///
/// Returns a list containing repository info in the format of
/// RepositoryConfig.
pub async fn list_repo(&self) -> Result<Value, Error> {
match self.fetch_repos().await {
Ok(repos) => {
let return_value = to_value(&repos)?;
return Ok(return_value);
}
Err(err) => {
return Err(format_err!("Listing Repositories failed with error {:?}", err))
}
};
}
/// Add a new source to an existing repository.
///
/// params format uses RepositoryConfig, example:
/// {
/// "repo_url": "fuchsia-pkg://example.com",
/// "root_keys":[
/// {
/// "type":"ed25519",
/// "value":"00"
/// }],
/// "mirrors": [
/// {
/// "mirror_url": "http://example.org/",
/// "subscribe": true
/// }],
/// "update_package_url": "fuchsia-pkg://update.example.com/update",
/// "root_version": 1,
/// "root_threshold": 1,
/// }
pub async fn add(&self, args: Value) -> Result<Value, Error> {
let add_request: RepositoryConfig = from_value(args)?;
fx_log_info!("Add Repo request received {:?}", add_request);
let res = self.proxy()?.add(add_request.into()).await?;
match res.map_err(zx::Status::from_raw) {
Ok(()) => Ok(to_value(RepositoryOutput::Success)?),
_ => Err(format_err!("Add repo errored with code {:?}", res)),
}
}
/// Fetches repositories using repository_manager.list FIDL service.
async fn fetch_repos(&self) -> Result<Vec<RepositoryConfig>, anyhow::Error> {
let (iter, server_end) = fidl::endpoints::create_proxy()?;
self.proxy()?.list(server_end)?;
let mut repos = vec![];
loop {
let chunk = iter.next().await?;
if chunk.is_empty() {
break;
}
repos.extend(chunk);
}
repos
.into_iter()
.map(|repo| RepositoryConfig::try_from(repo).map_err(|e| anyhow::Error::from(e)))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common_utils::test::assert_value_round_trips_as;
use fidl_fuchsia_pkg::{RepositoryIteratorRequest, RepositoryManagerRequest};
use fidl_fuchsia_pkg_ext::{
MirrorConfigBuilder, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_syslog::macros::{fx_log_err, fx_log_info};
use fuchsia_url::pkg_url::{PkgUrl, RepoUrl};
use futures::{future::Future, join, StreamExt, TryFutureExt, TryStreamExt};
use http::Uri;
use matches::assert_matches;
use parking_lot::Mutex;
use serde_json::json;
use std::iter::FusedIterator;
fn make_test_repo_config() -> RepositoryConfig {
RepositoryConfigBuilder::new(RepoUrl::new("example.com".to_string()).expect("valid url"))
.add_root_key(RepositoryKey::Ed25519(vec![0u8]))
.add_mirror(
MirrorConfigBuilder::new("http://example.org".parse::<Uri>().unwrap())
.unwrap()
.subscribe(true)
.build(),
)
.update_package_url(
PkgUrl::parse("fuchsia-pkg://update.example.com/update").expect("valid PkgUrl"),
)
.build()
}
struct MockRepositoryManagerBuilder {
expected: Vec<Box<dyn FnOnce(RepositoryManagerRequest) + Send + 'static>>,
repos: Mutex<Vec<RepositoryConfig>>,
}
impl MockRepositoryManagerBuilder {
fn new() -> Self {
Self { expected: vec![], repos: Mutex::new(vec![]) }
}
fn push(mut self, request: impl FnOnce(RepositoryManagerRequest) + Send + 'static) -> Self {
self.expected.push(Box::new(request));
self
}
fn add_repository(self, repo_config: RepositoryConfig) -> Self {
self.repos.lock().push(repo_config);
self
}
fn expect_list_repository(self) -> Self {
let mut repos = self.repos.lock().clone().into_iter().map(|r| r.into());
self.push(move |req| match req {
RepositoryManagerRequest::List { iterator, .. } => {
let mut stream = iterator.into_stream().expect("list iterator into_stream");
// repos must be fused b/c the Next() fidl method should return an empty vector
// forever after iteration is complete
let _: &dyn FusedIterator<Item = _> = &repos;
fuchsia_async::Task::spawn(
async move {
while let Some(RepositoryIteratorRequest::Next { responder }) =
stream.try_next().await?
{
responder.send(&mut repos.by_ref().take(5)).expect("next send")
}
Ok(())
}
.unwrap_or_else(|e: anyhow::Error| {
fx_log_err!("error running list protocol: {:#}", e)
}),
)
.detach();
}
req => panic!("unexpected request: {:?}", req),
})
}
fn expect_add_repository(self, repo_add: RepositoryConfig) -> Self {
self.push(move |req| match req {
RepositoryManagerRequest::Add { repo, responder } => {
let new_repo = RepositoryConfig::try_from(repo).expect("valid repo config");
assert_eq!(new_repo, repo_add);
responder.send(&mut Ok(())).expect("send ok");
}
req => panic!("unexpected request: {:?}", req),
})
}
fn build(self) -> (RepositoryManagerFacade, impl Future<Output = ()>) {
let (proxy, mut stream) =
fidl::endpoints::create_proxy_and_stream::<RepositoryManagerMarker>().unwrap();
let fut = async move {
for expected in self.expected {
expected(stream.next().await.unwrap().unwrap());
}
assert_matches!(stream.next().await, None);
};
(RepositoryManagerFacade::new_with_proxy(proxy), fut)
}
}
#[test]
fn serde_repo_configuration() {
let repo_config = make_test_repo_config();
assert_value_round_trips_as(
repo_config,
json!(
{
"repo_url": "fuchsia-pkg://example.com",
"root_keys":[
{
"type":"ed25519",
"value":"00"
}],
"mirrors": [
{
"mirror_url": "http://example.org/",
"subscribe": true
}],
"update_package_url": "fuchsia-pkg://update.example.com/update",
"root_version": 1,
"root_threshold": 1,
}),
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn list_repository_ok() {
let (facade, repository_manager) = MockRepositoryManagerBuilder::new()
.add_repository(make_test_repo_config())
.expect_list_repository()
.build();
let test = async move {
let config = facade.list_repo().await.unwrap();
fx_log_info!("Repo listed: {:?}", config);
let mut repo_config: Vec<RepositoryConfig> = from_value(config).unwrap();
assert_eq!(repo_config.len(), 1);
let received_repo = repo_config.pop().unwrap();
let expected_pkg_url =
PkgUrl::parse("fuchsia-pkg://update.example.com/update").unwrap();
match received_repo.update_package_url() {
Some(u) => assert_eq!(u.to_string(), expected_pkg_url.to_string()),
None => fx_log_err!("update_package_url is empty."),
}
};
join!(repository_manager, test);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn add_repository_ok() {
let repo_test = make_test_repo_config();
let (facade, repository_manager) =
MockRepositoryManagerBuilder::new().expect_add_repository(repo_test.clone()).build();
let test = async move {
let status = facade.add(to_value(repo_test.clone()).unwrap()).await.unwrap();
assert_matches!(from_value(status).unwrap(), RepositoryOutput::Success)
};
join!(repository_manager, test);
}
} |
#[cfg(test)]
fn new_with_proxy(proxy: RepositoryManagerProxy) -> Self {
Self { proxy: RwLock::new(Some(proxy)) } | random_line_split |
facade.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common_utils::common::get_proxy_or_connect;
use crate::repository_manager::types::RepositoryOutput;
use anyhow::{format_err, Error};
use fidl_fuchsia_pkg::{RepositoryManagerMarker, RepositoryManagerProxy};
use fidl_fuchsia_pkg_ext::RepositoryConfig;
use fuchsia_syslog::macros::fx_log_info;
use fuchsia_zircon as zx;
use parking_lot::RwLock;
use serde_json::{from_value, to_value, Value};
use std::convert::TryFrom;
/// Facade providing access to RepositoryManager interfaces.
#[derive(Debug)]
pub struct RepositoryManagerFacade {
proxy: RwLock<Option<RepositoryManagerProxy>>,
}
impl RepositoryManagerFacade {
pub fn new() -> Self {
Self { proxy: RwLock::new(None) }
}
#[cfg(test)]
fn new_with_proxy(proxy: RepositoryManagerProxy) -> Self {
Self { proxy: RwLock::new(Some(proxy)) }
}
fn proxy(&self) -> Result<RepositoryManagerProxy, Error> {
get_proxy_or_connect::<RepositoryManagerMarker>(&self.proxy)
}
/// Lists repositories using the repository_manager fidl service.
///
/// Returns a list containing repository info in the format of
/// RepositoryConfig.
pub async fn list_repo(&self) -> Result<Value, Error> {
match self.fetch_repos().await {
Ok(repos) => {
let return_value = to_value(&repos)?;
return Ok(return_value);
}
Err(err) => {
return Err(format_err!("Listing Repositories failed with error {:?}", err))
}
};
}
/// Add a new source to an existing repository.
///
/// params format uses RepositoryConfig, example:
/// {
/// "repo_url": "fuchsia-pkg://example.com",
/// "root_keys":[
/// {
/// "type":"ed25519",
/// "value":"00"
/// }],
/// "mirrors": [
/// {
/// "mirror_url": "http://example.org/",
/// "subscribe": true
/// }],
/// "update_package_url": "fuchsia-pkg://update.example.com/update",
/// "root_version": 1,
/// "root_threshold": 1,
/// }
pub async fn add(&self, args: Value) -> Result<Value, Error> {
let add_request: RepositoryConfig = from_value(args)?;
fx_log_info!("Add Repo request received {:?}", add_request);
let res = self.proxy()?.add(add_request.into()).await?;
match res.map_err(zx::Status::from_raw) {
Ok(()) => Ok(to_value(RepositoryOutput::Success)?),
_ => Err(format_err!("Add repo errored with code {:?}", res)),
}
}
/// Fetches repositories using repository_manager.list FIDL service.
async fn fetch_repos(&self) -> Result<Vec<RepositoryConfig>, anyhow::Error> {
let (iter, server_end) = fidl::endpoints::create_proxy()?;
self.proxy()?.list(server_end)?;
let mut repos = vec![];
loop {
let chunk = iter.next().await?;
if chunk.is_empty() {
break;
}
repos.extend(chunk);
}
repos
.into_iter()
.map(|repo| RepositoryConfig::try_from(repo).map_err(|e| anyhow::Error::from(e)))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common_utils::test::assert_value_round_trips_as;
use fidl_fuchsia_pkg::{RepositoryIteratorRequest, RepositoryManagerRequest};
use fidl_fuchsia_pkg_ext::{
MirrorConfigBuilder, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_syslog::macros::{fx_log_err, fx_log_info};
use fuchsia_url::pkg_url::{PkgUrl, RepoUrl};
use futures::{future::Future, join, StreamExt, TryFutureExt, TryStreamExt};
use http::Uri;
use matches::assert_matches;
use parking_lot::Mutex;
use serde_json::json;
use std::iter::FusedIterator;
fn make_test_repo_config() -> RepositoryConfig {
RepositoryConfigBuilder::new(RepoUrl::new("example.com".to_string()).expect("valid url"))
.add_root_key(RepositoryKey::Ed25519(vec![0u8]))
.add_mirror(
MirrorConfigBuilder::new("http://example.org".parse::<Uri>().unwrap())
.unwrap()
.subscribe(true)
.build(),
)
.update_package_url(
PkgUrl::parse("fuchsia-pkg://update.example.com/update").expect("valid PkgUrl"),
)
.build()
}
struct MockRepositoryManagerBuilder {
expected: Vec<Box<dyn FnOnce(RepositoryManagerRequest) + Send + 'static>>,
repos: Mutex<Vec<RepositoryConfig>>,
}
impl MockRepositoryManagerBuilder {
fn new() -> Self {
Self { expected: vec![], repos: Mutex::new(vec![]) }
}
fn push(mut self, request: impl FnOnce(RepositoryManagerRequest) + Send + 'static) -> Self {
self.expected.push(Box::new(request));
self
}
fn add_repository(self, repo_config: RepositoryConfig) -> Self {
self.repos.lock().push(repo_config);
self
}
fn expect_list_repository(self) -> Self {
let mut repos = self.repos.lock().clone().into_iter().map(|r| r.into());
self.push(move |req| match req {
RepositoryManagerRequest::List { iterator, .. } => {
let mut stream = iterator.into_stream().expect("list iterator into_stream");
// repos must be fused b/c the Next() fidl method should return an empty vector
// forever after iteration is complete
let _: &dyn FusedIterator<Item = _> = &repos;
fuchsia_async::Task::spawn(
async move {
while let Some(RepositoryIteratorRequest::Next { responder }) =
stream.try_next().await?
{
responder.send(&mut repos.by_ref().take(5)).expect("next send")
}
Ok(())
}
.unwrap_or_else(|e: anyhow::Error| {
fx_log_err!("error running list protocol: {:#}", e)
}),
)
.detach();
}
req => panic!("unexpected request: {:?}", req),
})
}
fn expect_add_repository(self, repo_add: RepositoryConfig) -> Self {
self.push(move |req| match req {
RepositoryManagerRequest::Add { repo, responder } => {
let new_repo = RepositoryConfig::try_from(repo).expect("valid repo config");
assert_eq!(new_repo, repo_add);
responder.send(&mut Ok(())).expect("send ok");
}
req => panic!("unexpected request: {:?}", req),
})
}
fn build(self) -> (RepositoryManagerFacade, impl Future<Output = ()>) {
let (proxy, mut stream) =
fidl::endpoints::create_proxy_and_stream::<RepositoryManagerMarker>().unwrap();
let fut = async move {
for expected in self.expected {
expected(stream.next().await.unwrap().unwrap());
}
assert_matches!(stream.next().await, None);
};
(RepositoryManagerFacade::new_with_proxy(proxy), fut)
}
}
#[test]
fn | () {
let repo_config = make_test_repo_config();
assert_value_round_trips_as(
repo_config,
json!(
{
"repo_url": "fuchsia-pkg://example.com",
"root_keys":[
{
"type":"ed25519",
"value":"00"
}],
"mirrors": [
{
"mirror_url": "http://example.org/",
"subscribe": true
}],
"update_package_url": "fuchsia-pkg://update.example.com/update",
"root_version": 1,
"root_threshold": 1,
}),
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn list_repository_ok() {
let (facade, repository_manager) = MockRepositoryManagerBuilder::new()
.add_repository(make_test_repo_config())
.expect_list_repository()
.build();
let test = async move {
let config = facade.list_repo().await.unwrap();
fx_log_info!("Repo listed: {:?}", config);
let mut repo_config: Vec<RepositoryConfig> = from_value(config).unwrap();
assert_eq!(repo_config.len(), 1);
let received_repo = repo_config.pop().unwrap();
let expected_pkg_url =
PkgUrl::parse("fuchsia-pkg://update.example.com/update").unwrap();
match received_repo.update_package_url() {
Some(u) => assert_eq!(u.to_string(), expected_pkg_url.to_string()),
None => fx_log_err!("update_package_url is empty."),
}
};
join!(repository_manager, test);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn add_repository_ok() {
let repo_test = make_test_repo_config();
let (facade, repository_manager) =
MockRepositoryManagerBuilder::new().expect_add_repository(repo_test.clone()).build();
let test = async move {
let status = facade.add(to_value(repo_test.clone()).unwrap()).await.unwrap();
assert_matches!(from_value(status).unwrap(), RepositoryOutput::Success)
};
join!(repository_manager, test);
}
}
| serde_repo_configuration | identifier_name |
facade.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common_utils::common::get_proxy_or_connect;
use crate::repository_manager::types::RepositoryOutput;
use anyhow::{format_err, Error};
use fidl_fuchsia_pkg::{RepositoryManagerMarker, RepositoryManagerProxy};
use fidl_fuchsia_pkg_ext::RepositoryConfig;
use fuchsia_syslog::macros::fx_log_info;
use fuchsia_zircon as zx;
use parking_lot::RwLock;
use serde_json::{from_value, to_value, Value};
use std::convert::TryFrom;
/// Facade providing access to RepositoryManager interfaces.
#[derive(Debug)]
pub struct RepositoryManagerFacade {
proxy: RwLock<Option<RepositoryManagerProxy>>,
}
impl RepositoryManagerFacade {
pub fn new() -> Self {
Self { proxy: RwLock::new(None) }
}
#[cfg(test)]
fn new_with_proxy(proxy: RepositoryManagerProxy) -> Self {
Self { proxy: RwLock::new(Some(proxy)) }
}
fn proxy(&self) -> Result<RepositoryManagerProxy, Error> {
get_proxy_or_connect::<RepositoryManagerMarker>(&self.proxy)
}
/// Lists repositories using the repository_manager fidl service.
///
/// Returns a list containing repository info in the format of
/// RepositoryConfig.
pub async fn list_repo(&self) -> Result<Value, Error> {
match self.fetch_repos().await {
Ok(repos) => {
let return_value = to_value(&repos)?;
return Ok(return_value);
}
Err(err) => {
return Err(format_err!("Listing Repositories failed with error {:?}", err))
}
};
}
/// Add a new source to an existing repository.
///
/// params format uses RepositoryConfig, example:
/// {
/// "repo_url": "fuchsia-pkg://example.com",
/// "root_keys":[
/// {
/// "type":"ed25519",
/// "value":"00"
/// }],
/// "mirrors": [
/// {
/// "mirror_url": "http://example.org/",
/// "subscribe": true
/// }],
/// "update_package_url": "fuchsia-pkg://update.example.com/update",
/// "root_version": 1,
/// "root_threshold": 1,
/// }
pub async fn add(&self, args: Value) -> Result<Value, Error> |
/// Fetches repositories using repository_manager.list FIDL service.
async fn fetch_repos(&self) -> Result<Vec<RepositoryConfig>, anyhow::Error> {
let (iter, server_end) = fidl::endpoints::create_proxy()?;
self.proxy()?.list(server_end)?;
let mut repos = vec![];
loop {
let chunk = iter.next().await?;
if chunk.is_empty() {
break;
}
repos.extend(chunk);
}
repos
.into_iter()
.map(|repo| RepositoryConfig::try_from(repo).map_err(|e| anyhow::Error::from(e)))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common_utils::test::assert_value_round_trips_as;
use fidl_fuchsia_pkg::{RepositoryIteratorRequest, RepositoryManagerRequest};
use fidl_fuchsia_pkg_ext::{
MirrorConfigBuilder, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_syslog::macros::{fx_log_err, fx_log_info};
use fuchsia_url::pkg_url::{PkgUrl, RepoUrl};
use futures::{future::Future, join, StreamExt, TryFutureExt, TryStreamExt};
use http::Uri;
use matches::assert_matches;
use parking_lot::Mutex;
use serde_json::json;
use std::iter::FusedIterator;
fn make_test_repo_config() -> RepositoryConfig {
RepositoryConfigBuilder::new(RepoUrl::new("example.com".to_string()).expect("valid url"))
.add_root_key(RepositoryKey::Ed25519(vec![0u8]))
.add_mirror(
MirrorConfigBuilder::new("http://example.org".parse::<Uri>().unwrap())
.unwrap()
.subscribe(true)
.build(),
)
.update_package_url(
PkgUrl::parse("fuchsia-pkg://update.example.com/update").expect("valid PkgUrl"),
)
.build()
}
struct MockRepositoryManagerBuilder {
expected: Vec<Box<dyn FnOnce(RepositoryManagerRequest) + Send + 'static>>,
repos: Mutex<Vec<RepositoryConfig>>,
}
impl MockRepositoryManagerBuilder {
fn new() -> Self {
Self { expected: vec![], repos: Mutex::new(vec![]) }
}
fn push(mut self, request: impl FnOnce(RepositoryManagerRequest) + Send + 'static) -> Self {
self.expected.push(Box::new(request));
self
}
fn add_repository(self, repo_config: RepositoryConfig) -> Self {
self.repos.lock().push(repo_config);
self
}
fn expect_list_repository(self) -> Self {
let mut repos = self.repos.lock().clone().into_iter().map(|r| r.into());
self.push(move |req| match req {
RepositoryManagerRequest::List { iterator, .. } => {
let mut stream = iterator.into_stream().expect("list iterator into_stream");
// repos must be fused b/c the Next() fidl method should return an empty vector
// forever after iteration is complete
let _: &dyn FusedIterator<Item = _> = &repos;
fuchsia_async::Task::spawn(
async move {
while let Some(RepositoryIteratorRequest::Next { responder }) =
stream.try_next().await?
{
responder.send(&mut repos.by_ref().take(5)).expect("next send")
}
Ok(())
}
.unwrap_or_else(|e: anyhow::Error| {
fx_log_err!("error running list protocol: {:#}", e)
}),
)
.detach();
}
req => panic!("unexpected request: {:?}", req),
})
}
fn expect_add_repository(self, repo_add: RepositoryConfig) -> Self {
self.push(move |req| match req {
RepositoryManagerRequest::Add { repo, responder } => {
let new_repo = RepositoryConfig::try_from(repo).expect("valid repo config");
assert_eq!(new_repo, repo_add);
responder.send(&mut Ok(())).expect("send ok");
}
req => panic!("unexpected request: {:?}", req),
})
}
fn build(self) -> (RepositoryManagerFacade, impl Future<Output = ()>) {
let (proxy, mut stream) =
fidl::endpoints::create_proxy_and_stream::<RepositoryManagerMarker>().unwrap();
let fut = async move {
for expected in self.expected {
expected(stream.next().await.unwrap().unwrap());
}
assert_matches!(stream.next().await, None);
};
(RepositoryManagerFacade::new_with_proxy(proxy), fut)
}
}
#[test]
fn serde_repo_configuration() {
let repo_config = make_test_repo_config();
assert_value_round_trips_as(
repo_config,
json!(
{
"repo_url": "fuchsia-pkg://example.com",
"root_keys":[
{
"type":"ed25519",
"value":"00"
}],
"mirrors": [
{
"mirror_url": "http://example.org/",
"subscribe": true
}],
"update_package_url": "fuchsia-pkg://update.example.com/update",
"root_version": 1,
"root_threshold": 1,
}),
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn list_repository_ok() {
let (facade, repository_manager) = MockRepositoryManagerBuilder::new()
.add_repository(make_test_repo_config())
.expect_list_repository()
.build();
let test = async move {
let config = facade.list_repo().await.unwrap();
fx_log_info!("Repo listed: {:?}", config);
let mut repo_config: Vec<RepositoryConfig> = from_value(config).unwrap();
assert_eq!(repo_config.len(), 1);
let received_repo = repo_config.pop().unwrap();
let expected_pkg_url =
PkgUrl::parse("fuchsia-pkg://update.example.com/update").unwrap();
match received_repo.update_package_url() {
Some(u) => assert_eq!(u.to_string(), expected_pkg_url.to_string()),
None => fx_log_err!("update_package_url is empty."),
}
};
join!(repository_manager, test);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn add_repository_ok() {
let repo_test = make_test_repo_config();
let (facade, repository_manager) =
MockRepositoryManagerBuilder::new().expect_add_repository(repo_test.clone()).build();
let test = async move {
let status = facade.add(to_value(repo_test.clone()).unwrap()).await.unwrap();
assert_matches!(from_value(status).unwrap(), RepositoryOutput::Success)
};
join!(repository_manager, test);
}
}
| {
let add_request: RepositoryConfig = from_value(args)?;
fx_log_info!("Add Repo request received {:?}", add_request);
let res = self.proxy()?.add(add_request.into()).await?;
match res.map_err(zx::Status::from_raw) {
Ok(()) => Ok(to_value(RepositoryOutput::Success)?),
_ => Err(format_err!("Add repo errored with code {:?}", res)),
}
} | identifier_body |
gulpfile.js | var gulp = require('gulp'),
argv = require('yargs').argv,
cache = require('gulp-cache'),
concat = require('gulp-concat'),
del = require('del'),
gulpif = require('gulp-if'),
gutil = require('gulp-util')
notify = require('gulp-notify'),
plumber = require('gulp-plumber'),
q = require('q'),
rename = require('gulp-rename'),
replace = require('gulp-replace');
var config = require('./package.json');
var settings = config.settings;
settings.liveReload=false;
settings.plumberConfig=function(){
return {'errorHandler': onError};
};
/**
* browser-sync task for starting a server. This will open a browser for you. Point multiple browsers / devices to the same url and watch the magic happen.
* Depends on: watch
*/
gulp.task('browser-sync', ['watch'], function() {
var browserSync = require('browser-sync');
// Watch any files in dist/*, reload on change
gulp.watch([settings.dist + '**']).on('change', function(){browserSync.reload({});notify({ message: 'Reload browser' });});
return browserSync({
server: {
baseDir: settings.dist
},
ghostMode: {
clicks: true,
location: true,
forms: true,
scroll: true
},
open: "external",
injectChanges: true, // inject CSS changes (false force a reload)
browser: ["google chrome"],
scrollProportionally: true, // Sync viewports to TOP position
scrollThrottle: 50,
});
});
/**
* Build and copy all styles, scripts, images and fonts.
* Depends on: clean
*/
gulp.task('build', ['info', 'clean'], function() {
gulp.start('styles', 'scripts', 'images', 'copy', 'todo');
});
/**
* Cleans the `dist` folder and other generated files
*/
gulp.task('clean', ['clear-cache'], function(cb) {
del([settings.dist, 'todo.md', 'todo.json'], cb);
});
/**
* Clears the cache used by gulp-cache
*/
gulp.task('clear-cache', function() {
// Or, just call this for everything
cache.clearAll();
});
/**
* Copies all to dist/
*/
gulp.task('copy', ['copy-fonts', 'copy-template', 'copy-index'], function() {});
/**
* Task for copying fonts only
*/
gulp.task('copy-fonts', function() {
var deferred = q.defer();
// copy all fonts
setTimeout(function() {
gulp.src( settings.src + 'fonts/**')
.pipe(gulp.dest(settings.dist + 'fonts'));
deferred.resolve();
}, 1);
return deferred.promise;
});
/**
* task for copying templates only
*/
gulp.task('copy-template', function() {
// copy all html && json
return gulp.src( [settings.src + 'js/app/**/*.html', settings.src + 'js/app/**/*.json'])
.pipe(cache(gulp.dest('dist/js/app')));
});
/**
* Task for copying index page only. Optionally add live reload script to it
*/
gulp.task('copy-index', function() {
// copy the index.html
return gulp.src(settings.src + 'index.html')
.pipe(gulpif(argv.dev, replace(/app.min.js/g, 'app.js')))
.pipe(gulpif(argv.nohuna, replace('<script src=\'js/huna.min.js\'></script>', '')))
.pipe(gulpif(settings.liveReload, replace(/(\<\/body\>)/g, "<script>document.write('<script src=\"http://' + (location.host || 'localhost').split(':')[0] + ':35729/livereload.js?snipver=1\"></' + 'script>')</script>$1")))
.pipe(cache(gulp.dest(settings.dist)));
});
/**
* Default task.
* Depends on: build
*/
gulp.task('default', ['build']);
/**
* Create Javascript documentation
*/
gulp.task('docs-js', ['todo'], function(){
var gulpDoxx = require('gulp-doxx');
gulp.src([settings.src + '/js/**/*.js', 'README.md', settings.reports + '/TODO.md'])
.pipe(gulpDoxx({
title: config.name,
urlPrefix: "file:///"+__dirname+settings.reports
}))
.pipe(gulp.dest(settings.reports));
});
/**
* Task to optimize and deploy all images found in folder `src/img/**`. Result is copied to `dist/img`
*/
gulp.task('images', function() {
var imagemin = require('gulp-imagemin');
var deferred = q.defer();
setTimeout(function() {
gulp.src(settings.src + 'img/**/*')
.pipe(plumber(settings.plumberConfig()))
.pipe(cache(imagemin({ optimizationLevel: 5, progressive: true, interlaced: true })))
.pipe(gulp.dest(settings.dist + 'img'));
deferred.resolve();
}, 1);
return deferred.promise;
});
/**
* log some info
*/
gulp.task('info',function(){
// log project details
gutil.log( gutil.colors.cyan("Running gulp on project "+config.name+" v"+ config.version) );
gutil.log( gutil.colors.cyan("Author: " + config.author[0].name) );
gutil.log( gutil.colors.cyan("Email : " + config.author[0].email) );
gutil.log( gutil.colors.cyan("Site : " + config.author[0].url) );
gutil.log( gutil.colors.cyan("Author: " + config.author[1].name) );
gutil.log( gutil.colors.cyan("Email : " + config.author[1].email) );
gutil.log( gutil.colors.cyan("Site : " + config.author[1].url) );
// log info
gutil.log("If you have an enhancement or encounter a bug, please report them on", gutil.colors.magenta(config.bugs.url));
});
/**
 * Start the live reload server. Live reload will be triggered when a file in
 * the `dist` folder changes. This will add a live-reload script to the
 * index.html page, which makes it all happen.
 * Depends on: watch
 */
gulp.task('live-reload', ['watch'], function() {
  var livereload = require('gulp-livereload');
  settings.liveReload = true;
  // first, delete the index.html from the dist folder as we will copy it later
  del([settings.dist + 'index.html']);
  // add livereload script to the index.html
  gulp.src([settings.src + 'index.html'])
    // with --dev, point the page at the unminified app bundle instead
    .pipe(gulpif(argv.dev, replace(/app.min.js/g, 'app.js')))
    // with --nohuna, strip the huna library script tag entirely
    .pipe(gulpif(argv.nohuna, replace('<script src=\'js/huna.min.js\'></script>', '')))
    // inject the livereload client (port 35729) just before </body>; the
    // split '</' + 'script>' trick keeps the literal from terminating this tag
    .pipe(replace(/(\<\/body\>)/g, "<script>document.write('<script src=\"http://' + (location.host || 'localhost').split(':')[0] + ':35729/livereload.js?snipver=1\"></' + 'script>')</script>$1"))
    .pipe(gulp.dest(settings.dist));
  // Create LiveReload server
  livereload.listen();
  // Watch any files in dist/*, reload on change
  gulp.watch([settings.dist + '**']).on('change', livereload.changed);
});
/**
 * Task to handle and deploy all javascript, application & vendor.
 * Pure aggregate task: no body needed, the dependencies do all the work.
 *
 * Depends on: scripts-app, scripts-vendor
 */
gulp.task('scripts', ['scripts-app','scripts-vendor']);
/**
 * Removes the node_modules folder after cleaning the build output.
 * Depends on: clean
 */
gulp.task('remove',['clean'], function(cb){
  // cb signals task completion to gulp once deletion finishes.
  // NOTE(review): del(patterns, cb) is the del@1.x callback API; newer del
  // versions return a promise instead — confirm the installed version.
  del('node_modules', cb);
});
/**
 * Minifies all javascript found in the `src/js/**` folder. All files will be
 * concatenated into `app.js`. Minified and non-minified versions are copied
 * to the dist folder. This will also generate sourcemaps for the minified
 * version.
 *
 * Depends on: docs-js
 */
gulp.task('scripts-app', ['docs-js'], function() {
  var jshint = require('gulp-jshint'),
    ngannotate = require('gulp-ng-annotate'),
    stripDebug = require('gulp-strip-debug'),
    stylish = require('jshint-stylish'),
    sourcemaps = require('gulp-sourcemaps'),
    uglify = require('gulp-uglify');
  // gulpify the huna library: lint, copy as-is, then emit a .min variant.
  // NOTE(review): this stream is neither returned nor merged with the one
  // below, so gulp may mark the task done before huna.js is written —
  // confirm this fire-and-forget behavior is intended.
  gulp.src([settings.src + 'js/app/huna.js'])
    .pipe(plumber(settings.plumberConfig()))
    .pipe(ngannotate({gulpWarnings: false}))
    .pipe(jshint())
    .pipe(jshint.reporter(stylish))
    .pipe(gulp.dest(settings.dist + 'js'))
    // make minified
    .pipe(rename({suffix: '.min'}))
    // keep console/debugger statements when running with --dev
    .pipe(gulpif(!argv.dev, stripDebug()))
    .pipe(sourcemaps.init())
    .pipe(gulpif(!argv.dev, uglify()))
    .pipe(sourcemaps.write())
    .pipe(gulp.dest(settings.dist + 'js'));
  // all remaining app sources (huna.js excluded by the '!' glob) are linted
  // and concatenated into a single app.js, plus a minified app.min.js
  return gulp.src(['!'+settings.src + 'js/app/huna.js', settings.src + 'js/app/**/*.js'])
    .pipe(plumber(settings.plumberConfig()))
    .pipe(ngannotate({gulpWarnings: false}))
    .pipe(jshint())
    .pipe(jshint.reporter(stylish))
    .pipe(concat('app.js'))
    .pipe(gulp.dest(settings.dist + 'js'))
    // make minified
    .pipe(rename({suffix: '.min'}))
    .pipe(gulpif(!argv.dev, stripDebug()))
    .pipe(sourcemaps.init())
    .pipe(gulpif(!argv.dev, uglify()))
    .pipe(sourcemaps.write())
    .pipe(gulp.dest(settings.dist + 'js'));
});
/**
 * Task to handle all vendor specific javascript. All vendor javascript is
 * copied to the dist directory, and a concatenated bundle is written to
 * dist/js/vendor/vendor.js.
 * Depends on: scripts-vendor-maps
 */
gulp.task('scripts-vendor', ['scripts-vendor-maps'], function() {
  // Ordering matters: angular itself must be concatenated before the modules
  // that depend on it (e.g. angular-route), so its glob comes first.
  var vendorDest = settings.dist + 'js/vendor';
  var vendorSources = [
    settings.src + 'js/vendor/*/**/angular.min.js',
    settings.src + 'js/vendor/**/*.js'
  ];
  var copied = gulp.src(vendorSources).pipe(gulp.dest(vendorDest));
  return copied
    .pipe(concat('vendor.js'))
    .pipe(gulp.dest(vendorDest));
});
/**
 * Copy every vendor `.js.map` sourcemap into the flat vendor output folder.
 */
gulp.task('scripts-vendor-maps', function(){
  var flatten = require('gulp-flatten');
  var mapGlob = settings.src + 'js/vendor/**/*.js.map';
  return gulp
    .src(mapGlob)
    .pipe(flatten()) // drop the intermediate directory structure
    .pipe(gulp.dest(settings.dist + 'js/vendor'));
});
/**
 * Task to start a static server (default port from settings, override with
 * --port). Requests to /api are proxied to the backend: a remote host when
 * --remote is given, localhost otherwise.
 */
gulp.task('server', function(){
  var express = require('express'),
    app = express(),
    url = require('url'),
    port = argv.port||settings.serverport,
    proxy = require('proxy-middleware');
  // serve the built app from settings.dist instead of a hard-coded "dist"
  // so the folder actually served always matches the baseDir logged below
  app.use(express.static(__dirname + "/" + settings.dist));
  if (argv.remote) {
    app.use('/api', proxy(url.parse('http://huna.tuvok.nl:1337/api')));
  } else {
    app.use('/api', proxy(url.parse('http://localhost:1337/api')));
  }
  app.listen(port);
  gutil.log('Server started. Port', port,"baseDir",__dirname+"/"+settings.dist);
});
/**
 * Run the backend app.js under nodemon, restarting on server-side changes
 * and reloading the browsers (via browserSync) after each restart.
 */
gulp.task('nodemon', function(cb) {
  var nodemon = require('gulp-nodemon');
  // We use this `called` variable to make sure the callback is only executed once
  var called = false;
  return nodemon({
    script: 'app.js',
    watch: ['app.js', 'api/**/*.*', 'config/**/*.*']
  })
  // 'start' fires on every (re)start; only the first one completes the task
  .on('start', function onStart() {
    if (!called) {
      cb();
    }
    called = true;
  })
  .on('restart', function onRestart() {
    // Also reload the browsers after a slight delay, giving the server
    // time to come back up before clients re-request pages
    setTimeout(function reload() {
      browserSync.reload({
        stream: false
      });
    }, 500);
  });
});
/**
 * Task to start the backend servers.
 * Pure aggregate task: the dependencies do all the work.
 * Depends on: backend-mongo, backend-server
 */
gulp.task('backend', ['backend-mongo', 'backend-server'], function () {});
/**
 * Task to start the backend mongo server.
 * Should be running before the backend-server.
 */
gulp.task('backend-mongo', function () {
  var exec = require('child_process').exec;
  exec('mongod', function (err, stdout, stderr) {
    console.log(stdout);
    console.log(stderr);
    // only report an actual failure — the previous code invoked
    // onError(null) on every successful exit as well
    if (err) {
      onError(err);
    }
  });
});
/**
 * Task to start up the backend node server (app.js).
 * Run the mongo db first.
 */
gulp.task('backend-server', function () {
  var exec = require('child_process').exec;
  exec('node app.js', function (err, stdout, stderr) {
    console.log(stdout);
    console.log(stderr);
    // only report an actual failure — the previous code invoked
    // onError(null) on every successful exit as well
    if (err) {
      onError(err);
    }
  });
});
/**
 * Task to start a server on port 4000 with the live reload functionality.
 * Pure aggregate task: the dependencies do all the work.
 * Depends on: server, live-reload
 */
gulp.task('start', ['live-reload', 'server'], function(){});
/**
 * Compile Sass into Css and minify it. Minified and non-minified versions are
 * copied to the dist folder.
 * This will also auto prefix vendor specific rules.
 */
gulp.task('styles', function() {
  // NOTE(review): autoprefixer is required but its pipe below is commented
  // out, so no prefixing currently happens — confirm whether the require
  // should be dropped or the pipe re-enabled.
  var autoprefixer = require('gulp-autoprefixer'),
    minifycss = require('gulp-minify-css'),
    sass = require('gulp-sass');
  return gulp.src([settings.src + 'styles/main.scss', settings.src + '/js/vendor/**/c3.min.css'])
    .pipe(plumber(settings.plumberConfig()))
    .pipe(sass({ style: 'expanded' }))
    // .pipe(autoprefixer('last 2 version', 'safari 5', 'ie 8', 'ie 9', 'opera 12.1', 'ios 6', 'android 4'))
    .pipe(gulp.dest(settings.dist + 'css'))
    .pipe(rename({suffix: '.min'}))
    .pipe(minifycss())
    .pipe(gulp.dest(settings.dist + 'css'));
});
/**
 * Output TODO's & FIXME's found in the app js/scss sources, as a markdown
 * report (todo.md) and a json report (todo.json) in the reports folder.
 */
gulp.task('todo', function() {
  var todo = require('gulp-todo');
  // return the stream so gulp only marks the task finished once both report
  // files have been written (the original dropped the stream, signalling
  // completion immediately), and terminate the statement properly
  return gulp.src([settings.src + 'js/app/**/*.js',settings.src + 'styles/app/**/*.scss'])
    .pipe(plumber(settings.plumberConfig()))
    .pipe(todo())
    .pipe(gulp.dest(settings.reports)) //output todo.md as markdown
    .pipe(todo.reporter('json', {fileName: 'todo.json'}))
    .pipe(gulp.dest(settings.reports)); //output todo.json as json
});
/**
 * Watches changes to template, Sass, javascript and image files. On change
 * this will run the appropriate task: copy, styles, scripts or images.
 */
gulp.task('watch', function() {
  // one [glob, tasks] pair per source type we care about
  var watchers = [
    [settings.src + 'index.html',        ['copy-index']],     // index.html
    [settings.src + '**/*.html',         ['copy-template']],  // html templates
    [settings.src + 'fonts/**',          ['copy-fonts']],     // fonts
    [settings.src + 'styles/**/*.scss',  ['styles']],         // .scss files
    [settings.src + 'js/app/**/*.js',    ['scripts-app']],    // app .js files
    [settings.src + 'js/vendor/**/*.js', ['scripts-vendor']], // vendor .js files
    [settings.src + 'img/**/*',          ['images']]          // image files
  ];
  watchers.forEach(function(pair) {
    gulp.watch(pair[0], pair[1]);
  });
});
/**
 * Generic stream error handler: logs the error, shows a desktop notification
 * with its message, and ends the current stream so watch tasks keep running.
 * Intended to be attached as `.on('error', onError)` — `this` is the stream.
 * @param {Error} error the error emitted by the failing plugin
 */
function onError(error){
  // log the error with gutil (resolves the old TODO)
  gutil.log(gutil.colors.red('Error: ' + error.message));
  // notify.onError returns a handler — it must be invoked with the error,
  // otherwise no notification is ever shown (the original never called it)
  notify.onError(function (err) {
    return err.message;
  })(error);
  this.emit('end');
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.