file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
ast.js | /**
* Abstract Syntax Tree for a localization message in 'Banana' format
* @param {string} message
* @param {Object} options options
* @param {boolean} [options.wikilinks] whether the wiki style link syntax should be parsed or not
*/
export default function BananaMessage (message, { wikilinks = false } = {}) {
let pos = 0
// Try parsers until one works, if none work return null
function choice (parserSyntax) {
return () => {
for (let i = 0; i < parserSyntax.length; i++) {
const result = parserSyntax[i]()
if (result !== null) {
return result
}
}
return null
}
}
// Try several parserSyntax-es in a row.
// All must succeed; otherwise, return null.
// This is the only eager one.
function sequence (parserSyntax) {
const originalPos = pos
const result = []
for (let i = 0; i < parserSyntax.length; i++) {
const res = parserSyntax[i]()
if (res === null) {
pos = originalPos
return null
}
result.push(res)
}
return result
}
// Run the same parser over and over until it fails.
// Must succeed a minimum of n times; otherwise, return null.
function | (n, p) {
return () => {
const originalPos = pos
const result = []
let parsed = p()
while (parsed !== null) {
result.push(parsed)
parsed = p()
}
if (result.length < n) {
pos = originalPos
return null
}
return result
}
}
// Helpers -- just make parserSyntax out of simpler JS builtin types
function makeStringParser (s) {
const len = s.length
return () => {
let result = null
if (message.slice(pos, pos + len) === s) {
result = s
pos += len
}
return result
}
}
function makeRegexParser (regex) {
return () => {
const matches = message.slice(pos).match(regex)
if (matches === null) {
return null
}
pos += matches[0].length
return matches[0]
}
}
const whitespace = makeRegexParser(/^\s+/)
const pipe = makeStringParser('|')
const colon = makeStringParser(':')
const backslash = makeStringParser('\\')
const anyCharacter = makeRegexParser(/^./)
const dollar = makeStringParser('$')
const digits = makeRegexParser(/^\d+/)
const doubleQuote = makeStringParser('"')
const singleQuote = makeStringParser('\'')
// A literal is any character except the special characters in the message markup
// Special characters are: [, ], {, }, $, \, <, >
// If wikilinks parsing is disabled, treat [ and ] as regular text.
const regularLiteral = wikilinks ? makeRegexParser(/^[^{}[\]$<\\]/) : makeRegexParser(/^[^{}$<\\]/)
const regularLiteralWithoutBar = wikilinks ? makeRegexParser(/^[^{}[\]$\\|]/) : makeRegexParser(/^[^{}$\\|]/)
const regularLiteralWithoutSpace = wikilinks ? makeRegexParser(/^[^{}[\]$\s]/) : makeRegexParser(/^[^{}$\s]/)
// There is a general pattern:
// parse a thing;
// if it worked, apply transform,
// otherwise return null.
// But using this as a combinator seems to cause problems
// when combined with nOrMore().
// May be some scoping issue.
function transform (p, fn) {
return () => {
const result = p()
return result === null ? null : fn(result)
}
}
// Used to define "literals" within template parameters. The pipe
// character is the parameter delimeter, so by default
// it is not a literal in the parameter
function literalWithoutBar () {
const result = nOrMore(1, escapedOrLiteralWithoutBar)()
return result === null ? null : result.join('')
}
// Used to define "literals" within template parameters.
// The pipe character is the parameter delimeter, so by default
// it is not a literal in the parameter
function literal () {
const result = nOrMore(1, escapedOrRegularLiteral)()
return result === null ? null : result.join('')
}
const escapedOrLiteralWithoutSpace = choice([
escapedLiteral,
regularLiteralWithoutSpace
])
// Used to define "literals" without spaces, in space-delimited situations
function literalWithoutSpace () {
const result = nOrMore(1, escapedOrLiteralWithoutSpace)()
return result === null ? null : result.join('')
}
function escapedLiteral () {
const result = sequence([backslash, anyCharacter])
return result === null ? null : result[1]
}
choice([escapedLiteral, regularLiteralWithoutSpace])
const escapedOrLiteralWithoutBar = choice([escapedLiteral, regularLiteralWithoutBar])
const escapedOrRegularLiteral = choice([escapedLiteral, regularLiteral])
function replacement () {
const result = sequence([dollar, digits])
if (result === null) {
return null
}
return ['REPLACE', parseInt(result[1], 10) - 1]
}
const templateName = transform(
// see $wgLegalTitleChars
// not allowing : due to the need to catch "PLURAL:$1"
makeRegexParser(/^[ !"$&'()*,./0-9;=?@A-Z^_`a-z~\x80-\xFF+-]+/),
function (result) {
return result.toString()
}
)
function templateParam () {
const result = sequence([pipe, nOrMore(0, paramExpression)])
if (result === null) {
return null
}
const expr = result[1]
// use a "CONCAT" operator if there are multiple nodes,
// otherwise return the first node, raw.
return expr.length > 1 ? ['CONCAT'].concat(expr) : expr[0]
}
function templateWithReplacement () {
const result = sequence([templateName, colon, replacement])
return result === null ? null : [result[0], result[2]]
}
function templateWithOutReplacement () {
const result = sequence([templateName, colon, paramExpression])
return result === null ? null : [result[0], result[2]]
}
function templateWithOutFirstParameter () {
const result = sequence([templateName, colon])
return result === null ? null : [result[0], '']
}
const templateContents = choice([
function () {
const res = sequence([
// templates can have placeholders for dynamic
// replacement eg: {{PLURAL:$1|one car|$1 cars}}
// or no placeholders eg:{{GRAMMAR:genitive|{{SITENAME}}}
// Templates can also have empty first param eg:{{GENDER:|A|B|C}}
// to indicate current user in the context. We need to parse them without
// error, but can only fallback to gender neutral form.
choice([templateWithReplacement, templateWithOutReplacement, templateWithOutFirstParameter]),
nOrMore(0, templateParam)
])
return res === null ? null : res[0].concat(res[1])
},
function () {
const res = sequence([templateName, nOrMore(0, templateParam)])
if (res === null) {
return null
}
return [res[0]].concat(res[1])
}
])
const openTemplate = makeStringParser('{{')
const closeTemplate = makeStringParser('}}')
const openWikilink = makeStringParser('[[')
const closeWikilink = makeStringParser(']]')
const openExtlink = makeStringParser('[')
const closeExtlink = makeStringParser(']')
/**
* An expression in the form of {{...}}
*/
function template () {
const result = sequence([openTemplate, templateContents, closeTemplate])
return result === null ? null : result[1]
}
function pipedWikilink () {
const result = sequence([
nOrMore(1, paramExpression),
pipe,
nOrMore(1, expression)
])
return result === null
? null
: [
['CONCAT'].concat(result[0]),
['CONCAT'].concat(result[2])
]
}
function unpipedWikilink () {
const result = sequence([
nOrMore(1, paramExpression)
])
return result === null
? null
: [
['CONCAT'].concat(result[0])
]
}
const wikilinkContents = choice([
pipedWikilink,
unpipedWikilink
])
function wikilink () {
let result = null
const parsedResult = sequence([
openWikilink,
wikilinkContents,
closeWikilink
])
if (parsedResult !== null) {
const parsedLinkContents = parsedResult[1]
result = ['WIKILINK'].concat(parsedLinkContents)
}
return result
}
// this extlink MUST have inner contents, e.g. [foo] not allowed; [foo bar] [foo <i>bar</i>], etc. are allowed
function extlink () {
let result = null
const parsedResult = sequence([
openExtlink,
nOrMore(1, nonWhitespaceExpression),
whitespace,
nOrMore(1, expression),
closeExtlink
])
if (parsedResult !== null) {
// When the entire link target is a single parameter, we can't use CONCAT, as we allow
// passing fancy parameters (like a whole jQuery object or a function) to use for the
// link. Check only if it's a single match, since we can either do CONCAT or not for
// singles with the same effect.
const target = parsedResult[1].length === 1
? parsedResult[1][0]
: ['CONCAT'].concat(parsedResult[1])
result = [
'EXTLINK',
target,
['CONCAT'].concat(parsedResult[3])
]
}
return result
}
const asciiAlphabetLiteral = makeRegexParser(/^[A-Za-z]+/)
/**
* Checks if HTML is allowed
*
* @param {string} startTagName HTML start tag name
* @param {string} endTagName HTML start tag name
* @param {Object} attributes array of consecutive key value pairs,
* with index 2 * n being a name and 2 * n + 1 the associated value
* @return {boolean} true if this is HTML is allowed, false otherwise
*/
function isAllowedHtml (startTagName, endTagName, attributes, settings = {
// Whitelist for allowed HTML elements in wikitext.
// Self-closing tags are not currently supported.
allowedHtmlElements: ['b', 'bdi', 'del', 'i', 'ins', 'u', 'font', 'big', 'small', 'sub',
'sup', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'cite', 'code', 'em', 's', 'strike', 'strong',
'tt', 'var', 'div', 'center', 'blockquote', 'ol', 'ul', 'dl', 'table', 'caption', 'pre',
'ruby', 'rb', 'rp', 'rt', 'rtc', 'p', 'span', 'abbr', 'dfn', 'kbd', 'samp', 'data', 'time',
'mark', 'li', 'dt', 'dd'],
// Key tag name, value allowed attributes for that tag.
// Sourced from Parsoid's Sanitizer::setupAttributeWhitelist
allowedHtmlCommonAttributes: [
// HTML
'id',
'class',
'style',
'lang',
'dir',
'title',
// WAI-ARIA
'aria-describedby',
'aria-flowto',
'aria-hidden',
'aria-label',
'aria-labelledby',
'aria-owns',
'role',
// RDFa
// These attributes are specified in section 9 of
// https://www.w3.org/TR/2008/REC-rdfa-syntax-20081014
'about',
'property',
'resource',
'datatype',
'typeof',
// Microdata. These are specified by
// https://html.spec.whatwg.org/multipage/microdata.html#the-microdata-model
'itemid',
'itemprop',
'itemref',
'itemscope',
'itemtype'
],
// Attributes allowed for specific elements.
// Key is element name in lower case
// Value is array of allowed attributes for that element
allowedHtmlAttributesByElement: {}
}) {
startTagName = startTagName.toLowerCase()
endTagName = endTagName.toLowerCase()
if (startTagName !== endTagName || settings.allowedHtmlElements.indexOf(startTagName) === -1) {
return false
}
const badStyle = /[\000-\010\013\016-\037\177]|expression|filter\s*:|accelerator\s*:|-o-link\s*:|-o-link-source\s*:|-o-replace\s*:|url\s*\(|image\s*\(|image-set\s*\(/i
for (let i = 0, len = attributes.length; i < len; i += 2) {
const attributeName = attributes[i]
if (settings.allowedHtmlCommonAttributes.indexOf(attributeName) === -1 &&
(settings.allowedHtmlAttributesByElement[startTagName] || []).indexOf(attributeName) === -1) {
return false
}
if (attributeName === 'style' && attributes[i + 1].search(badStyle) !== -1) {
return false
}
}
return true
}
function doubleQuotedHtmlAttributeValue () {
const htmlDoubleQuoteAttributeValue = makeRegexParser(/^[^"]*/)
const parsedResult = sequence([
doubleQuote,
htmlDoubleQuoteAttributeValue,
doubleQuote
])
return parsedResult === null ? null : parsedResult[1]
}
function singleQuotedHtmlAttributeValue () {
const htmlSingleQuoteAttributeValue = makeRegexParser(/^[^']*/)
const parsedResult = sequence([
singleQuote,
htmlSingleQuoteAttributeValue,
singleQuote
])
return parsedResult === null ? null : parsedResult[1]
}
function htmlAttribute () {
const htmlAttributeEquals = makeRegexParser(/^\s*=\s*/)
const parsedResult = sequence([
whitespace,
asciiAlphabetLiteral,
htmlAttributeEquals,
choice([
doubleQuotedHtmlAttributeValue,
singleQuotedHtmlAttributeValue
])
])
return parsedResult === null ? null : [parsedResult[1], parsedResult[3]]
}
function htmlAttributes () {
const parsedResult = nOrMore(0, htmlAttribute)()
// Un-nest attributes array due to structure of emitter operations.
return Array.prototype.concat.apply(['HTMLATTRIBUTES'], parsedResult)
}
// Parse, validate and escape HTML content in messages using a whitelisted tag names
// and attributes.
function html () {
let result = null
// Break into three sequence calls. That should allow accurate reconstruction of the original HTML, and requiring an exact tag name match.
// 1. open through closeHtmlTag
// 2. expression
// 3. openHtmlEnd through close
// This will allow recording the positions to reconstruct if HTML is to be treated as text.
const startOpenTagPos = pos
const openHtmlStartTag = makeStringParser('<')
const optionalForwardSlash = makeRegexParser(/^\/?/)
const closeHtmlTag = makeRegexParser(/^\s*>/)
const parsedOpenTagResult = sequence([
openHtmlStartTag,
asciiAlphabetLiteral,
htmlAttributes,
optionalForwardSlash,
closeHtmlTag
])
if (parsedOpenTagResult === null) {
return null
}
const endOpenTagPos = pos
const startTagName = parsedOpenTagResult[1]
const parsedHtmlContents = nOrMore(0, expression)()
const startCloseTagPos = pos
const openHtmlEndTag = makeStringParser('</')
const parsedCloseTagResult = sequence([
openHtmlEndTag,
asciiAlphabetLiteral,
closeHtmlTag
])
if (parsedCloseTagResult === null) {
// Closing tag failed. Return the start tag and contents.
return ['CONCAT', message.slice(startOpenTagPos, endOpenTagPos)]
.concat(parsedHtmlContents)
}
const endCloseTagPos = pos
const endTagName = parsedCloseTagResult[1]
const wrappedAttributes = parsedOpenTagResult[2]
const attributes = wrappedAttributes.slice(1)
if (isAllowedHtml(startTagName, endTagName, attributes)) {
result = ['HTMLELEMENT', startTagName, wrappedAttributes]
.concat(parsedHtmlContents)
} else {
// HTML is not allowed, so contents will remain how
// it was, while HTML markup at this level will be
// treated as text
// E.g. assuming script tags are not allowed:
//
// <script>[[Foo|bar]]</script>
//
// results in '<script>' and '</script>'
// (not treated as an HTML tag), surrounding a fully
// parsed HTML link.
//
// Concatenate everything from the tag, flattening the contents.
const escapeHTML = (unsafeContent) => unsafeContent
.replace(/&/g, '&')
.replace(/</g, '<')
.replace(/>/g, '>')
.replace(/"/g, '"')
.replace(/'/g, ''')
result = ['CONCAT', escapeHTML(message.slice(startOpenTagPos, endOpenTagPos))]
.concat(parsedHtmlContents, escapeHTML(message.slice(startCloseTagPos, endCloseTagPos)))
}
return result
}
const nonWhitespaceExpression = choice([
template,
replacement,
wikilink,
extlink,
literalWithoutSpace
])
const expression = choice([
template,
replacement,
wikilink,
extlink,
html,
literal
])
const paramExpression = choice([template, replacement, literalWithoutBar])
function start () {
const result = nOrMore(0, expression)()
if (result === null) {
return null
}
return ['CONCAT'].concat(result)
}
const result = start()
/*
* For success, the pos must have gotten to the end of the input
* and returned a non-null.
* n.b. This is part of language infrastructure, so we do not throw an internationalizable message.
*/
if (result === null || pos !== message.length) {
throw new Error('Parse error at position ' + pos.toString() + ' in input: ' + message)
}
return result
}
| nOrMore | identifier_name |
ast.js | /**
* Abstract Syntax Tree for a localization message in 'Banana' format
* @param {string} message
* @param {Object} options options
* @param {boolean} [options.wikilinks] whether the wiki style link syntax should be parsed or not
*/
export default function BananaMessage (message, { wikilinks = false } = {}) {
let pos = 0
// Try parsers until one works, if none work return null
function choice (parserSyntax) {
return () => {
for (let i = 0; i < parserSyntax.length; i++) {
const result = parserSyntax[i]()
if (result !== null) {
return result
}
}
return null
}
}
// Try several parserSyntax-es in a row.
// All must succeed; otherwise, return null.
// This is the only eager one.
function sequence (parserSyntax) {
const originalPos = pos
const result = []
for (let i = 0; i < parserSyntax.length; i++) {
const res = parserSyntax[i]()
if (res === null) {
pos = originalPos
return null
}
result.push(res)
}
return result
}
// Run the same parser over and over until it fails.
// Must succeed a minimum of n times; otherwise, return null.
function nOrMore (n, p) {
return () => {
const originalPos = pos
const result = []
let parsed = p()
while (parsed !== null) {
result.push(parsed)
parsed = p()
}
if (result.length < n) {
pos = originalPos
return null
}
return result
}
}
// Helpers -- just make parserSyntax out of simpler JS builtin types
function makeStringParser (s) {
const len = s.length
return () => {
let result = null
if (message.slice(pos, pos + len) === s) {
result = s
pos += len
}
return result
}
}
function makeRegexParser (regex) {
return () => {
const matches = message.slice(pos).match(regex)
if (matches === null) {
return null
}
pos += matches[0].length
return matches[0]
}
}
const whitespace = makeRegexParser(/^\s+/)
const pipe = makeStringParser('|')
const colon = makeStringParser(':')
const backslash = makeStringParser('\\')
const anyCharacter = makeRegexParser(/^./)
const dollar = makeStringParser('$')
const digits = makeRegexParser(/^\d+/)
const doubleQuote = makeStringParser('"')
const singleQuote = makeStringParser('\'')
// A literal is any character except the special characters in the message markup
// Special characters are: [, ], {, }, $, \, <, >
// If wikilinks parsing is disabled, treat [ and ] as regular text.
const regularLiteral = wikilinks ? makeRegexParser(/^[^{}[\]$<\\]/) : makeRegexParser(/^[^{}$<\\]/)
const regularLiteralWithoutBar = wikilinks ? makeRegexParser(/^[^{}[\]$\\|]/) : makeRegexParser(/^[^{}$\\|]/)
const regularLiteralWithoutSpace = wikilinks ? makeRegexParser(/^[^{}[\]$\s]/) : makeRegexParser(/^[^{}$\s]/)
// There is a general pattern:
// parse a thing;
// if it worked, apply transform,
// otherwise return null.
// But using this as a combinator seems to cause problems
// when combined with nOrMore().
// May be some scoping issue.
function transform (p, fn) {
return () => {
const result = p()
return result === null ? null : fn(result)
}
}
// Used to define "literals" within template parameters. The pipe
// character is the parameter delimeter, so by default
// it is not a literal in the parameter
function literalWithoutBar () {
const result = nOrMore(1, escapedOrLiteralWithoutBar)()
return result === null ? null : result.join('')
}
// Used to define "literals" within template parameters.
// The pipe character is the parameter delimeter, so by default
// it is not a literal in the parameter
function literal () {
const result = nOrMore(1, escapedOrRegularLiteral)()
return result === null ? null : result.join('')
}
const escapedOrLiteralWithoutSpace = choice([
escapedLiteral,
regularLiteralWithoutSpace
])
// Used to define "literals" without spaces, in space-delimited situations
function literalWithoutSpace () {
const result = nOrMore(1, escapedOrLiteralWithoutSpace)()
return result === null ? null : result.join('')
}
function escapedLiteral () {
const result = sequence([backslash, anyCharacter])
return result === null ? null : result[1]
}
choice([escapedLiteral, regularLiteralWithoutSpace])
const escapedOrLiteralWithoutBar = choice([escapedLiteral, regularLiteralWithoutBar])
const escapedOrRegularLiteral = choice([escapedLiteral, regularLiteral])
function replacement () {
const result = sequence([dollar, digits])
if (result === null) {
return null
}
return ['REPLACE', parseInt(result[1], 10) - 1]
}
const templateName = transform(
// see $wgLegalTitleChars
// not allowing : due to the need to catch "PLURAL:$1"
makeRegexParser(/^[ !"$&'()*,./0-9;=?@A-Z^_`a-z~\x80-\xFF+-]+/),
function (result) {
return result.toString()
}
)
function templateParam () {
const result = sequence([pipe, nOrMore(0, paramExpression)])
if (result === null) |
const expr = result[1]
// use a "CONCAT" operator if there are multiple nodes,
// otherwise return the first node, raw.
return expr.length > 1 ? ['CONCAT'].concat(expr) : expr[0]
}
function templateWithReplacement () {
const result = sequence([templateName, colon, replacement])
return result === null ? null : [result[0], result[2]]
}
function templateWithOutReplacement () {
const result = sequence([templateName, colon, paramExpression])
return result === null ? null : [result[0], result[2]]
}
function templateWithOutFirstParameter () {
const result = sequence([templateName, colon])
return result === null ? null : [result[0], '']
}
const templateContents = choice([
function () {
const res = sequence([
// templates can have placeholders for dynamic
// replacement eg: {{PLURAL:$1|one car|$1 cars}}
// or no placeholders eg:{{GRAMMAR:genitive|{{SITENAME}}}
// Templates can also have empty first param eg:{{GENDER:|A|B|C}}
// to indicate current user in the context. We need to parse them without
// error, but can only fallback to gender neutral form.
choice([templateWithReplacement, templateWithOutReplacement, templateWithOutFirstParameter]),
nOrMore(0, templateParam)
])
return res === null ? null : res[0].concat(res[1])
},
function () {
const res = sequence([templateName, nOrMore(0, templateParam)])
if (res === null) {
return null
}
return [res[0]].concat(res[1])
}
])
const openTemplate = makeStringParser('{{')
const closeTemplate = makeStringParser('}}')
const openWikilink = makeStringParser('[[')
const closeWikilink = makeStringParser(']]')
const openExtlink = makeStringParser('[')
const closeExtlink = makeStringParser(']')
/**
* An expression in the form of {{...}}
*/
function template () {
const result = sequence([openTemplate, templateContents, closeTemplate])
return result === null ? null : result[1]
}
function pipedWikilink () {
const result = sequence([
nOrMore(1, paramExpression),
pipe,
nOrMore(1, expression)
])
return result === null
? null
: [
['CONCAT'].concat(result[0]),
['CONCAT'].concat(result[2])
]
}
function unpipedWikilink () {
const result = sequence([
nOrMore(1, paramExpression)
])
return result === null
? null
: [
['CONCAT'].concat(result[0])
]
}
const wikilinkContents = choice([
pipedWikilink,
unpipedWikilink
])
function wikilink () {
let result = null
const parsedResult = sequence([
openWikilink,
wikilinkContents,
closeWikilink
])
if (parsedResult !== null) {
const parsedLinkContents = parsedResult[1]
result = ['WIKILINK'].concat(parsedLinkContents)
}
return result
}
// this extlink MUST have inner contents, e.g. [foo] not allowed; [foo bar] [foo <i>bar</i>], etc. are allowed
function extlink () {
let result = null
const parsedResult = sequence([
openExtlink,
nOrMore(1, nonWhitespaceExpression),
whitespace,
nOrMore(1, expression),
closeExtlink
])
if (parsedResult !== null) {
// When the entire link target is a single parameter, we can't use CONCAT, as we allow
// passing fancy parameters (like a whole jQuery object or a function) to use for the
// link. Check only if it's a single match, since we can either do CONCAT or not for
// singles with the same effect.
const target = parsedResult[1].length === 1
? parsedResult[1][0]
: ['CONCAT'].concat(parsedResult[1])
result = [
'EXTLINK',
target,
['CONCAT'].concat(parsedResult[3])
]
}
return result
}
const asciiAlphabetLiteral = makeRegexParser(/^[A-Za-z]+/)
/**
* Checks if HTML is allowed
*
* @param {string} startTagName HTML start tag name
* @param {string} endTagName HTML start tag name
* @param {Object} attributes array of consecutive key value pairs,
* with index 2 * n being a name and 2 * n + 1 the associated value
* @return {boolean} true if this is HTML is allowed, false otherwise
*/
function isAllowedHtml (startTagName, endTagName, attributes, settings = {
// Whitelist for allowed HTML elements in wikitext.
// Self-closing tags are not currently supported.
allowedHtmlElements: ['b', 'bdi', 'del', 'i', 'ins', 'u', 'font', 'big', 'small', 'sub',
'sup', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'cite', 'code', 'em', 's', 'strike', 'strong',
'tt', 'var', 'div', 'center', 'blockquote', 'ol', 'ul', 'dl', 'table', 'caption', 'pre',
'ruby', 'rb', 'rp', 'rt', 'rtc', 'p', 'span', 'abbr', 'dfn', 'kbd', 'samp', 'data', 'time',
'mark', 'li', 'dt', 'dd'],
// Key tag name, value allowed attributes for that tag.
// Sourced from Parsoid's Sanitizer::setupAttributeWhitelist
allowedHtmlCommonAttributes: [
// HTML
'id',
'class',
'style',
'lang',
'dir',
'title',
// WAI-ARIA
'aria-describedby',
'aria-flowto',
'aria-hidden',
'aria-label',
'aria-labelledby',
'aria-owns',
'role',
// RDFa
// These attributes are specified in section 9 of
// https://www.w3.org/TR/2008/REC-rdfa-syntax-20081014
'about',
'property',
'resource',
'datatype',
'typeof',
// Microdata. These are specified by
// https://html.spec.whatwg.org/multipage/microdata.html#the-microdata-model
'itemid',
'itemprop',
'itemref',
'itemscope',
'itemtype'
],
// Attributes allowed for specific elements.
// Key is element name in lower case
// Value is array of allowed attributes for that element
allowedHtmlAttributesByElement: {}
}) {
startTagName = startTagName.toLowerCase()
endTagName = endTagName.toLowerCase()
if (startTagName !== endTagName || settings.allowedHtmlElements.indexOf(startTagName) === -1) {
return false
}
const badStyle = /[\000-\010\013\016-\037\177]|expression|filter\s*:|accelerator\s*:|-o-link\s*:|-o-link-source\s*:|-o-replace\s*:|url\s*\(|image\s*\(|image-set\s*\(/i
for (let i = 0, len = attributes.length; i < len; i += 2) {
const attributeName = attributes[i]
if (settings.allowedHtmlCommonAttributes.indexOf(attributeName) === -1 &&
(settings.allowedHtmlAttributesByElement[startTagName] || []).indexOf(attributeName) === -1) {
return false
}
if (attributeName === 'style' && attributes[i + 1].search(badStyle) !== -1) {
return false
}
}
return true
}
function doubleQuotedHtmlAttributeValue () {
const htmlDoubleQuoteAttributeValue = makeRegexParser(/^[^"]*/)
const parsedResult = sequence([
doubleQuote,
htmlDoubleQuoteAttributeValue,
doubleQuote
])
return parsedResult === null ? null : parsedResult[1]
}
function singleQuotedHtmlAttributeValue () {
const htmlSingleQuoteAttributeValue = makeRegexParser(/^[^']*/)
const parsedResult = sequence([
singleQuote,
htmlSingleQuoteAttributeValue,
singleQuote
])
return parsedResult === null ? null : parsedResult[1]
}
function htmlAttribute () {
const htmlAttributeEquals = makeRegexParser(/^\s*=\s*/)
const parsedResult = sequence([
whitespace,
asciiAlphabetLiteral,
htmlAttributeEquals,
choice([
doubleQuotedHtmlAttributeValue,
singleQuotedHtmlAttributeValue
])
])
return parsedResult === null ? null : [parsedResult[1], parsedResult[3]]
}
function htmlAttributes () {
const parsedResult = nOrMore(0, htmlAttribute)()
// Un-nest attributes array due to structure of emitter operations.
return Array.prototype.concat.apply(['HTMLATTRIBUTES'], parsedResult)
}
// Parse, validate and escape HTML content in messages using a whitelisted tag names
// and attributes.
function html () {
let result = null
// Break into three sequence calls. That should allow accurate reconstruction of the original HTML, and requiring an exact tag name match.
// 1. open through closeHtmlTag
// 2. expression
// 3. openHtmlEnd through close
// This will allow recording the positions to reconstruct if HTML is to be treated as text.
const startOpenTagPos = pos
const openHtmlStartTag = makeStringParser('<')
const optionalForwardSlash = makeRegexParser(/^\/?/)
const closeHtmlTag = makeRegexParser(/^\s*>/)
const parsedOpenTagResult = sequence([
openHtmlStartTag,
asciiAlphabetLiteral,
htmlAttributes,
optionalForwardSlash,
closeHtmlTag
])
if (parsedOpenTagResult === null) {
return null
}
const endOpenTagPos = pos
const startTagName = parsedOpenTagResult[1]
const parsedHtmlContents = nOrMore(0, expression)()
const startCloseTagPos = pos
const openHtmlEndTag = makeStringParser('</')
const parsedCloseTagResult = sequence([
openHtmlEndTag,
asciiAlphabetLiteral,
closeHtmlTag
])
if (parsedCloseTagResult === null) {
// Closing tag failed. Return the start tag and contents.
return ['CONCAT', message.slice(startOpenTagPos, endOpenTagPos)]
.concat(parsedHtmlContents)
}
const endCloseTagPos = pos
const endTagName = parsedCloseTagResult[1]
const wrappedAttributes = parsedOpenTagResult[2]
const attributes = wrappedAttributes.slice(1)
if (isAllowedHtml(startTagName, endTagName, attributes)) {
result = ['HTMLELEMENT', startTagName, wrappedAttributes]
.concat(parsedHtmlContents)
} else {
// HTML is not allowed, so contents will remain how
// it was, while HTML markup at this level will be
// treated as text
// E.g. assuming script tags are not allowed:
//
// <script>[[Foo|bar]]</script>
//
// results in '<script>' and '</script>'
// (not treated as an HTML tag), surrounding a fully
// parsed HTML link.
//
// Concatenate everything from the tag, flattening the contents.
const escapeHTML = (unsafeContent) => unsafeContent
.replace(/&/g, '&')
.replace(/</g, '<')
.replace(/>/g, '>')
.replace(/"/g, '"')
.replace(/'/g, ''')
result = ['CONCAT', escapeHTML(message.slice(startOpenTagPos, endOpenTagPos))]
.concat(parsedHtmlContents, escapeHTML(message.slice(startCloseTagPos, endCloseTagPos)))
}
return result
}
const nonWhitespaceExpression = choice([
template,
replacement,
wikilink,
extlink,
literalWithoutSpace
])
const expression = choice([
template,
replacement,
wikilink,
extlink,
html,
literal
])
const paramExpression = choice([template, replacement, literalWithoutBar])
function start () {
const result = nOrMore(0, expression)()
if (result === null) {
return null
}
return ['CONCAT'].concat(result)
}
const result = start()
/*
* For success, the pos must have gotten to the end of the input
* and returned a non-null.
* n.b. This is part of language infrastructure, so we do not throw an internationalizable message.
*/
if (result === null || pos !== message.length) {
throw new Error('Parse error at position ' + pos.toString() + ' in input: ' + message)
}
return result
}
| {
return null
} | conditional_block |
capturehost.py | #!/usr/bin/env python3
import argparse
import socket
import asyncio
import subprocess
import time, os, sys
from PIL import Image
# The default camera parameters
# None marks them as undefined
# if defined they are (shutter, awb_1, awb_2, gain)
defaultCamParams = (33164, 1.5, 1.5, 4.8)
# ID -> ClienConnection
# Ordinary clients with camera for capturing
camera_clients = {}
# Additional clients for room periphrals (like servos)
aux_clients = {}
class ClientMsgError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "ClientMsgError: {}".format(self.msg)
class ClientResultError(ClientMsgError):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "ClientResultError: {}".format(self.msg)
class ClientNotFoundError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "ClientNotFoundError: {}".format(self.msg)
class ClientSocketError(Exception):
pass
# An instance of this class handles a connection to a single client.
# It offsers async functions to execute commands on the client.
# These functions wait until the command finishes.
# It may be a good idea to split this class into subclasses for the different
# client types. For now, all functions are in one class.
class ClientConnection:
def __init__(self, reader, writer):
self._reader = reader
self._writer = writer
self._buffer = str()
self._cid = None
# This function should be called first, it gets the ID of the client by reading the HI message form the socket
@asyncio.coroutine
def client_id(self):
if self._cid != None:
return self._cid
print("Waitng for client message")
msg = yield from self._get_next_message()
print(msg[0])
d = msg.split('|')
if len(d) != 2 or d[0] != 'HI':
print("Unexpected message from client\
(Expected HI message):\n%s" % msg)
self.remove_client()
raise ClientMsgError(msg)
if d[1].isdigit():
self._cid = int(d[1])
print("Camera client connected, id=%d" % self._cid)
else:
self._cid = d[1]
print("Auxiliary client connected, name=%s" % self._cid)
# confirm connection
yield from self._send("CON|%s~" % d[1])
return self._cid
def remove_client(self):
global camera_clients
global aux_clients
if self._cid in camera_clients.keys():
del camera_clients[self._cid]
if self._cid in aux_clients.keys():
del aux_clients[self._cid]
self._writer.close()
# Motor client only: Set the position of the slide. pos is from 0 to 1
@asyncio.coroutine
def aux_set_slide(self, pos):
|
# Set the camera resolution
# mode = (w, h, fps)
@asyncio.coroutine
def set_resolution(self, mode):
yield from self._send("RES|%d:%d:%d~" % mode)
yield from self._wait_for_ok("RES")
# Take a single image (and store it as <name>)
@asyncio.coroutine
def take_image(self, name):
yield from self._send("SAM|%s~" % name)
yield from self._wait_for_ok("SAM")
# Starts the recording
@asyncio.coroutine
def start_video(self, name):
yield from self._send("REC|%s~" % name)
# Stops the recording
@asyncio.coroutine
def stop_video(self):
yield from self._send("STP|~")
# Starts the stream
@asyncio.coroutine
def start_stream(self):
yield from self._send("STR|%s~")
# Stops the stream
@asyncio.coroutine
def stop_stream(self):
yield from self._send("STSTP|~")
# Automatic camera parameters
@asyncio.coroutine
def param_auto(self):
yield from self._send("CAP|~")
yield from self._wait_for_ok("CAP")
# Lock camera parameters to the current values
@asyncio.coroutine
def param_lock(self):
yield from self._send("CFP|~")
yield from self._wait_for_ok("CFP")
# Set the camera parameters
@asyncio.coroutine
def param_set(self, params):
yield from self._send("CSP|%i:%f:%f:%f~" % params)
yield from self._wait_for_ok("CSP")
# Get the current parameters form a camera
@asyncio.coroutine
def get_cam_params(self, mode):
yield from self.param_auto()
yield from self.set_resolution(mode)
print("Resolution set! Wait 5 sec for exposure values to adapt")
yield from asyncio.sleep(5)
yield from self.param_lock()
yield from self._send("CGP|~")
status, data = yield from self._get_resposne()
if status != "CP":
print("Unexpected response to parameter query: %s" % status)
raise ClientResultError(status)
d = data.split(":")
if (len(d) != 4 or (not isFloat(d[0])) or (not isFloat(d[1])) or
(not isFloat(d[2])) or (not isFloat(d[3]))):
print("Invalid CP message: %s" % data)
raise ClientMsgError(data)
params = (int(d[0]), float(d[1]), float(d[2]), float(d[3]))
return params
# Internal: Read next message form the socket
@asyncio.coroutine
def _get_next_message(self):
while True:
delim = self._buffer.find('~')
if delim != -1:
res = self._buffer[:delim]
self._buffer = self._buffer[delim + 1:]
return res.strip('\r\n')
try:
data = yield from self._reader.read(100)
self._buffer += data.decode('ascii')
except Exception as e:
print(e)
self.remove_client()
raise ClientSocketError()
# Internal: Gets the response for a command
@asyncio.coroutine
def _get_resposne(self):
msg = yield from self._get_next_message()
delim = msg.find("|")
if (msg == "" or delim == -1):
# format invalid
print("Received invalid response: %s" % msg)
raise ClientMsgError(msg)
# status ~ code of the message
# data ~ data of the message
status = msg[0:delim]
data = msg[delim + 1:]
return (status, data)
# Wait for a 'OK' result
@asyncio.coroutine
def _wait_for_ok(self, command):
status, data = yield from self._get_resposne()
if status != "OK" or data != command:
raise ClientResultError(command)
# Send a command
@asyncio.coroutine
def _send(self, msg):
self._writer.write(msg.encode('ascii'))
yield from self._writer.drain()
# Calls the given function with the given paramerters on all clients and waits for the results
@asyncio.coroutine
def command_all_clients(function, *args):
coros = []
for client in camera_clients.values():
if client != None:
coros += [function(client, *args)]
yield from asyncio.gather(*coros)
# Take an image with all cameras
@asyncio.coroutine
def take_client_images(filename, resolution=None, params=None):
if resolution != None:
print("Set resolution")
yield from command_all_clients(ClientConnection.set_resolution, resolution)
if params != None:
print("Set params")
yield from command_all_clients(ClientConnection.param_set, params)
print("Take image")
yield from command_all_clients(ClientConnection.take_image, filename)
# Starts the capture on all cameras
@asyncio.coroutine
def start_capture(filename, resolution, params):
#print("Set resolution")
yield from command_all_clients(ClientConnection.set_resolution, resolution)
#print("Set params")
yield from command_all_clients(ClientConnection.param_set, params)
#print("Starting video")
yield from command_all_clients(ClientConnection.start_video, filename)
# Stops the capture
@asyncio.coroutine
def stop_capture():
#print("Stop video")
yield from command_all_clients(ClientConnection.stop_video)
#print("--> DONE <--")
# Move the marker to a given position and wait for a given time
@asyncio.coroutine
def move_marker(pos, wait=5):
print("Move marker to %f" % pos)
if not "CS" in aux_clients.keys():
raise ClientNotFoundError("Callibration silde client is not connected!")
yield from aux_clients["CS"].aux_set_slide(pos)
if wait > 0:
print("OK, wait %i sec for marker to stop wobbling" % wait)
yield from asyncio.sleep(wait)
# Execute the calibration
# Move the marker step-by-step and take images
@asyncio.coroutine
def do_calibration(resolution, steps, params):
print("Start calibartion run")
#params = (1000, 1, 1, 1.5);
#defaultCamParams = (33164, 1.5, 1.5, 4.8)
print("Set params")
yield from command_all_clients(ClientConnection.param_set, params)
print("Set resolution")
yield from command_all_clients(ClientConnection.set_resolution, resolution)
print("Now start moving marker")
for p in range(steps + 1):
cur = p / float(steps)
yield from move_marker(cur)
yield from take_client_images("/home/pi/flow.df/calib_%02d.jpg" % p)
print("Returning marker to home position")
yield from move_marker(0, 0)
# Starts the stream on a given client (UNTESTED)
@asyncio.coroutine
def do_streaming(client):
#open socket
listening_sock = socket.socket()
listening_sock.bind(('0.0.0.0', 8000))
listening_sock.listen(0)
#send msg to cam to start stream
yield from client.start_stream()
#accept connection from camera
connection = listening_sock.accept()[0].makefile('rb')
print("streaming connection opened")
try:
#open player
print("open player")
cmdline = ['mplayer', '-fps', '90', '-cache', '1024', '-']
player = subprocess.Popen(cmdline, stdin=subprocess.PIPE)
print("player opened")
while True:
data = connection.read(1024)
#print "data received"
if not data:
print("no data")
break
player.stdin.write(data)
except:
print("data reading and writing to mplayer failed")
connection.close()
listening_sock.close()
player.terminate()
client.stop_stream()
print("Stream eneded")
# This is the 'main' function of this.
# In here all the CLI input processing is done and the commands are called.
@asyncio.coroutine
def handle_user_input(stdin, master_id, exposure_correction, cam_mode, loop=None):
global camera_clients
# the current camera parameters
currentParamData = defaultCamParams
while True:
line = yield from stdin.readline()
data = line.decode('ascii').strip('\r\n\t ')
if data == 'h':
# help
print("c Start capture")
print("e Get exposure values")
print("ec Set exposure correction value")
print("f Close / open valves to fill box with smoke")
print("p Take position image")
print("s Take sample image")
print("q quit")
print("l live video (requires mplayer)")
print("cal start calibration")
elif data.startswith('ec'):
p = data.split(' ')
if len(p) == 1:
# no argument -> print current value
print("exposure_correction = %f" % exposure_correction)
else:
# else: has argument -> check if valid
if not isFloat(p[1]) or float(p[1]) <= 0:
print("illegal value: %s" % p[1])
continue
# set new value and print
exposure_correction = float(p[1])
print("OK, exposure_correction = %f" % exposure_correction)
# print resulting gain (if parameters are set)
if currentParamData != None:
(s, a1, a2, g) = currentParamData
g = min(max(g * exposure_correction, 1), 12)
print("gain is now = %f" % g)
currentParamData = (s, a1, a2, g)
elif data == 'p':
# position image
try:
yield from take_client_images("/home/pi/flow.df/pos.jpg", (2592, 1944, 1))
except (ClientMsgError, ClientSocketError) as err:
print(err)
print("--> DONE <--")
elif data == 's':
print("Take sample image")
try:
yield from take_client_images("/home/pi/flow.df/sample.jpg", cam_mode, currentParamData)
except (ClientMsgError, ClientSocketError) as err:
print(err)
sampleFilename = '/home/student/frejek/rpi/%d/sample.jpg'
images = []
for i in range(0,5):
im = Image.open(sampleFilename%(i+1))
im = im.transpose(Image.ROTATE_90)
#im.show()
images.append(im)
new_im = Image.new('L', (5*1080, 1920))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
new_im.save('/home/student/frejek/samples.png')
size = 1.6*1080, 1.6*384
new_im.thumbnail(size, Image.ANTIALIAS)
new_im.show()
print("--> DONE <--")
elif data == 'e':
print("Get exposure values")
if (not master_id in camera_clients.keys()) or camera_clients[master_id] == None:
print(camera_clients.keys())
print("No client: %d (reference client id)" % master_id)
continue
try:
currentParamData = yield from camera_clients[master_id].get_cam_params(cam_mode)
except (ClientMsgError, ClientSocketError) as err:
print(err)
continue
(s, a1, a2, g) = currentParamData
g2 = min(max(g * exposure_correction, 1), 12)
currentParamData = (s, a1, a2, g2)
print("Camera params: shutter=%s, awb=(%s, %s), gain=%s, gain(corrected)=%f" % (s, a1, a2, g, g2))
elif data.startswith('c'):
arg = data.split(" ")
if arg[0] == 'c':
if len(arg) < 6:
print('C is missing additional arguments (videoFolder, percentageLower, numOfCaptures, smokeSecondsInit, smokeSecondsLoop)')
else:
print('')
print('---------------------------------------------------------')
print('Make sure there is enough smoke fluid in smoke machine!!!')
print('---------------------------------------------------------')
print('')
#print("Release smoke and record videos.")
if currentParamData == None:
# capture without parameters set is not allowed
print("Can't start capture without exposure data")
continue
print("Close valves.")
subprocess.call(['./servocontrol.sh', 'fill', '0'])
videoFolder = arg[1]
percentageLower = arg[2]
numOfCaptures = int(arg[3])
smokeSecondsInit = arg[4]
smokeSecondsLoop = arg[5]
print("Fill box with smoke, init.")
subprocess.call(['./fillBox.sh', smokeSecondsInit])
print("Wait 150s for smoke to disappear and people leaving the room.")
time.sleep(150)
# loop for each capture process
numberNaming = 0
for capture in range(numOfCaptures):
try:
print("Start capture.")
yield from start_capture("rec.h264", cam_mode, currentParamData)
time.sleep(5)
print("Open valves.")
subprocess.call(['./servocontrol.sh', 'plume', percentageLower])
except (ClientMsgError, ClientSocketError) as err:
print(err)
print('Capture in progress.')
time.sleep(18)
try:
print("Stopping capture.")
yield from stop_capture()
print("Closing valves.")
subprocess.call(['./servocontrol.sh', 'fill', '0'])
print("Download videos.")
while os.path.isdir(videoFolder+'_%s_%04d' % (percentageLower, numberNaming)):
numberNaming = numberNaming+1
if numberNaming>9999:
print('Folder exists: %s'%(videoFolder+'_%s_%04d' % (percentageLower, numberNaming)))
break
subprocess.call(['./grabVideos.sh', videoFolder+'_%s_%04d' % (percentageLower, numberNaming)])
except (ClientMsgError, ClientSocketError) as err:
print(err)
print("Done with capture %04d."%capture)
if capture+1<numOfCaptures:
time.sleep(60)
print("Close valves to fill box with smoke.")
subprocess.call(['./servocontrol.sh', 'fill', '0'])
print("Fill box with smoke, wait for smoke to disappear, loop.")
subprocess.call(['./fillBox.sh', smokeSecondsLoop])
time.sleep(180)
print("Done with all captures 'c'.")
print("Close valves.")
subprocess.call(['./servocontrol.sh', 'fill', '0'])
elif arg[0] == 'cal':
if len(arg) < 2:
print('CAL is missing additional argument (calibFolder)')
else:
calibFolder = arg[1]
try:
yield from do_calibration((2592, 1944, 1), 20, currentParamData)
print("--> DONE CAL <--")
print("--> gather calibs <--")
subprocess.call(['./gather_calib.sh', calibFolder])
print("--> process images <--")
subprocess.Popen(['./calibration/process_images.sh', calibFolder, '--ed=1000'])
print("--> DONE process images <--")
except (ClientMsgError, ClientSocketError, ClientNotFoundError) as err:
print(err)
else:
print("Unknown command: ", data)
elif data.startswith('l'):
arg = data.split(' ')
if len(arg) == 1:
# no argument -> print current value
print("please add which client should be streamed")
continue
else:
# else: has argument -> check if valid
if not arg[1].isdigit():
print("Not a decimal number: %s" % arg[1])
continue
client_num = int(arg[1])
if not client_num in camera_clients.keys() or camera_clients[client_num] == None:
print("No client with id: %d (reference client id)" % client_num)
continue
try:
yield from do_streaming(camera_clients[client_num])
except (ClientMsgError, ClientSocketError) as err:
print(err)
elif data == 'q':
# q is same as CRTL+c
raise KeyboardInterrupt
else:
print("Unknown command: ", data)
# checks if a value can be interpreted as a float
# fails with exponential values, but they do not occure here
def isFloat(str):
return str.replace(".", "", 1).isdigit()
# Handles a connection from a new client
@asyncio.coroutine
def handle_connection(reader, writer):
global camera_clients
global aux_clients
try:
c = ClientConnection(reader, writer)
new_client_id = yield from c.client_id()
if type(new_client_id) is int:
camera_clients[new_client_id] = c
else:
aux_clients[new_client_id] = c
except (ClientMsgError, ClientSocketError) as e:
print(e)
print("Failed to establish connection")
return
# Returns async reader for stdio
@asyncio.coroutine
def setup_stdio_reader(loop=None):
if not loop:
loop = asyncio.get_event_loop()
reader = asyncio.StreamReader()
reader_protocol = asyncio.StreamReaderProtocol(reader)
yield from loop.connect_read_pipe(lambda: reader_protocol, sys.stdin)
return reader
def main(argv):
parser = argparse.ArgumentParser(
description='Host program for smoke capture camera system')
parser.add_argument('-r', '--reference', type=int, default=1,
help='Client id of the reference client (used for\
parameter synchronization)')
parser.add_argument('-W', '--width', type=int, default=1296,
help='Width (resolution) of the recorded video')
parser.add_argument('-H', '--height', type=int, default=972,
help='Height (resolution) of the recorded video')
parser.add_argument('-F', '--fps', type=int, default=30,
help='Frame rate of the recorded video')
#parser.add_argument('-b', '--bind', default='131.159.40.51',
# help='Bind address used for the listening socket')
parser.add_argument('-e', '--exposure_correction', default='0.8',
type=float,
help='Defines the initial value for the gain\
correction (this value is multilied with the\
current gain)')
args = parser.parse_args()
master_id = args.reference
default_ec = args.exposure_correction
cam_mode = (args.width, args.height, args.fps)
loop = asyncio.get_event_loop()
stdin_reader = loop.run_until_complete(setup_stdio_reader(loop=loop))
input_coro = handle_user_input(stdin_reader, master_id, default_ec, cam_mode, loop=loop)
server_coro = asyncio.start_server(handle_connection, '0.0.0.0', 54321, loop=loop)
server = loop.run_until_complete(server_coro)
try:
loop.run_until_complete(input_coro)
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
if __name__ == "__main__":
main(sys.argv)
| yield from self._send("SET|%f~" % pos)
yield from self._wait_for_ok("SET") | identifier_body |
capturehost.py | #!/usr/bin/env python3
import argparse
import socket
import asyncio
import subprocess
import time, os, sys
from PIL import Image
# The default camera parameters
# None marks them as undefined
# if defined they are (shutter, awb_1, awb_2, gain)
defaultCamParams = (33164, 1.5, 1.5, 4.8)
# ID -> ClienConnection
# Ordinary clients with camera for capturing
camera_clients = {}
# Additional clients for room periphrals (like servos)
aux_clients = {}
class ClientMsgError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "ClientMsgError: {}".format(self.msg)
class ClientResultError(ClientMsgError):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "ClientResultError: {}".format(self.msg)
class ClientNotFoundError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "ClientNotFoundError: {}".format(self.msg)
class ClientSocketError(Exception):
pass
# An instance of this class handles a connection to a single client.
# It offsers async functions to execute commands on the client.
# These functions wait until the command finishes.
# It may be a good idea to split this class into subclasses for the different
# client types. For now, all functions are in one class.
class ClientConnection:
def __init__(self, reader, writer):
self._reader = reader
self._writer = writer
self._buffer = str()
self._cid = None
# This function should be called first, it gets the ID of the client by reading the HI message form the socket
@asyncio.coroutine
def client_id(self):
if self._cid != None:
return self._cid
print("Waitng for client message")
msg = yield from self._get_next_message()
print(msg[0])
d = msg.split('|')
if len(d) != 2 or d[0] != 'HI':
print("Unexpected message from client\
(Expected HI message):\n%s" % msg)
self.remove_client()
raise ClientMsgError(msg)
if d[1].isdigit():
self._cid = int(d[1])
print("Camera client connected, id=%d" % self._cid)
else:
self._cid = d[1]
print("Auxiliary client connected, name=%s" % self._cid)
# confirm connection
yield from self._send("CON|%s~" % d[1])
return self._cid
def remove_client(self):
global camera_clients
global aux_clients
if self._cid in camera_clients.keys():
del camera_clients[self._cid]
if self._cid in aux_clients.keys():
del aux_clients[self._cid]
self._writer.close()
# Motor client only: Set the position of the slide. pos is from 0 to 1
@asyncio.coroutine
def aux_set_slide(self, pos):
yield from self._send("SET|%f~" % pos)
yield from self._wait_for_ok("SET")
# Set the camera resolution
# mode = (w, h, fps)
@asyncio.coroutine
def set_resolution(self, mode):
yield from self._send("RES|%d:%d:%d~" % mode)
yield from self._wait_for_ok("RES")
# Take a single image (and store it as <name>)
@asyncio.coroutine
def take_image(self, name):
yield from self._send("SAM|%s~" % name)
yield from self._wait_for_ok("SAM")
# Starts the recording
@asyncio.coroutine
def start_video(self, name):
yield from self._send("REC|%s~" % name)
# Stops the recording
@asyncio.coroutine
def stop_video(self):
yield from self._send("STP|~")
# Starts the stream
@asyncio.coroutine
def start_stream(self):
yield from self._send("STR|%s~")
# Stops the stream
@asyncio.coroutine
def stop_stream(self):
yield from self._send("STSTP|~")
# Automatic camera parameters
@asyncio.coroutine
def param_auto(self):
yield from self._send("CAP|~")
yield from self._wait_for_ok("CAP")
# Lock camera parameters to the current values
@asyncio.coroutine
def param_lock(self):
yield from self._send("CFP|~")
yield from self._wait_for_ok("CFP")
# Set the camera parameters
@asyncio.coroutine
def param_set(self, params):
yield from self._send("CSP|%i:%f:%f:%f~" % params)
yield from self._wait_for_ok("CSP")
# Get the current parameters form a camera
@asyncio.coroutine
def get_cam_params(self, mode):
yield from self.param_auto()
yield from self.set_resolution(mode)
print("Resolution set! Wait 5 sec for exposure values to adapt")
yield from asyncio.sleep(5)
yield from self.param_lock()
yield from self._send("CGP|~")
status, data = yield from self._get_resposne()
if status != "CP":
print("Unexpected response to parameter query: %s" % status)
raise ClientResultError(status)
d = data.split(":")
if (len(d) != 4 or (not isFloat(d[0])) or (not isFloat(d[1])) or
(not isFloat(d[2])) or (not isFloat(d[3]))):
print("Invalid CP message: %s" % data)
raise ClientMsgError(data)
params = (int(d[0]), float(d[1]), float(d[2]), float(d[3]))
return params
# Internal: Read next message form the socket
@asyncio.coroutine
def _get_next_message(self):
while True:
delim = self._buffer.find('~')
if delim != -1:
res = self._buffer[:delim]
self._buffer = self._buffer[delim + 1:]
return res.strip('\r\n')
try:
data = yield from self._reader.read(100)
self._buffer += data.decode('ascii')
except Exception as e:
print(e)
self.remove_client()
raise ClientSocketError()
# Internal: Gets the response for a command
@asyncio.coroutine
def _get_resposne(self):
msg = yield from self._get_next_message()
delim = msg.find("|")
if (msg == "" or delim == -1):
# format invalid
print("Received invalid response: %s" % msg)
raise ClientMsgError(msg)
# status ~ code of the message
# data ~ data of the message
status = msg[0:delim]
data = msg[delim + 1:]
return (status, data)
# Wait for a 'OK' result
@asyncio.coroutine
def _wait_for_ok(self, command):
status, data = yield from self._get_resposne()
if status != "OK" or data != command:
raise ClientResultError(command)
# Send a command
@asyncio.coroutine
def _send(self, msg):
self._writer.write(msg.encode('ascii'))
yield from self._writer.drain()
# Calls the given function with the given paramerters on all clients and waits for the results
@asyncio.coroutine
def command_all_clients(function, *args):
coros = []
for client in camera_clients.values():
if client != None:
coros += [function(client, *args)]
yield from asyncio.gather(*coros)
# Take an image with all cameras
@asyncio.coroutine
def take_client_images(filename, resolution=None, params=None):
if resolution != None:
print("Set resolution")
yield from command_all_clients(ClientConnection.set_resolution, resolution)
if params != None:
print("Set params")
yield from command_all_clients(ClientConnection.param_set, params)
print("Take image")
yield from command_all_clients(ClientConnection.take_image, filename)
# Starts the capture on all cameras
@asyncio.coroutine
def start_capture(filename, resolution, params):
#print("Set resolution")
yield from command_all_clients(ClientConnection.set_resolution, resolution)
#print("Set params")
yield from command_all_clients(ClientConnection.param_set, params)
#print("Starting video")
yield from command_all_clients(ClientConnection.start_video, filename)
# Stops the capture
@asyncio.coroutine
def stop_capture():
#print("Stop video")
yield from command_all_clients(ClientConnection.stop_video)
#print("--> DONE <--")
# Move the marker to a given position and wait for a given time
@asyncio.coroutine
def move_marker(pos, wait=5):
print("Move marker to %f" % pos)
if not "CS" in aux_clients.keys():
raise ClientNotFoundError("Callibration silde client is not connected!")
yield from aux_clients["CS"].aux_set_slide(pos)
if wait > 0:
print("OK, wait %i sec for marker to stop wobbling" % wait)
yield from asyncio.sleep(wait)
# Execute the calibration
# Move the marker step-by-step and take images
@asyncio.coroutine
def do_calibration(resolution, steps, params):
print("Start calibartion run")
#params = (1000, 1, 1, 1.5);
#defaultCamParams = (33164, 1.5, 1.5, 4.8)
print("Set params")
yield from command_all_clients(ClientConnection.param_set, params)
print("Set resolution")
yield from command_all_clients(ClientConnection.set_resolution, resolution)
print("Now start moving marker")
for p in range(steps + 1):
cur = p / float(steps)
yield from move_marker(cur)
yield from take_client_images("/home/pi/flow.df/calib_%02d.jpg" % p)
print("Returning marker to home position")
yield from move_marker(0, 0)
# Starts the stream on a given client (UNTESTED)
@asyncio.coroutine
def | (client):
#open socket
listening_sock = socket.socket()
listening_sock.bind(('0.0.0.0', 8000))
listening_sock.listen(0)
#send msg to cam to start stream
yield from client.start_stream()
#accept connection from camera
connection = listening_sock.accept()[0].makefile('rb')
print("streaming connection opened")
try:
#open player
print("open player")
cmdline = ['mplayer', '-fps', '90', '-cache', '1024', '-']
player = subprocess.Popen(cmdline, stdin=subprocess.PIPE)
print("player opened")
while True:
data = connection.read(1024)
#print "data received"
if not data:
print("no data")
break
player.stdin.write(data)
except:
print("data reading and writing to mplayer failed")
connection.close()
listening_sock.close()
player.terminate()
client.stop_stream()
print("Stream eneded")
# This is the 'main' function of this.
# In here all the CLI input processing is done and the commands are called.
@asyncio.coroutine
def handle_user_input(stdin, master_id, exposure_correction, cam_mode, loop=None):
global camera_clients
# the current camera parameters
currentParamData = defaultCamParams
while True:
line = yield from stdin.readline()
data = line.decode('ascii').strip('\r\n\t ')
if data == 'h':
# help
print("c Start capture")
print("e Get exposure values")
print("ec Set exposure correction value")
print("f Close / open valves to fill box with smoke")
print("p Take position image")
print("s Take sample image")
print("q quit")
print("l live video (requires mplayer)")
print("cal start calibration")
elif data.startswith('ec'):
p = data.split(' ')
if len(p) == 1:
# no argument -> print current value
print("exposure_correction = %f" % exposure_correction)
else:
# else: has argument -> check if valid
if not isFloat(p[1]) or float(p[1]) <= 0:
print("illegal value: %s" % p[1])
continue
# set new value and print
exposure_correction = float(p[1])
print("OK, exposure_correction = %f" % exposure_correction)
# print resulting gain (if parameters are set)
if currentParamData != None:
(s, a1, a2, g) = currentParamData
g = min(max(g * exposure_correction, 1), 12)
print("gain is now = %f" % g)
currentParamData = (s, a1, a2, g)
elif data == 'p':
# position image
try:
yield from take_client_images("/home/pi/flow.df/pos.jpg", (2592, 1944, 1))
except (ClientMsgError, ClientSocketError) as err:
print(err)
print("--> DONE <--")
elif data == 's':
print("Take sample image")
try:
yield from take_client_images("/home/pi/flow.df/sample.jpg", cam_mode, currentParamData)
except (ClientMsgError, ClientSocketError) as err:
print(err)
sampleFilename = '/home/student/frejek/rpi/%d/sample.jpg'
images = []
for i in range(0,5):
im = Image.open(sampleFilename%(i+1))
im = im.transpose(Image.ROTATE_90)
#im.show()
images.append(im)
new_im = Image.new('L', (5*1080, 1920))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
new_im.save('/home/student/frejek/samples.png')
size = 1.6*1080, 1.6*384
new_im.thumbnail(size, Image.ANTIALIAS)
new_im.show()
print("--> DONE <--")
elif data == 'e':
print("Get exposure values")
if (not master_id in camera_clients.keys()) or camera_clients[master_id] == None:
print(camera_clients.keys())
print("No client: %d (reference client id)" % master_id)
continue
try:
currentParamData = yield from camera_clients[master_id].get_cam_params(cam_mode)
except (ClientMsgError, ClientSocketError) as err:
print(err)
continue
(s, a1, a2, g) = currentParamData
g2 = min(max(g * exposure_correction, 1), 12)
currentParamData = (s, a1, a2, g2)
print("Camera params: shutter=%s, awb=(%s, %s), gain=%s, gain(corrected)=%f" % (s, a1, a2, g, g2))
elif data.startswith('c'):
arg = data.split(" ")
if arg[0] == 'c':
if len(arg) < 6:
print('C is missing additional arguments (videoFolder, percentageLower, numOfCaptures, smokeSecondsInit, smokeSecondsLoop)')
else:
print('')
print('---------------------------------------------------------')
print('Make sure there is enough smoke fluid in smoke machine!!!')
print('---------------------------------------------------------')
print('')
#print("Release smoke and record videos.")
if currentParamData == None:
# capture without parameters set is not allowed
print("Can't start capture without exposure data")
continue
print("Close valves.")
subprocess.call(['./servocontrol.sh', 'fill', '0'])
videoFolder = arg[1]
percentageLower = arg[2]
numOfCaptures = int(arg[3])
smokeSecondsInit = arg[4]
smokeSecondsLoop = arg[5]
print("Fill box with smoke, init.")
subprocess.call(['./fillBox.sh', smokeSecondsInit])
print("Wait 150s for smoke to disappear and people leaving the room.")
time.sleep(150)
# loop for each capture process
numberNaming = 0
for capture in range(numOfCaptures):
try:
print("Start capture.")
yield from start_capture("rec.h264", cam_mode, currentParamData)
time.sleep(5)
print("Open valves.")
subprocess.call(['./servocontrol.sh', 'plume', percentageLower])
except (ClientMsgError, ClientSocketError) as err:
print(err)
print('Capture in progress.')
time.sleep(18)
try:
print("Stopping capture.")
yield from stop_capture()
print("Closing valves.")
subprocess.call(['./servocontrol.sh', 'fill', '0'])
print("Download videos.")
while os.path.isdir(videoFolder+'_%s_%04d' % (percentageLower, numberNaming)):
numberNaming = numberNaming+1
if numberNaming>9999:
print('Folder exists: %s'%(videoFolder+'_%s_%04d' % (percentageLower, numberNaming)))
break
subprocess.call(['./grabVideos.sh', videoFolder+'_%s_%04d' % (percentageLower, numberNaming)])
except (ClientMsgError, ClientSocketError) as err:
print(err)
print("Done with capture %04d."%capture)
if capture+1<numOfCaptures:
time.sleep(60)
print("Close valves to fill box with smoke.")
subprocess.call(['./servocontrol.sh', 'fill', '0'])
print("Fill box with smoke, wait for smoke to disappear, loop.")
subprocess.call(['./fillBox.sh', smokeSecondsLoop])
time.sleep(180)
print("Done with all captures 'c'.")
print("Close valves.")
subprocess.call(['./servocontrol.sh', 'fill', '0'])
elif arg[0] == 'cal':
if len(arg) < 2:
print('CAL is missing additional argument (calibFolder)')
else:
calibFolder = arg[1]
try:
yield from do_calibration((2592, 1944, 1), 20, currentParamData)
print("--> DONE CAL <--")
print("--> gather calibs <--")
subprocess.call(['./gather_calib.sh', calibFolder])
print("--> process images <--")
subprocess.Popen(['./calibration/process_images.sh', calibFolder, '--ed=1000'])
print("--> DONE process images <--")
except (ClientMsgError, ClientSocketError, ClientNotFoundError) as err:
print(err)
else:
print("Unknown command: ", data)
elif data.startswith('l'):
arg = data.split(' ')
if len(arg) == 1:
# no argument -> print current value
print("please add which client should be streamed")
continue
else:
# else: has argument -> check if valid
if not arg[1].isdigit():
print("Not a decimal number: %s" % arg[1])
continue
client_num = int(arg[1])
if not client_num in camera_clients.keys() or camera_clients[client_num] == None:
print("No client with id: %d (reference client id)" % client_num)
continue
try:
yield from do_streaming(camera_clients[client_num])
except (ClientMsgError, ClientSocketError) as err:
print(err)
elif data == 'q':
# q is same as CRTL+c
raise KeyboardInterrupt
else:
print("Unknown command: ", data)
# checks if a value can be interpreted as a float
# fails with exponential values, but they do not occure here
def isFloat(str):
return str.replace(".", "", 1).isdigit()
# Handles a connection from a new client
@asyncio.coroutine
def handle_connection(reader, writer):
global camera_clients
global aux_clients
try:
c = ClientConnection(reader, writer)
new_client_id = yield from c.client_id()
if type(new_client_id) is int:
camera_clients[new_client_id] = c
else:
aux_clients[new_client_id] = c
except (ClientMsgError, ClientSocketError) as e:
print(e)
print("Failed to establish connection")
return
# Returns async reader for stdio
@asyncio.coroutine
def setup_stdio_reader(loop=None):
if not loop:
loop = asyncio.get_event_loop()
reader = asyncio.StreamReader()
reader_protocol = asyncio.StreamReaderProtocol(reader)
yield from loop.connect_read_pipe(lambda: reader_protocol, sys.stdin)
return reader
def main(argv):
    # Parse the CLI options, start the TCP command server on port 54321
    # and run the interactive input loop until the user quits (q / Ctrl+C).
    parser = argparse.ArgumentParser(
        description='Host program for smoke capture camera system')
    parser.add_argument('-r', '--reference', type=int, default=1,
                        help='Client id of the reference client (used for\
                        parameter synchronization)')
    parser.add_argument('-W', '--width', type=int, default=1296,
                        help='Width (resolution) of the recorded video')
    parser.add_argument('-H', '--height', type=int, default=972,
                        help='Height (resolution) of the recorded video')
    parser.add_argument('-F', '--fps', type=int, default=30,
                        help='Frame rate of the recorded video')
    #parser.add_argument('-b', '--bind', default='131.159.40.51',
    #                    help='Bind address used for the listening socket')
    parser.add_argument('-e', '--exposure_correction', default='0.8',
                        type=float,
                        help='Defines the initial value for the gain\
                        correction (this value is multilied with the\
                        current gain)')
    args = parser.parse_args()
    master_id = args.reference
    default_ec = args.exposure_correction
    cam_mode = (args.width, args.height, args.fps)
    loop = asyncio.get_event_loop()
    # stdin must be wrapped into an asyncio reader before the loop runs
    stdin_reader = loop.run_until_complete(setup_stdio_reader(loop=loop))
    input_coro = handle_user_input(stdin_reader, master_id, default_ec, cam_mode, loop=loop)
    server_coro = asyncio.start_server(handle_connection, '0.0.0.0', 54321, loop=loop)
    server = loop.run_until_complete(server_coro)
    try:
        loop.run_until_complete(input_coro)
    except KeyboardInterrupt:
        pass
    # Close the server
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
# Entry point when run as a script
if __name__ == "__main__":
    main(sys.argv)
| do_streaming | identifier_name |
capturehost.py | #!/usr/bin/env python3
import argparse
import socket
import asyncio
import subprocess
import time, os, sys
from PIL import Image
# The default camera parameters
# None marks them as undefined
# if defined they are (shutter, awb_1, awb_2, gain)
defaultCamParams = (33164, 1.5, 1.5, 4.8)
# ID -> ClienConnection
# Ordinary clients with camera for capturing
camera_clients = {}
# Additional clients for room periphrals (like servos)
aux_clients = {}
class ClientMsgError(Exception):
    """Raised when a client sends a malformed or unexpected message."""
    def __init__(self, msg):
        # FIX: chain through Exception so .args is populated as well
        super().__init__(msg)
        self.msg = msg
    def __str__(self):
        return "ClientMsgError: {}".format(self.msg)
class ClientResultError(ClientMsgError):
    """Raised when a client replies with an unexpected status or result."""
    def __init__(self, msg):
        # FIX: chain through the base class so .args is populated as well
        super().__init__(msg)
        self.msg = msg
    def __str__(self):
        return "ClientResultError: {}".format(self.msg)
class ClientNotFoundError(Exception):
    """Raised when a command targets a client that is not connected."""
    def __init__(self, msg):
        # FIX: chain through Exception so .args is populated as well
        super().__init__(msg)
        self.msg = msg
    def __str__(self):
        return "ClientNotFoundError: {}".format(self.msg)
class ClientSocketError(Exception):
    """Raised when reading from a client socket fails or the link is lost."""
    pass
# An instance of this class handles a connection to a single client.
# It offsers async functions to execute commands on the client.
# These functions wait until the command finishes.
# It may be a good idea to split this class into subclasses for the different
# client types. For now, all functions are in one class.
class ClientConnection:
    """Handles the connection to a single client.

    Offers coroutine functions to execute commands on the client; these
    functions wait until the command finishes.  It may be a good idea to
    split this class into subclasses for the different client types.
    For now, all functions are in one class.
    """
    def __init__(self, reader, writer):
        self._reader = reader    # asyncio stream reader for the socket
        self._writer = writer    # asyncio stream writer for the socket
        self._buffer = str()     # receive buffer; messages end with '~'
        self._cid = None         # client id, filled in by client_id()
    # This function should be called first, it gets the ID of the client
    # by reading the HI message from the socket
    @asyncio.coroutine
    def client_id(self):
        if self._cid != None:
            return self._cid
        print("Waitng for client message")
        msg = yield from self._get_next_message()
        print(msg[0])  # NOTE(review): prints only the first character -- debug leftover?
        d = msg.split('|')
        if len(d) != 2 or d[0] != 'HI':
            # FIX: the original string used a backslash continuation whose
            # indentation leaked into the printed message
            print("Unexpected message from client (Expected HI message):\n%s" % msg)
            self.remove_client()
            raise ClientMsgError(msg)
        if d[1].isdigit():
            # numeric id -> camera client
            self._cid = int(d[1])
            print("Camera client connected, id=%d" % self._cid)
        else:
            # non-numeric id -> auxiliary client (e.g. the slide motor)
            self._cid = d[1]
            print("Auxiliary client connected, name=%s" % self._cid)
        # confirm connection
        yield from self._send("CON|%s~" % d[1])
        return self._cid
    def remove_client(self):
        # Drop this connection from the global registries and close the socket
        global camera_clients
        global aux_clients
        if self._cid in camera_clients.keys():
            del camera_clients[self._cid]
        if self._cid in aux_clients.keys():
            del aux_clients[self._cid]
        self._writer.close()
    # Motor client only: Set the position of the slide. pos is from 0 to 1
    @asyncio.coroutine
    def aux_set_slide(self, pos):
        yield from self._send("SET|%f~" % pos)
        yield from self._wait_for_ok("SET")
    # Set the camera resolution
    # mode = (w, h, fps)
    @asyncio.coroutine
    def set_resolution(self, mode):
        yield from self._send("RES|%d:%d:%d~" % mode)
        yield from self._wait_for_ok("RES")
    # Take a single image (and store it as <name>)
    @asyncio.coroutine
    def take_image(self, name):
        yield from self._send("SAM|%s~" % name)
        yield from self._wait_for_ok("SAM")
    # Starts the recording (no OK reply is awaited)
    @asyncio.coroutine
    def start_video(self, name):
        yield from self._send("REC|%s~" % name)
    # Stops the recording
    @asyncio.coroutine
    def stop_video(self):
        yield from self._send("STP|~")
    # Starts the stream
    @asyncio.coroutine
    def start_stream(self):
        # BUG FIX: was "STR|%s~" with no format argument, so a literal
        # "%s" went over the wire; use the empty-payload form like the
        # other argument-less commands (STP, STSTP, CAP, ...)
        yield from self._send("STR|~")
    # Stops the stream
    @asyncio.coroutine
    def stop_stream(self):
        yield from self._send("STSTP|~")
    # Automatic camera parameters
    @asyncio.coroutine
    def param_auto(self):
        yield from self._send("CAP|~")
        yield from self._wait_for_ok("CAP")
    # Lock camera parameters to the current values
    @asyncio.coroutine
    def param_lock(self):
        yield from self._send("CFP|~")
        yield from self._wait_for_ok("CFP")
    # Set the camera parameters; params = (shutter, awb_1, awb_2, gain)
    @asyncio.coroutine
    def param_set(self, params):
        yield from self._send("CSP|%i:%f:%f:%f~" % params)
        yield from self._wait_for_ok("CSP")
    # Get the current parameters from a camera: switch to auto exposure,
    # let it settle, lock the values and query them
    @asyncio.coroutine
    def get_cam_params(self, mode):
        yield from self.param_auto()
        yield from self.set_resolution(mode)
        print("Resolution set! Wait 5 sec for exposure values to adapt")
        yield from asyncio.sleep(5)
        yield from self.param_lock()
        yield from self._send("CGP|~")
        status, data = yield from self._get_response()
        if status != "CP":
            print("Unexpected response to parameter query: %s" % status)
            raise ClientResultError(status)
        d = data.split(":")
        if (len(d) != 4 or (not isFloat(d[0])) or (not isFloat(d[1])) or
                (not isFloat(d[2])) or (not isFloat(d[3]))):
            print("Invalid CP message: %s" % data)
            raise ClientMsgError(data)
        params = (int(d[0]), float(d[1]), float(d[2]), float(d[3]))
        return params
    # Internal: Read the next '~'-terminated message from the socket
    @asyncio.coroutine
    def _get_next_message(self):
        while True:
            delim = self._buffer.find('~')
            if delim != -1:
                res = self._buffer[:delim]
                self._buffer = self._buffer[delim + 1:]
                return res.strip('\r\n')
            try:
                data = yield from self._reader.read(100)
                self._buffer += data.decode('ascii')
            except Exception as e:
                print(e)
                self.remove_client()
                raise ClientSocketError()
            if not data:
                # BUG FIX: read() returns b'' at EOF; without this check
                # the loop would spin forever once the peer disconnects
                self.remove_client()
                raise ClientSocketError()
    # Internal: Gets the response for a command as a (status, data) tuple
    @asyncio.coroutine
    def _get_response(self):
        msg = yield from self._get_next_message()
        delim = msg.find("|")
        if (msg == "" or delim == -1):
            # format invalid
            print("Received invalid response: %s" % msg)
            raise ClientMsgError(msg)
        # status ~ code of the message
        # data ~ data of the message
        status = msg[0:delim]
        data = msg[delim + 1:]
        return (status, data)
    # Wait for a 'OK' result confirming the given command
    @asyncio.coroutine
    def _wait_for_ok(self, command):
        status, data = yield from self._get_response()
        if status != "OK" or data != command:
            raise ClientResultError(command)
    # Send a raw command string and flush the writer
    @asyncio.coroutine
    def _send(self, msg):
        self._writer.write(msg.encode('ascii'))
        yield from self._writer.drain()
# Calls the given function with the given paramerters on all clients and waits for the results
@asyncio.coroutine
def command_all_clients(function, *args):
    """Run the given coroutine function with *args on every connected
    camera client and wait until all of them have finished."""
    pending = [function(client, *args)
               for client in camera_clients.values()
               if client is not None]
    yield from asyncio.gather(*pending)
# Take an image with all cameras
@asyncio.coroutine
def take_client_images(filename, resolution=None, params=None):
    # Take an image named <filename> with all cameras.  resolution is
    # (w, h, fps) and params is (shutter, awb_1, awb_2, gain); either
    # may be None to keep the client's current setting.
    if resolution != None:
        print("Set resolution")
        yield from command_all_clients(ClientConnection.set_resolution, resolution)
    if params != None:
        print("Set params")
        yield from command_all_clients(ClientConnection.param_set, params)
    print("Take image")
    yield from command_all_clients(ClientConnection.take_image, filename)
# Starts the capture on all cameras
@asyncio.coroutine
def start_capture(filename, resolution, params):
    # Configure every camera (resolution + fixed parameters) and start
    # recording a video named <filename> on all of them.
    #print("Set resolution")
    yield from command_all_clients(ClientConnection.set_resolution, resolution)
    #print("Set params")
    yield from command_all_clients(ClientConnection.param_set, params)
    #print("Starting video")
    yield from command_all_clients(ClientConnection.start_video, filename)
# Stops the capture
@asyncio.coroutine
def stop_capture():
    # Stop the running video recording on all camera clients.
    #print("Stop video")
    yield from command_all_clients(ClientConnection.stop_video)
    #print("--> DONE <--")
# Move the marker to a given position and wait for a given time
@asyncio.coroutine
def move_marker(pos, wait=5):
    """Move the calibration marker to pos (0..1) via the 'CS' auxiliary
    client, then sleep `wait` seconds so it can stop swinging."""
    print("Move marker to %f" % pos)
    if "CS" not in aux_clients.keys():
        raise ClientNotFoundError("Callibration silde client is not connected!")
    yield from aux_clients["CS"].aux_set_slide(pos)
    if wait > 0:
        print("OK, wait %i sec for marker to stop wobbling" % wait)
        yield from asyncio.sleep(wait)
# Execute the calibration
# Move the marker step-by-step and take images
@asyncio.coroutine
def do_calibration(resolution, steps, params):
    # Execute the calibration: configure all cameras, then move the
    # marker to steps+1 equidistant positions (0..1) and take an image
    # at each one.  NOTE: steps must be >= 1 (division below).
    # FIX: the marker-movement loop was missing from this copy; restored
    # so every calibration position is actually visited.
    print("Start calibartion run")
    print("Set params")
    yield from command_all_clients(ClientConnection.param_set, params)
    print("Set resolution")
    yield from command_all_clients(ClientConnection.set_resolution, resolution)
    print("Now start moving marker")
    for p in range(steps + 1):
        cur = p / float(steps)
        yield from move_marker(cur)
        yield from take_client_images("/home/pi/flow.df/calib_%02d.jpg" % p)
    print("Returning marker to home position")
    yield from move_marker(0, 0)
# Starts the stream on a given client (UNTESTED)
@asyncio.coroutine
def do_streaming(client):
    # Streams live video from the given camera client into a local
    # mplayer instance until the stream ends.  (UNTESTED)
    #open socket
    listening_sock = socket.socket()
    listening_sock.bind(('0.0.0.0', 8000))
    listening_sock.listen(0)
    #send msg to cam to start stream
    yield from client.start_stream()
    #accept connection from camera
    connection = listening_sock.accept()[0].makefile('rb')
    print("streaming connection opened")
    player = None
    try:
        #open player
        print("open player")
        cmdline = ['mplayer', '-fps', '90', '-cache', '1024', '-']
        player = subprocess.Popen(cmdline, stdin=subprocess.PIPE)
        print("player opened")
        while True:
            data = connection.read(1024)
            if not data:
                print("no data")
                break
            player.stdin.write(data)
    except Exception:
        # FIX: was a bare 'except:', which also swallowed KeyboardInterrupt
        print("data reading and writing to mplayer failed")
    finally:
        # FIX: cleanup now runs in 'finally' and guards 'player', which
        # was unbound (NameError) if Popen itself failed
        connection.close()
        listening_sock.close()
        if player is not None:
            player.terminate()
        # BUG FIX: stop_stream() is a coroutine and was called without
        # 'yield from', so the stop command was never actually sent
        yield from client.stop_stream()
        print("Stream eneded")
# This is the 'main' function of this.
# In here all the CLI input processing is done and the commands are called.
@asyncio.coroutine
def handle_user_input(stdin, master_id, exposure_correction, cam_mode, loop=None):
    """Interactive CLI loop: reads commands from stdin and dispatches them.

    stdin               -- asyncio StreamReader wrapping sys.stdin
    master_id           -- id of the reference camera client
    exposure_correction -- factor multiplied onto the measured gain
    cam_mode            -- (width, height, fps) tuple for recordings
    loop                -- accepted for interface compatibility; unused here
    Raises KeyboardInterrupt when the user enters 'q'.
    """
    global camera_clients
    # the current camera parameters (shutter, awb_1, awb_2, gain)
    currentParamData = defaultCamParams
    while True:
        line = yield from stdin.readline()
        data = line.decode('ascii').strip('\r\n\t ')
        if data == 'h':
            # help
            print("c Start capture")
            print("e Get exposure values")
            print("ec Set exposure correction value")
            print("f Close / open valves to fill box with smoke")
            print("p Take position image")
            print("s Take sample image")
            print("q quit")
            print("l live video (requires mplayer)")
            print("cal start calibration")
        elif data.startswith('ec'):
            p = data.split(' ')
            if len(p) == 1:
                # no argument -> print current value
                print("exposure_correction = %f" % exposure_correction)
            else:
                # else: has argument -> check if valid (must be a float > 0)
                if not isFloat(p[1]) or float(p[1]) <= 0:
                    print("illegal value: %s" % p[1])
                    continue
                # set new value and print
                exposure_correction = float(p[1])
                print("OK, exposure_correction = %f" % exposure_correction)
                # print resulting gain (if parameters are set);
                # gain is clamped to the range [1, 12]
                if currentParamData != None:
                    (s, a1, a2, g) = currentParamData
                    g = min(max(g * exposure_correction, 1), 12)
                    print("gain is now = %f" % g)
                    currentParamData = (s, a1, a2, g)
        elif data == 'p':
            # position image at full still resolution
            try:
                yield from take_client_images("/home/pi/flow.df/pos.jpg", (2592, 1944, 1))
            except (ClientMsgError, ClientSocketError) as err:
                print(err)
            print("--> DONE <--")
        elif data == 's':
            # sample image on every camera, then stitch the five results
            # side by side into one overview PNG and show it
            print("Take sample image")
            try:
                yield from take_client_images("/home/pi/flow.df/sample.jpg", cam_mode, currentParamData)
            except (ClientMsgError, ClientSocketError) as err:
                print(err)
            sampleFilename = '/home/student/frejek/rpi/%d/sample.jpg'
            images = []
            for i in range(0,5):
                im = Image.open(sampleFilename%(i+1))
                im = im.transpose(Image.ROTATE_90)
                #im.show()
                images.append(im)
            new_im = Image.new('L', (5*1080, 1920))
            x_offset = 0
            for im in images:
                new_im.paste(im, (x_offset,0))
                x_offset += im.size[0]
            new_im.save('/home/student/frejek/samples.png')
            size = 1.6*1080, 1.6*384
            new_im.thumbnail(size, Image.ANTIALIAS)
            new_im.show()
            print("--> DONE <--")
        elif data == 'e':
            # query exposure values from the reference camera and apply
            # the correction factor to the gain (clamped to [1, 12])
            print("Get exposure values")
            if (not master_id in camera_clients.keys()) or camera_clients[master_id] == None:
                print(camera_clients.keys())
                print("No client: %d (reference client id)" % master_id)
                continue
            try:
                currentParamData = yield from camera_clients[master_id].get_cam_params(cam_mode)
            except (ClientMsgError, ClientSocketError) as err:
                print(err)
                continue
            (s, a1, a2, g) = currentParamData
            g2 = min(max(g * exposure_correction, 1), 12)
            currentParamData = (s, a1, a2, g2)
            print("Camera params: shutter=%s, awb=(%s, %s), gain=%s, gain(corrected)=%f" % (s, a1, a2, g, g2))
        elif data.startswith('c'):
            arg = data.split(" ")
            if arg[0] == 'c':
                # capture run: fill box with smoke, record, download videos
                if len(arg) < 6:
                    print('C is missing additional arguments (videoFolder, percentageLower, numOfCaptures, smokeSecondsInit, smokeSecondsLoop)')
                else:
                    print('')
                    print('---------------------------------------------------------')
                    print('Make sure there is enough smoke fluid in smoke machine!!!')
                    print('---------------------------------------------------------')
                    print('')
                    #print("Release smoke and record videos.")
                    if currentParamData == None:
                        # capture without parameters set is not allowed
                        print("Can't start capture without exposure data")
                        continue
                    print("Close valves.")
                    subprocess.call(['./servocontrol.sh', 'fill', '0'])
                    videoFolder = arg[1]
                    percentageLower = arg[2]
                    numOfCaptures = int(arg[3])
                    smokeSecondsInit = arg[4]
                    smokeSecondsLoop = arg[5]
                    print("Fill box with smoke, init.")
                    subprocess.call(['./fillBox.sh', smokeSecondsInit])
                    print("Wait 150s for smoke to disappear and people leaving the room.")
                    time.sleep(150)
                    # loop for each capture process
                    numberNaming = 0
                    for capture in range(numOfCaptures):
                        try:
                            print("Start capture.")
                            yield from start_capture("rec.h264", cam_mode, currentParamData)
                            time.sleep(5)
                            print("Open valves.")
                            subprocess.call(['./servocontrol.sh', 'plume', percentageLower])
                        except (ClientMsgError, ClientSocketError) as err:
                            print(err)
                        print('Capture in progress.')
                        time.sleep(18)
                        try:
                            print("Stopping capture.")
                            yield from stop_capture()
                            print("Closing valves.")
                            subprocess.call(['./servocontrol.sh', 'fill', '0'])
                            print("Download videos.")
                            # find the first free _<percentage>_<nnnn> suffix
                            while os.path.isdir(videoFolder+'_%s_%04d' % (percentageLower, numberNaming)):
                                numberNaming = numberNaming+1
                                if numberNaming>9999:
                                    print('Folder exists: %s'%(videoFolder+'_%s_%04d' % (percentageLower, numberNaming)))
                                    break
                            subprocess.call(['./grabVideos.sh', videoFolder+'_%s_%04d' % (percentageLower, numberNaming)])
                        except (ClientMsgError, ClientSocketError) as err:
                            print(err)
                        print("Done with capture %04d."%capture)
                        if capture+1<numOfCaptures:
                            # refill smoke and let it settle before the next run
                            time.sleep(60)
                            print("Close valves to fill box with smoke.")
                            subprocess.call(['./servocontrol.sh', 'fill', '0'])
                            print("Fill box with smoke, wait for smoke to disappear, loop.")
                            subprocess.call(['./fillBox.sh', smokeSecondsLoop])
                            time.sleep(180)
                    print("Done with all captures 'c'.")
                    print("Close valves.")
                    subprocess.call(['./servocontrol.sh', 'fill', '0'])
            elif arg[0] == 'cal':
                # calibration run followed by image processing scripts
                if len(arg) < 2:
                    print('CAL is missing additional argument (calibFolder)')
                else:
                    calibFolder = arg[1]
                    try:
                        yield from do_calibration((2592, 1944, 1), 20, currentParamData)
                        print("--> DONE CAL <--")
                        print("--> gather calibs <--")
                        subprocess.call(['./gather_calib.sh', calibFolder])
                        print("--> process images <--")
                        subprocess.Popen(['./calibration/process_images.sh', calibFolder, '--ed=1000'])
                        print("--> DONE process images <--")
                    except (ClientMsgError, ClientSocketError, ClientNotFoundError) as err:
                        print(err)
            else:
                print("Unknown command: ", data)
        elif data.startswith('l'):
            arg = data.split(' ')
            if len(arg) == 1:
                # no argument -> print current value
                print("please add which client should be streamed")
                continue
            else:
                # else: has argument -> check if valid
                if not arg[1].isdigit():
                    print("Not a decimal number: %s" % arg[1])
                    continue
                client_num = int(arg[1])
                if not client_num in camera_clients.keys() or camera_clients[client_num] == None:
                    print("No client with id: %d (reference client id)" % client_num)
                    continue
                try:
                    yield from do_streaming(camera_clients[client_num])
                except (ClientMsgError, ClientSocketError) as err:
                    print(err)
        elif data == 'q':
            # q is same as CRTL+c
            raise KeyboardInterrupt
        else:
            print("Unknown command: ", data)
# checks if a value can be interpreted as a plain decimal float
# (optional leading sign; exponent notation is still rejected on purpose,
# matching the original behaviour -- such values do not occur here)
def isFloat(s):
    """Return True if s is a plain decimal number like '1', '2.5', '-0.8'."""
    s = s.strip()
    # FIX: accept an optional sign (the old digit-only check rejected
    # signed values) and stop shadowing the builtin name 'str'
    if s[:1] in ('+', '-'):
        s = s[1:]
    return s.replace(".", "", 1).isdigit()
# Handles a connection from a new client
@asyncio.coroutine
def handle_connection(reader, writer):
    """Accept-callback for the server: performs the handshake and files
    the new connection under camera or auxiliary clients."""
    global camera_clients
    global aux_clients
    try:
        conn = ClientConnection(reader, writer)
        # camera clients identify with an int, auxiliary ones with a name
        cid = yield from conn.client_id()
        if isinstance(cid, int):
            camera_clients[cid] = conn
        else:
            aux_clients[cid] = conn
    except (ClientMsgError, ClientSocketError) as e:
        print(e)
        print("Failed to establish connection")
        return
# Returns async reader for stdio
@asyncio.coroutine
def setup_stdio_reader(loop=None):
    """Create an asyncio StreamReader that is fed from sys.stdin."""
    loop = loop or asyncio.get_event_loop()
    reader = asyncio.StreamReader()
    protocol = asyncio.StreamReaderProtocol(reader)
    yield from loop.connect_read_pipe(lambda: protocol, sys.stdin)
    return reader
def main(argv):
    # Parse the CLI options, start the TCP command server on port 54321
    # and run the interactive input loop until the user quits (q / Ctrl+C).
    parser = argparse.ArgumentParser(
        description='Host program for smoke capture camera system')
    parser.add_argument('-r', '--reference', type=int, default=1,
                        help='Client id of the reference client (used for\
                        parameter synchronization)')
    parser.add_argument('-W', '--width', type=int, default=1296,
                        help='Width (resolution) of the recorded video')
    parser.add_argument('-H', '--height', type=int, default=972,
                        help='Height (resolution) of the recorded video')
    parser.add_argument('-F', '--fps', type=int, default=30,
                        help='Frame rate of the recorded video')
    #parser.add_argument('-b', '--bind', default='131.159.40.51',
    #                    help='Bind address used for the listening socket')
    parser.add_argument('-e', '--exposure_correction', default='0.8',
                        type=float,
                        help='Defines the initial value for the gain\
                        correction (this value is multilied with the\
                        current gain)')
    args = parser.parse_args()
    master_id = args.reference
    default_ec = args.exposure_correction
    cam_mode = (args.width, args.height, args.fps)
    loop = asyncio.get_event_loop()
    # stdin must be wrapped into an asyncio reader before the loop runs
    stdin_reader = loop.run_until_complete(setup_stdio_reader(loop=loop))
    input_coro = handle_user_input(stdin_reader, master_id, default_ec, cam_mode, loop=loop)
    server_coro = asyncio.start_server(handle_connection, '0.0.0.0', 54321, loop=loop)
    server = loop.run_until_complete(server_coro)
    try:
        loop.run_until_complete(input_coro)
    except KeyboardInterrupt:
        pass
    # Close the server
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
if __name__ == "__main__":
main(sys.argv) | print("Now start moving marker")
for p in range(steps + 1):
cur = p / float(steps)
yield from move_marker(cur) | random_line_split |
capturehost.py | #!/usr/bin/env python3
import argparse
import socket
import asyncio
import subprocess
import time, os, sys
from PIL import Image
# The default camera parameters
# None marks them as undefined
# if defined they are (shutter, awb_1, awb_2, gain)
defaultCamParams = (33164, 1.5, 1.5, 4.8)
# ID -> ClienConnection
# Ordinary clients with camera for capturing
camera_clients = {}
# Additional clients for room periphrals (like servos)
aux_clients = {}
class ClientMsgError(Exception):
    """Raised when a client sends a malformed or unexpected message."""
    def __init__(self, msg):
        # FIX: chain through Exception so .args is populated as well
        super().__init__(msg)
        self.msg = msg
    def __str__(self):
        return "ClientMsgError: {}".format(self.msg)
class ClientResultError(ClientMsgError):
    """Raised when a client replies with an unexpected status or result."""
    def __init__(self, msg):
        # FIX: chain through the base class so .args is populated as well
        super().__init__(msg)
        self.msg = msg
    def __str__(self):
        return "ClientResultError: {}".format(self.msg)
class ClientNotFoundError(Exception):
    """Raised when a command targets a client that is not connected."""
    def __init__(self, msg):
        # FIX: chain through Exception so .args is populated as well
        super().__init__(msg)
        self.msg = msg
    def __str__(self):
        return "ClientNotFoundError: {}".format(self.msg)
class ClientSocketError(Exception):
    """Raised when reading from a client socket fails or the link is lost."""
    pass
# An instance of this class handles a connection to a single client.
# It offsers async functions to execute commands on the client.
# These functions wait until the command finishes.
# It may be a good idea to split this class into subclasses for the different
# client types. For now, all functions are in one class.
class ClientConnection:
    """Handles the connection to a single client.

    Offers coroutine functions to execute commands on the client; these
    functions wait until the command finishes.  It may be a good idea to
    split this class into subclasses for the different client types.
    For now, all functions are in one class.
    """
    def __init__(self, reader, writer):
        self._reader = reader    # asyncio stream reader for the socket
        self._writer = writer    # asyncio stream writer for the socket
        self._buffer = str()     # receive buffer; messages end with '~'
        self._cid = None         # client id, filled in by client_id()
    # This function should be called first, it gets the ID of the client
    # by reading the HI message from the socket
    @asyncio.coroutine
    def client_id(self):
        if self._cid != None:
            return self._cid
        print("Waitng for client message")
        msg = yield from self._get_next_message()
        print(msg[0])  # NOTE(review): prints only the first character -- debug leftover?
        d = msg.split('|')
        if len(d) != 2 or d[0] != 'HI':
            # FIX: the original string used a backslash continuation whose
            # indentation leaked into the printed message
            print("Unexpected message from client (Expected HI message):\n%s" % msg)
            self.remove_client()
            raise ClientMsgError(msg)
        if d[1].isdigit():
            # numeric id -> camera client
            self._cid = int(d[1])
            print("Camera client connected, id=%d" % self._cid)
        else:
            # non-numeric id -> auxiliary client (e.g. the slide motor)
            self._cid = d[1]
            print("Auxiliary client connected, name=%s" % self._cid)
        # confirm connection
        yield from self._send("CON|%s~" % d[1])
        return self._cid
    def remove_client(self):
        # Drop this connection from the global registries and close the socket
        global camera_clients
        global aux_clients
        if self._cid in camera_clients.keys():
            del camera_clients[self._cid]
        if self._cid in aux_clients.keys():
            del aux_clients[self._cid]
        self._writer.close()
    # Motor client only: Set the position of the slide. pos is from 0 to 1
    @asyncio.coroutine
    def aux_set_slide(self, pos):
        yield from self._send("SET|%f~" % pos)
        yield from self._wait_for_ok("SET")
    # Set the camera resolution
    # mode = (w, h, fps)
    @asyncio.coroutine
    def set_resolution(self, mode):
        yield from self._send("RES|%d:%d:%d~" % mode)
        yield from self._wait_for_ok("RES")
    # Take a single image (and store it as <name>)
    @asyncio.coroutine
    def take_image(self, name):
        yield from self._send("SAM|%s~" % name)
        yield from self._wait_for_ok("SAM")
    # Starts the recording (no OK reply is awaited)
    @asyncio.coroutine
    def start_video(self, name):
        yield from self._send("REC|%s~" % name)
    # Stops the recording
    @asyncio.coroutine
    def stop_video(self):
        yield from self._send("STP|~")
    # Starts the stream
    @asyncio.coroutine
    def start_stream(self):
        # BUG FIX: was "STR|%s~" with no format argument, so a literal
        # "%s" went over the wire; use the empty-payload form like the
        # other argument-less commands (STP, STSTP, CAP, ...)
        yield from self._send("STR|~")
    # Stops the stream
    @asyncio.coroutine
    def stop_stream(self):
        yield from self._send("STSTP|~")
    # Automatic camera parameters
    @asyncio.coroutine
    def param_auto(self):
        yield from self._send("CAP|~")
        yield from self._wait_for_ok("CAP")
    # Lock camera parameters to the current values
    @asyncio.coroutine
    def param_lock(self):
        yield from self._send("CFP|~")
        yield from self._wait_for_ok("CFP")
    # Set the camera parameters; params = (shutter, awb_1, awb_2, gain)
    @asyncio.coroutine
    def param_set(self, params):
        yield from self._send("CSP|%i:%f:%f:%f~" % params)
        yield from self._wait_for_ok("CSP")
    # Get the current parameters from a camera: switch to auto exposure,
    # let it settle, lock the values and query them
    @asyncio.coroutine
    def get_cam_params(self, mode):
        yield from self.param_auto()
        yield from self.set_resolution(mode)
        print("Resolution set! Wait 5 sec for exposure values to adapt")
        yield from asyncio.sleep(5)
        yield from self.param_lock()
        yield from self._send("CGP|~")
        status, data = yield from self._get_response()
        if status != "CP":
            print("Unexpected response to parameter query: %s" % status)
            raise ClientResultError(status)
        d = data.split(":")
        if (len(d) != 4 or (not isFloat(d[0])) or (not isFloat(d[1])) or
                (not isFloat(d[2])) or (not isFloat(d[3]))):
            print("Invalid CP message: %s" % data)
            raise ClientMsgError(data)
        params = (int(d[0]), float(d[1]), float(d[2]), float(d[3]))
        return params
    # Internal: Read the next '~'-terminated message from the socket
    @asyncio.coroutine
    def _get_next_message(self):
        while True:
            delim = self._buffer.find('~')
            if delim != -1:
                res = self._buffer[:delim]
                self._buffer = self._buffer[delim + 1:]
                return res.strip('\r\n')
            try:
                data = yield from self._reader.read(100)
                self._buffer += data.decode('ascii')
            except Exception as e:
                print(e)
                self.remove_client()
                raise ClientSocketError()
            if not data:
                # BUG FIX: read() returns b'' at EOF; without this check
                # the loop would spin forever once the peer disconnects
                self.remove_client()
                raise ClientSocketError()
    # Internal: Gets the response for a command as a (status, data) tuple
    @asyncio.coroutine
    def _get_response(self):
        msg = yield from self._get_next_message()
        delim = msg.find("|")
        if (msg == "" or delim == -1):
            # format invalid
            print("Received invalid response: %s" % msg)
            raise ClientMsgError(msg)
        # status ~ code of the message
        # data ~ data of the message
        status = msg[0:delim]
        data = msg[delim + 1:]
        return (status, data)
    # Wait for a 'OK' result confirming the given command
    @asyncio.coroutine
    def _wait_for_ok(self, command):
        status, data = yield from self._get_response()
        if status != "OK" or data != command:
            raise ClientResultError(command)
    # Send a raw command string and flush the writer
    @asyncio.coroutine
    def _send(self, msg):
        self._writer.write(msg.encode('ascii'))
        yield from self._writer.drain()
# Calls the given function with the given paramerters on all clients and waits for the results
@asyncio.coroutine
def command_all_clients(function, *args):
    """Run the given coroutine function with *args on every connected
    camera client and wait until all of them have finished."""
    pending = [function(client, *args)
               for client in camera_clients.values()
               if client is not None]
    yield from asyncio.gather(*pending)
# Take an image with all cameras
@asyncio.coroutine
def take_client_images(filename, resolution=None, params=None):
    # Take an image named <filename> with all cameras.  resolution is
    # (w, h, fps) and params is (shutter, awb_1, awb_2, gain); either
    # may be None to keep the client's current setting.
    if resolution != None:
        print("Set resolution")
        yield from command_all_clients(ClientConnection.set_resolution, resolution)
    if params != None:
        print("Set params")
        yield from command_all_clients(ClientConnection.param_set, params)
    print("Take image")
    yield from command_all_clients(ClientConnection.take_image, filename)
# Starts the capture on all cameras
@asyncio.coroutine
def start_capture(filename, resolution, params):
    # Configure every camera (resolution + fixed parameters) and start
    # recording a video named <filename> on all of them.
    #print("Set resolution")
    yield from command_all_clients(ClientConnection.set_resolution, resolution)
    #print("Set params")
    yield from command_all_clients(ClientConnection.param_set, params)
    #print("Starting video")
    yield from command_all_clients(ClientConnection.start_video, filename)
# Stops the capture
@asyncio.coroutine
def stop_capture():
    # Stop the running video recording on all camera clients.
    #print("Stop video")
    yield from command_all_clients(ClientConnection.stop_video)
    #print("--> DONE <--")
# Move the marker to a given position and wait for a given time
@asyncio.coroutine
def move_marker(pos, wait=5):
    """Move the calibration marker to pos (0..1) via the 'CS' auxiliary
    client, then sleep `wait` seconds so it can stop swinging."""
    print("Move marker to %f" % pos)
    if "CS" not in aux_clients.keys():
        raise ClientNotFoundError("Callibration silde client is not connected!")
    yield from aux_clients["CS"].aux_set_slide(pos)
    if wait > 0:
        print("OK, wait %i sec for marker to stop wobbling" % wait)
        yield from asyncio.sleep(wait)
# Execute the calibration
# Move the marker step-by-step and take images
@asyncio.coroutine
def do_calibration(resolution, steps, params):
    # Execute the calibration: configure all cameras, then move the
    # marker to steps+1 equidistant positions (0..1) and take an image
    # at each one.  NOTE: steps must be >= 1 (division below).
    print("Start calibartion run")
    #params = (1000, 1, 1, 1.5);
    #defaultCamParams = (33164, 1.5, 1.5, 4.8)
    print("Set params")
    yield from command_all_clients(ClientConnection.param_set, params)
    print("Set resolution")
    yield from command_all_clients(ClientConnection.set_resolution, resolution)
    print("Now start moving marker")
    for p in range(steps + 1):
        cur = p / float(steps)
        yield from move_marker(cur)
        yield from take_client_images("/home/pi/flow.df/calib_%02d.jpg" % p)
    print("Returning marker to home position")
    # wait=0: no need to let the marker settle when just parking it
    yield from move_marker(0, 0)
# Starts the stream on a given client (UNTESTED)
@asyncio.coroutine
def do_streaming(client):
    # Streams live video from the given camera client into a local
    # mplayer instance until the stream ends.  (UNTESTED)
    #open socket
    listening_sock = socket.socket()
    listening_sock.bind(('0.0.0.0', 8000))
    listening_sock.listen(0)
    #send msg to cam to start stream
    yield from client.start_stream()
    #accept connection from camera
    connection = listening_sock.accept()[0].makefile('rb')
    print("streaming connection opened")
    player = None
    try:
        #open player
        print("open player")
        cmdline = ['mplayer', '-fps', '90', '-cache', '1024', '-']
        player = subprocess.Popen(cmdline, stdin=subprocess.PIPE)
        print("player opened")
        while True:
            data = connection.read(1024)
            if not data:
                print("no data")
                break
            player.stdin.write(data)
    except Exception:
        # FIX: was a bare 'except:', which also swallowed KeyboardInterrupt
        print("data reading and writing to mplayer failed")
    finally:
        # FIX: cleanup now runs in 'finally' and guards 'player', which
        # was unbound (NameError) if Popen itself failed
        connection.close()
        listening_sock.close()
        if player is not None:
            player.terminate()
        # BUG FIX: stop_stream() is a coroutine and was called without
        # 'yield from', so the stop command was never actually sent
        yield from client.stop_stream()
        print("Stream eneded")
# This is the 'main' function of this.
# In here all the CLI input processing is done and the commands are called.
@asyncio.coroutine
def handle_user_input(stdin, master_id, exposure_correction, cam_mode, loop=None):
global camera_clients
# the current camera parameters
currentParamData = defaultCamParams
while True:
line = yield from stdin.readline()
data = line.decode('ascii').strip('\r\n\t ')
if data == 'h':
# help
print("c Start capture")
print("e Get exposure values")
print("ec Set exposure correction value")
print("f Close / open valves to fill box with smoke")
print("p Take position image")
print("s Take sample image")
print("q quit")
print("l live video (requires mplayer)")
print("cal start calibration")
elif data.startswith('ec'):
p = data.split(' ')
if len(p) == 1:
# no argument -> print current value
print("exposure_correction = %f" % exposure_correction)
else:
# else: has argument -> check if valid
if not isFloat(p[1]) or float(p[1]) <= 0:
print("illegal value: %s" % p[1])
continue
# set new value and print
exposure_correction = float(p[1])
print("OK, exposure_correction = %f" % exposure_correction)
# print resulting gain (if parameters are set)
if currentParamData != None:
(s, a1, a2, g) = currentParamData
g = min(max(g * exposure_correction, 1), 12)
print("gain is now = %f" % g)
currentParamData = (s, a1, a2, g)
elif data == 'p':
# position image
try:
yield from take_client_images("/home/pi/flow.df/pos.jpg", (2592, 1944, 1))
except (ClientMsgError, ClientSocketError) as err:
print(err)
print("--> DONE <--")
elif data == 's':
print("Take sample image")
try:
yield from take_client_images("/home/pi/flow.df/sample.jpg", cam_mode, currentParamData)
except (ClientMsgError, ClientSocketError) as err:
print(err)
sampleFilename = '/home/student/frejek/rpi/%d/sample.jpg'
images = []
for i in range(0,5):
im = Image.open(sampleFilename%(i+1))
im = im.transpose(Image.ROTATE_90)
#im.show()
images.append(im)
new_im = Image.new('L', (5*1080, 1920))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
new_im.save('/home/student/frejek/samples.png')
size = 1.6*1080, 1.6*384
new_im.thumbnail(size, Image.ANTIALIAS)
new_im.show()
print("--> DONE <--")
elif data == 'e':
print("Get exposure values")
if (not master_id in camera_clients.keys()) or camera_clients[master_id] == None:
print(camera_clients.keys())
print("No client: %d (reference client id)" % master_id)
continue
try:
currentParamData = yield from camera_clients[master_id].get_cam_params(cam_mode)
except (ClientMsgError, ClientSocketError) as err:
print(err)
continue
(s, a1, a2, g) = currentParamData
g2 = min(max(g * exposure_correction, 1), 12)
currentParamData = (s, a1, a2, g2)
print("Camera params: shutter=%s, awb=(%s, %s), gain=%s, gain(corrected)=%f" % (s, a1, a2, g, g2))
elif data.startswith('c'):
arg = data.split(" ")
if arg[0] == 'c':
if len(arg) < 6:
print('C is missing additional arguments (videoFolder, percentageLower, numOfCaptures, smokeSecondsInit, smokeSecondsLoop)')
else:
print('')
print('---------------------------------------------------------')
print('Make sure there is enough smoke fluid in smoke machine!!!')
print('---------------------------------------------------------')
print('')
#print("Release smoke and record videos.")
if currentParamData == None:
# capture without parameters set is not allowed
print("Can't start capture without exposure data")
continue
print("Close valves.")
subprocess.call(['./servocontrol.sh', 'fill', '0'])
videoFolder = arg[1]
percentageLower = arg[2]
numOfCaptures = int(arg[3])
smokeSecondsInit = arg[4]
smokeSecondsLoop = arg[5]
print("Fill box with smoke, init.")
subprocess.call(['./fillBox.sh', smokeSecondsInit])
print("Wait 150s for smoke to disappear and people leaving the room.")
time.sleep(150)
# loop for each capture process
numberNaming = 0
for capture in range(numOfCaptures):
try:
print("Start capture.")
yield from start_capture("rec.h264", cam_mode, currentParamData)
time.sleep(5)
print("Open valves.")
subprocess.call(['./servocontrol.sh', 'plume', percentageLower])
except (ClientMsgError, ClientSocketError) as err:
print(err)
print('Capture in progress.')
time.sleep(18)
try:
print("Stopping capture.")
yield from stop_capture()
print("Closing valves.")
subprocess.call(['./servocontrol.sh', 'fill', '0'])
print("Download videos.")
while os.path.isdir(videoFolder+'_%s_%04d' % (percentageLower, numberNaming)):
numberNaming = numberNaming+1
if numberNaming>9999:
print('Folder exists: %s'%(videoFolder+'_%s_%04d' % (percentageLower, numberNaming)))
break
subprocess.call(['./grabVideos.sh', videoFolder+'_%s_%04d' % (percentageLower, numberNaming)])
except (ClientMsgError, ClientSocketError) as err:
print(err)
print("Done with capture %04d."%capture)
if capture+1<numOfCaptures:
time.sleep(60)
print("Close valves to fill box with smoke.")
subprocess.call(['./servocontrol.sh', 'fill', '0'])
print("Fill box with smoke, wait for smoke to disappear, loop.")
subprocess.call(['./fillBox.sh', smokeSecondsLoop])
time.sleep(180)
print("Done with all captures 'c'.")
print("Close valves.")
subprocess.call(['./servocontrol.sh', 'fill', '0'])
elif arg[0] == 'cal':
if len(arg) < 2:
print('CAL is missing additional argument (calibFolder)')
else:
calibFolder = arg[1]
try:
yield from do_calibration((2592, 1944, 1), 20, currentParamData)
print("--> DONE CAL <--")
print("--> gather calibs <--")
subprocess.call(['./gather_calib.sh', calibFolder])
print("--> process images <--")
subprocess.Popen(['./calibration/process_images.sh', calibFolder, '--ed=1000'])
print("--> DONE process images <--")
except (ClientMsgError, ClientSocketError, ClientNotFoundError) as err:
print(err)
else:
print("Unknown command: ", data)
elif data.startswith('l'):
arg = data.split(' ')
if len(arg) == 1:
# no argument -> print current value
print("please add which client should be streamed")
continue
else:
# else: has argument -> check if valid
if not arg[1].isdigit():
print("Not a decimal number: %s" % arg[1])
continue
client_num = int(arg[1])
if not client_num in camera_clients.keys() or camera_clients[client_num] == None:
print("No client with id: %d (reference client id)" % client_num)
continue
try:
yield from do_streaming(camera_clients[client_num])
except (ClientMsgError, ClientSocketError) as err:
print(err)
elif data == 'q':
# q is same as CRTL+c
raise KeyboardInterrupt
else:
print("Unknown command: ", data)
# checks if a value can be interpreted as a float
# fails with exponential values, but they do not occure here
def isFloat(str):
return str.replace(".", "", 1).isdigit()
# Handles a connection from a new client
@asyncio.coroutine
def handle_connection(reader, writer):
global camera_clients
global aux_clients
try:
c = ClientConnection(reader, writer)
new_client_id = yield from c.client_id()
if type(new_client_id) is int:
camera_clients[new_client_id] = c
else:
aux_clients[new_client_id] = c
except (ClientMsgError, ClientSocketError) as e:
print(e)
print("Failed to establish connection")
return
# Returns async reader for stdio
@asyncio.coroutine
def setup_stdio_reader(loop=None):
if not loop:
loop = asyncio.get_event_loop()
reader = asyncio.StreamReader()
reader_protocol = asyncio.StreamReaderProtocol(reader)
yield from loop.connect_read_pipe(lambda: reader_protocol, sys.stdin)
return reader
def main(argv):
parser = argparse.ArgumentParser(
description='Host program for smoke capture camera system')
parser.add_argument('-r', '--reference', type=int, default=1,
help='Client id of the reference client (used for\
parameter synchronization)')
parser.add_argument('-W', '--width', type=int, default=1296,
help='Width (resolution) of the recorded video')
parser.add_argument('-H', '--height', type=int, default=972,
help='Height (resolution) of the recorded video')
parser.add_argument('-F', '--fps', type=int, default=30,
help='Frame rate of the recorded video')
#parser.add_argument('-b', '--bind', default='131.159.40.51',
# help='Bind address used for the listening socket')
parser.add_argument('-e', '--exposure_correction', default='0.8',
type=float,
help='Defines the initial value for the gain\
correction (this value is multilied with the\
current gain)')
args = parser.parse_args()
master_id = args.reference
default_ec = args.exposure_correction
cam_mode = (args.width, args.height, args.fps)
loop = asyncio.get_event_loop()
stdin_reader = loop.run_until_complete(setup_stdio_reader(loop=loop))
input_coro = handle_user_input(stdin_reader, master_id, default_ec, cam_mode, loop=loop)
server_coro = asyncio.start_server(handle_connection, '0.0.0.0', 54321, loop=loop)
server = loop.run_until_complete(server_coro)
try:
loop.run_until_complete(input_coro)
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
if __name__ == "__main__":
| main(sys.argv) | conditional_block | |
lib.rs | /*!
This crate provides a fast implementation of agglomerative
[hierarchical clustering](https://en.wikipedia.org/wiki/Hierarchical_clustering).
The ideas and implementation in this crate are heavily based on the work of
Daniel Müllner, and in particular, his 2011 paper,
[Modern hierarchical, agglomerative clustering algorithms](https://arxiv.org/pdf/1109.2378.pdf).
Parts of the implementation have also been inspired by his C++
library, [`fastcluster`](http://danifold.net/fastcluster.html).
Müllner's work, in turn, is based on the hierarchical clustering facilities
provided by MATLAB and
[SciPy](https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html).
The runtime performance of this library is on par with Müllner's `fastcluster`
implementation.
# Overview
The most important parts of this crate are as follows:
* [`linkage`](fn.linkage.html) performs hierarchical clustering on a pairwise
dissimilarity matrix.
* [`Method`](enum.Method.html) determines the linkage criteria.
* [`Dendrogram`](struct.Dendrogram.html) is a representation of a "stepwise"
dendrogram, which serves as the output of hierarchical clustering.
# Usage
Add this to your `Cargo.toml`:
```text
[dependencies]
kodama = "0.3"
```
and this to your crate root:
```
extern crate kodama;
```
# Example
Showing an example is tricky, because it's hard to motivate the use of
hierarchical clustering on small data sets, and especially hard without
domain specific details that suggest a hierarchical clustering may actually
be useful.
Instead of solving the hard problem of motivating a real use case, let's take
a look at a toy use case: a hierarchical clustering of a small number of
geographic points. We'll measure the distance (by way of the crow) between
these points using latitude/longitude coordinates with the
[Haversine formula](https://en.wikipedia.org/wiki/Haversine_formula).
We'll use a small collection of municipalities from central Massachusetts in
our example. Here's the data:
```text
Index Municipality Latitude Longitude
0 Fitchburg 42.5833333 -71.8027778
1 Framingham 42.2791667 -71.4166667
2 Marlborough 42.3458333 -71.5527778
3 Northbridge 42.1513889 -71.6500000
4 Southborough 42.3055556 -71.5250000
5 Westborough 42.2694444 -71.6166667
```
Each municipality in our data represents a single observation, and we'd like to
create a hierarchical clustering of them using [`linkage`](fn.linkage.html).
The input to `linkage` is a *condensed pairwise dissimilarity matrix*. This
matrix stores the dissimilarity between all pairs of observations. The
"condensed" aspect of it means that it only stores the upper triangle (not
including the diagonal) of the matrix. We can do this because hierarchical
clustering requires that our dissimilarities between observations are
reflexive. That is, the dissimilarity between `A` and `B` is the same as the
dissimilarity between `B` and `A`. This is certainly true in our case with the
Haversine formula.
So let's compute all of the pairwise dissimilarities and create our condensed
pairwise matrix:
```
// See: https://en.wikipedia.org/wiki/Haversine_formula
fn haversine((lat1, lon1): (f64, f64), (lat2, lon2): (f64, f64)) -> f64 {
const EARTH_RADIUS: f64 = 3958.756; // miles
let (lat1, lon1) = (lat1.to_radians(), lon1.to_radians());
let (lat2, lon2) = (lat2.to_radians(), lon2.to_radians());
let delta_lat = lat2 - lat1;
let delta_lon = lon2 - lon1;
let x =
(delta_lat / 2.0).sin().powi(2)
+ lat1.cos() * lat2.cos() * (delta_lon / 2.0).sin().powi(2);
2.0 * EARTH_RADIUS * x.sqrt().atan()
}
// From our data set. Each coordinate pair corresponds to a single observation.
let coordinates = vec![
(42.5833333, -71.8027778),
(42.2791667, -71.4166667),
(42.3458333, -71.5527778),
(42.1513889, -71.6500000),
(42.3055556, -71.5250000),
(42.2694444, -71.6166667),
];
// Build our condensed matrix by computing the dissimilarity between all
// possible coordinate pairs.
let mut condensed = vec![];
for row in 0..coordinates.len() - 1 {
for col in row + 1..coordinates.len() {
condensed.push(haversine(coordinates[row], coordinates[col]));
}
}
// The length of a condensed dissimilarity matrix is always equal to
// `N-choose-2`, where `N` is the number of observations.
assert_eq!(condensed.len(), (coordinates.len() * (coordinates.len() - 1)) / 2);
```
Now that we have our condensed dissimilarity matrix, all we need to do is
choose our *linkage criterion*. The linkage criterion refers to the formula
that is used during hierarchical clustering to compute the dissimilarity
between newly formed clusters and all other clusters. This crate provides
several choices, and the choice one makes depends both on the problem you're
trying to solve and your performance requirements. For example, "single"
linkage corresponds to using the minimum dissimilarity between all pairs of
observations between two clusters as the dissimilarity between those two
clusters. It turns out that doing single linkage hierarchical clustering has
a rough isomorphism to computing the minimum spanning tree, which means the
implementation can be quite fast (`O(n^2)`, to be precise). However, other
linkage criteria require more general purpose algorithms with higher constant
factors or even worse time complexity. For example, using median linkage has
worst case `O(n^3)` complexity, although it is often `n^2` in practice.
In this case, we'll choose average linkage (which is `O(n^2)`). With that
decision made, we can finally run linkage:
```
# fn haversine((lat1, lon1): (f64, f64), (lat2, lon2): (f64, f64)) -> f64 {
# const EARTH_RADIUS: f64 = 3958.756; // miles
#
# let (lat1, lon1) = (lat1.to_radians(), lon1.to_radians());
# let (lat2, lon2) = (lat2.to_radians(), lon2.to_radians());
#
# let delta_lat = lat2 - lat1;
# let delta_lon = lon2 - lon1;
# let x =
# (delta_lat / 2.0).sin().powi(2)
# + lat1.cos() * lat2.cos() * (delta_lon / 2.0).sin().powi(2);
# 2.0 * EARTH_RADIUS * x.sqrt().atan()
# }
# let coordinates = vec![
# (42.5833333, -71.8027778),
# (42.2791667, -71.4166667),
# (42.3458333, -71.5527778),
# (42.1513889, -71.6500000),
# (42.3055556, -71.5250000),
# (42.2694444, -71.6166667),
# ];
# let mut condensed = vec![];
# for row in 0..coordinates.len() - 1 {
# for col in row + 1..coordinates.len() {
# condensed.push(haversine(coordinates[row], coordinates[col]));
# }
# }
use kodama::{Method, linkage};
let dend = linkage(&mut condensed, coordinates.len(), Method::Average);
// The dendrogram always has `N - 1` steps, where each step corresponds to a
// newly formed cluster by merging two previous clusters. The last step creates
// a cluster that contains all observations.
assert_eq!(dend.len(), coordinates.len() - 1);
```
The output of `linkage` is a stepwise
[`Dendrogram`](struct.Dendrogram.html).
Each step corresponds to a merge between two previous clusters. Each step is
represented by a 4-tuple: a pair of cluster labels, the dissimilarity between
the two clusters that have been merged and the total number of observations
in the newly formed cluster. Here's what our dendrogram looks like:
```text
cluster1 cluster2 dissimilarity size
2 4 3.1237967760688776 2
5 6 5.757158112027513 3
1 7 8.1392602685723 4
3 8 12.483148228609206 5
0 9 25.589444117482433 6
```
Another way to look at a dendrogram is to visualize it (the following image was
created with matplotlib):

If you're familiar with the central Massachusetts region, then this dendrogram
is probably incredibly boring. But if you're not, then this visualization
immediately tells you which municipalities are closest to each other. For
example, you can tell right away that Fitchburg is quite far from any other
municipality!
# Testing
The testing in this crate is made up of unit tests on internal data structures
and quickcheck properties that check the consistency between the various
clustering algorithms. That is, quickcheck is used to test that, given the
same inputs, the `mst`, `nnchain`, `generic` and `primitive` implementations
all return the same output.
There are some caveats to this testing strategy:
1. Only the `generic` and `primitive` implementations support all linkage
criteria, which means some linkage criteria have worse test coverage.
2. Principally, this testing strategy assumes that at least one of the
implementations is correct.
3. The various implementations do not specify how ties are handled, which
occurs whenever the same dissimilarity value appears two or more times for
distinct pairs of observations. That means there are multiple correct
dendrograms depending on the input. This case is not tested, and instead,
all input matrices are forced to contain distinct dissimilarity values.
4. The output of both Müllner's and SciPy's implementations of hierarchical
clustering has been hand-checked with the output of this crate. It would
be better to test this automatically, but the scaffolding has not been
built.
Obviously, this is not ideal and there is a lot of room for improvement!
*/
#![deny(missing_docs)]
use std::error;
use std::fmt;
use std::io;
use std::result;
use std::str::FromStr;
pub use crate::chain::{nnchain, nnchain_with};
pub use crate::dendrogram::{Dendrogram, Step};
pub use crate::float::Float;
pub use crate::generic::{generic, generic_with};
pub use crate::primitive::{primitive, primitive_with};
pub use crate::spanning::{mst, mst_with};
use crate::active::Active;
use crate::queue::LinkageHeap;
use crate::union::LinkageUnionFind;
mod active;
mod chain;
mod condensed;
mod dendrogram;
mod float;
mod generic;
mod method;
mod primitive;
mod queue;
mod spanning;
#[cfg(test)]
mod test;
mod union;
/// A type alias for `Result<T, Error>`.
pub type Result<T> = result::Result<T, Error>;
/// An error.
#[derive(Clone, Debug)]
pub enum Error {
/// This error occurs when attempting to parse a method string that
/// doesn't correspond to a valid method.
InvalidMethod(String),
}
impl error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Error::InvalidMethod(ref name) => {
write!(f, "unrecognized method name: '{}'", name)
}
}
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> io::Error {
io::Error::new(io::ErrorKind::Other, err)
}
}
/// A method for computing the dissimilarities between clusters.
///
/// The method selected dictates how the dissimilarities are computed whenever
/// a new cluster is formed. In particular, when clusters `a` and `b` are
/// merged into a new cluster `ab`, then the pairwise dissimilarity between
/// `ab` and every other cluster is computed using one of the methods variants
/// in this type.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Method {
/// Assigns the minimum dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// min(d[ab, x] for ab in AB for x in X)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively.
Single,
/// Assigns the maximum dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// max(d[ab, x] for ab in AB for x in X)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively.
Complete,
/// Assigns the average dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// sum(d[ab, x] for ab in AB for x in X) / (|AB| * |X|)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively, and `|AB|` and `|X|` correspond to the total number of
/// observations in `AB` and `X`, respectively.
Average,
/// Assigns the weighted dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// 0.5 * (d(A, X) + d(B, X))
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Weighted,
/// Assigns the Ward dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// let t1 = d(A, X)^2 * (|A| + |X|);
/// let t2 = d(B, X)^2 * (|B| + |X|);
/// let t3 = d(A, B)^2 * |X|;
/// let T = |A| + |B| + |X|;
/// sqrt(t1/T + t2/T + t3/T)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Ward,
/// Assigns the centroid dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// let t1 = |A| * d(A, X)^2 + |B| * d(B, X)^2);
/// let t2 = |A| * |B| * d(A, B)^2;
/// let size = |A| + |B|;
/// sqrt(t1/size - t2/size^2)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Centroid,
/// Assigns the median dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// sqrt(d(A, X)^2/2 + d(B, X)^2/2 - d(A, B)^2/4)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Median,
}
impl Method {
/// Convert this linkage method into a nearest neighbor chain method.
///
/// More specifically, if this method is a method that the `nnchain`
/// algorithm can compute, then this returns the corresponding
/// `MethodChain` value. Otherwise, this returns `None`.
pub fn into | f) -> Option<MethodChain> {
match self {
Method::Single => Some(MethodChain::Single),
Method::Complete => Some(MethodChain::Complete),
Method::Average => Some(MethodChain::Average),
Method::Weighted => Some(MethodChain::Weighted),
Method::Ward => Some(MethodChain::Ward),
Method::Centroid | Method::Median => None,
}
}
/// Returns true if and only if the dendrogram should be sorted before
/// generating cluster labels.
fn requires_sorting(&self) -> bool {
match *self {
Method::Centroid | Method::Median => false,
_ => true,
}
}
/// Square the given matrix if and only if this method must compute
/// dissimilarities between clusters on the squares of dissimilarities.
fn square<T: Float>(&self, condensed_matrix: &mut [T]) {
if self.on_squares() {
for x in condensed_matrix.iter_mut() {
*x = *x * *x;
}
}
}
/// Take the square-root of each step-wise dissimilarity in the given
/// dendrogram if this method operates on squares.
fn sqrt<T: Float>(&self, dend: &mut Dendrogram<T>) {
if self.on_squares() {
for step in dend.steps_mut() {
step.dissimilarity = step.dissimilarity.sqrt();
}
}
}
/// Return true if and only if this method computes dissimilarities on
/// squares.
fn on_squares(&self) -> bool {
match *self {
Method::Ward | Method::Centroid | Method::Median => true,
_ => false,
}
}
}
impl FromStr for Method {
type Err = Error;
fn from_str(s: &str) -> Result<Method> {
match s {
"single" => Ok(Method::Single),
"complete" => Ok(Method::Complete),
"average" => Ok(Method::Average),
"weighted" => Ok(Method::Weighted),
"centroid" => Ok(Method::Centroid),
"median" => Ok(Method::Median),
"ward" => Ok(Method::Ward),
_ => Err(Error::InvalidMethod(s.to_string())),
}
}
}
/// A method for computing dissimilarities between clusters in the `nnchain`
/// linkage algorithm.
///
/// The nearest-neighbor chain algorithm,
/// or [`nnchain`](fn.nnchain.html),
/// performs hierarchical clustering using a specialized algorithm that can
/// only compute linkage for methods that do not produce inversions in the
/// final dendrogram. As a result, the `nnchain` algorithm cannot be used
/// with the `Median` or `Centroid` methods. Therefore, `MethodChain`
/// identifies the subset of of methods that can be used with `nnchain`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum MethodChain {
/// See [`Method::Single`](enum.Method.html#variant.Single).
Single,
/// See [`Method::Complete`](enum.Method.html#variant.Complete).
Complete,
/// See [`Method::Average`](enum.Method.html#variant.Average).
Average,
/// See [`Method::Weighted`](enum.Method.html#variant.Weighted).
Weighted,
/// See [`Method::Ward`](enum.Method.html#variant.Ward).
Ward,
}
impl MethodChain {
/// Convert this `nnchain` linkage method into a general purpose
/// linkage method.
pub fn into_method(self) -> Method {
match self {
MethodChain::Single => Method::Single,
MethodChain::Complete => Method::Complete,
MethodChain::Average => Method::Average,
MethodChain::Weighted => Method::Weighted,
MethodChain::Ward => Method::Ward,
}
}
/// Square the given matrix if and only if this method must compute
/// dissimilarities between clusters on the squares of dissimilarities.
fn square<T: Float>(&self, condensed_matrix: &mut [T]) {
self.into_method().square(condensed_matrix);
}
/// Take the square-root of each step-wise dissimilarity in the given
/// dendrogram if this method operates on squares.
fn sqrt<T: Float>(&self, dend: &mut Dendrogram<T>) {
self.into_method().sqrt(dend);
}
}
impl FromStr for MethodChain {
type Err = Error;
fn from_str(s: &str) -> Result<MethodChain> {
match s {
"single" => Ok(MethodChain::Single),
"complete" => Ok(MethodChain::Complete),
"average" => Ok(MethodChain::Average),
"weighted" => Ok(MethodChain::Weighted),
"ward" => Ok(MethodChain::Ward),
_ => Err(Error::InvalidMethod(s.to_string())),
}
}
}
/// Return a hierarchical clustering of observations given their pairwise
/// dissimilarities.
///
/// The pairwise dissimilarities must be provided as a *condensed pairwise
/// dissimilarity matrix*, where only the values in the upper triangle are
/// explicitly represented, not including the diagonal. As a result, the given
/// matrix should have length `observations-choose-2` and only have values
/// defined for pairs of `(a, b)` where `a < b`.
///
/// `observations` is the total number of observations that are being
/// clustered. Every pair of observations must have a finite non-NaN
/// dissimilarity.
///
/// The return value is a
/// [`Dendrogram`](struct.Dendrogram.html),
/// which encodes the hierarchical clustering as a sequence of
/// `observations - 1` steps, where each step corresponds to the creation of
/// a cluster by merging exactly two previous clusters. The very last cluster
/// created contains all observations.
pub fn linkage<T: Float>(
condensed_dissimilarity_matrix: &mut [T],
observations: usize,
method: Method,
) -> Dendrogram<T> {
let matrix = condensed_dissimilarity_matrix;
let mut state = LinkageState::new();
let mut steps = Dendrogram::new(observations);
linkage_with(&mut state, matrix, observations, method, &mut steps);
steps
}
/// Like [`linkage`](fn.linkage.html), but amortizes allocation.
///
/// The `linkage` function is more ergonomic to use, but also potentially more
/// costly. Therefore, `linkage_with` exposes two key points for amortizing
/// allocation.
///
/// Firstly, [`LinkageState`](struct.LinkageState.html) corresponds to internal
/// mutable scratch space used by the clustering algorithms. It can be
/// reused in subsequent calls to `linkage_with` (or any of the other `with`
/// clustering functions).
///
/// Secondly, the caller must provide a
/// [`Dendrogram`](struct.Dendrogram.html)
/// that is mutated in place. This is in constrast to `linkage` where a
/// dendrogram is created and returned.
pub fn linkage_with<T: Float>(
state: &mut LinkageState<T>,
condensed_dissimilarity_matrix: &mut [T],
observations: usize,
method: Method,
steps: &mut Dendrogram<T>,
) {
let matrix = condensed_dissimilarity_matrix;
if let Method::Single = method {
mst_with(state, matrix, observations, steps);
} else if let Some(method) = method.into_method_chain() {
nnchain_with(state, matrix, observations, method, steps);
} else {
generic_with(state, matrix, observations, method, steps);
}
}
/// Mutable scratch space used by the linkage algorithms.
///
/// `LinkageState` is an opaque representation of mutable scratch space used
/// by the linkage algorithms. It is provided only for callers who wish to
/// amortize allocation using the `with` variants of the clustering functions.
/// This may be useful when your requirements call for rapidly running
/// hierarchical clustering on small dissimilarity matrices.
///
/// The memory used by `LinkageState` is proportional to the number of
/// observations being clustered.
///
/// The `T` type parameter refers to the type of dissimilarity used in the
/// pairwise matrix. In practice, `T` is a floating point type.
#[derive(Debug, Default)]
pub struct LinkageState<T> {
/// Maps a cluster index to the size of that cluster.
///
/// This mapping changes as clustering progresses. Namely, if `a` and `b`
/// are clusters with `a < b` and they are merged, then `a` is no longer a
/// valid cluster index and `b` now corresponds to the new cluster formed
/// by merging `a` and `b`.
sizes: Vec<usize>,
/// All active observations in the dissimilarity matrix.
///
/// When two clusters are merged, one of them is inactivated while the
/// other morphs to represent the merged cluster. This provides efficient
/// iteration over all active clusters.
active: Active,
/// A map from observation index to the minimal edge connecting another
/// observation that is not yet in the minimum spanning tree.
///
/// This is only used in the MST algorithm.
min_dists: Vec<T>,
/// A union-find set for merging clusters.
///
/// This is used for assigning labels to the dendrogram.
set: LinkageUnionFind,
/// A nearest-neighbor chain.
///
/// This is only used in the NN-chain algorithm.
chain: Vec<usize>,
/// A priority queue containing nearest-neighbor dissimilarities.
///
/// This is only used in the generic algorithm.
queue: LinkageHeap<T>,
/// A nearest neighbor candidate for each cluster.
///
/// This is only used in the generic algorithm.
nearest: Vec<usize>,
}
impl<T: Float> LinkageState<T> {
/// Create a new mutable scratch space for use in the `with` variants of
/// the clustering functions.
///
/// The clustering functions will automatically resize the scratch space
/// as needed based on the number of observations being clustered.
pub fn new() -> LinkageState<T> {
LinkageState {
sizes: vec![],
active: Active::new(),
min_dists: vec![],
set: LinkageUnionFind::new(),
chain: vec![],
queue: LinkageHeap::new(),
nearest: vec![],
}
}
/// Clear the scratch space and allocate enough room for `size`
/// observations.
fn reset(&mut self, size: usize) {
self.sizes.clear();
self.sizes.resize(size, 1);
self.active.reset(size);
self.min_dists.clear();
self.min_dists.resize(size, T::infinity());
self.set.reset(size);
self.chain.clear();
self.chain.resize(size, 0);
self.queue.reset(size);
self.nearest.clear();
self.nearest.resize(size, 0);
}
/// Merge `cluster1` and `cluster2` with the given `dissimilarity` into the
/// given dendrogram.
fn merge(
&mut self,
dend: &mut Dendrogram<T>,
cluster1: usize,
cluster2: usize,
dissimilarity: T,
) {
self.sizes[cluster2] = self.sizes[cluster1] + self.sizes[cluster2];
self.active.remove(cluster1);
dend.push(Step::new(
cluster1,
cluster2,
dissimilarity,
self.sizes[cluster2],
));
}
}
| _method_chain(sel | identifier_name |
lib.rs | /*!
This crate provides a fast implementation of agglomerative
[hierarchical clustering](https://en.wikipedia.org/wiki/Hierarchical_clustering).
The ideas and implementation in this crate are heavily based on the work of
Daniel Müllner, and in particular, his 2011 paper,
[Modern hierarchical, agglomerative clustering algorithms](https://arxiv.org/pdf/1109.2378.pdf).
Parts of the implementation have also been inspired by his C++
library, [`fastcluster`](http://danifold.net/fastcluster.html).
Müllner's work, in turn, is based on the hierarchical clustering facilities
provided by MATLAB and
[SciPy](https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html).
The runtime performance of this library is on par with Müllner's `fastcluster`
implementation.
# Overview
The most important parts of this crate are as follows:
* [`linkage`](fn.linkage.html) performs hierarchical clustering on a pairwise
dissimilarity matrix.
* [`Method`](enum.Method.html) determines the linkage criteria.
* [`Dendrogram`](struct.Dendrogram.html) is a representation of a "stepwise"
dendrogram, which serves as the output of hierarchical clustering.
# Usage
Add this to your `Cargo.toml`:
```text
[dependencies]
kodama = "0.3"
```
and this to your crate root:
```
extern crate kodama;
```
# Example
Showing an example is tricky, because it's hard to motivate the use of
hierarchical clustering on small data sets, and especially hard without
domain specific details that suggest a hierarchical clustering may actually
be useful.
Instead of solving the hard problem of motivating a real use case, let's take
a look at a toy use case: a hierarchical clustering of a small number of
geographic points. We'll measure the distance (by way of the crow) between
these points using latitude/longitude coordinates with the
[Haversine formula](https://en.wikipedia.org/wiki/Haversine_formula).
We'll use a small collection of municipalities from central Massachusetts in
our example. Here's the data:
```text
Index Municipality Latitude Longitude
0 Fitchburg 42.5833333 -71.8027778
1 Framingham 42.2791667 -71.4166667
2 Marlborough 42.3458333 -71.5527778
3 Northbridge 42.1513889 -71.6500000
4 Southborough 42.3055556 -71.5250000
5 Westborough 42.2694444 -71.6166667
```
Each municipality in our data represents a single observation, and we'd like to
create a hierarchical clustering of them using [`linkage`](fn.linkage.html).
The input to `linkage` is a *condensed pairwise dissimilarity matrix*. This
matrix stores the dissimilarity between all pairs of observations. The
"condensed" aspect of it means that it only stores the upper triangle (not
including the diagonal) of the matrix. We can do this because hierarchical
clustering requires that our dissimilarities between observations are
reflexive. That is, the dissimilarity between `A` and `B` is the same as the
dissimilarity between `B` and `A`. This is certainly true in our case with the
Haversine formula.
So let's compute all of the pairwise dissimilarities and create our condensed
pairwise matrix:
```
// See: https://en.wikipedia.org/wiki/Haversine_formula
fn haversine((lat1, lon1): (f64, f64), (lat2, lon2): (f64, f64)) -> f64 {
const EARTH_RADIUS: f64 = 3958.756; // miles
let (lat1, lon1) = (lat1.to_radians(), lon1.to_radians());
let (lat2, lon2) = (lat2.to_radians(), lon2.to_radians());
let delta_lat = lat2 - lat1;
let delta_lon = lon2 - lon1;
let x =
(delta_lat / 2.0).sin().powi(2)
+ lat1.cos() * lat2.cos() * (delta_lon / 2.0).sin().powi(2);
2.0 * EARTH_RADIUS * x.sqrt().atan()
}
// From our data set. Each coordinate pair corresponds to a single observation.
let coordinates = vec![
(42.5833333, -71.8027778),
(42.2791667, -71.4166667),
(42.3458333, -71.5527778),
(42.1513889, -71.6500000),
(42.3055556, -71.5250000),
(42.2694444, -71.6166667),
];
// Build our condensed matrix by computing the dissimilarity between all
// possible coordinate pairs.
let mut condensed = vec![];
for row in 0..coordinates.len() - 1 {
for col in row + 1..coordinates.len() {
condensed.push(haversine(coordinates[row], coordinates[col]));
}
}
// The length of a condensed dissimilarity matrix is always equal to
// `N-choose-2`, where `N` is the number of observations.
assert_eq!(condensed.len(), (coordinates.len() * (coordinates.len() - 1)) / 2);
```
Now that we have our condensed dissimilarity matrix, all we need to do is
choose our *linkage criterion*. The linkage criterion refers to the formula
that is used during hierarchical clustering to compute the dissimilarity
between newly formed clusters and all other clusters. This crate provides
several choices, and the choice one makes depends both on the problem you're
trying to solve and your performance requirements. For example, "single"
linkage corresponds to using the minimum dissimilarity between all pairs of
observations between two clusters as the dissimilarity between those two
clusters. It turns out that doing single linkage hierarchical clustering has
a rough isomorphism to computing the minimum spanning tree, which means the
implementation can be quite fast (`O(n^2)`, to be precise). However, other
linkage criteria require more general purpose algorithms with higher constant
factors or even worse time complexity. For example, using median linkage has
worst case `O(n^3)` complexity, although it is often `n^2` in practice.
In this case, we'll choose average linkage (which is `O(n^2)`). With that
decision made, we can finally run linkage:
```
# fn haversine((lat1, lon1): (f64, f64), (lat2, lon2): (f64, f64)) -> f64 {
# const EARTH_RADIUS: f64 = 3958.756; // miles
#
# let (lat1, lon1) = (lat1.to_radians(), lon1.to_radians());
# let (lat2, lon2) = (lat2.to_radians(), lon2.to_radians());
#
# let delta_lat = lat2 - lat1;
# let delta_lon = lon2 - lon1;
# let x =
# (delta_lat / 2.0).sin().powi(2)
# + lat1.cos() * lat2.cos() * (delta_lon / 2.0).sin().powi(2);
# 2.0 * EARTH_RADIUS * x.sqrt().atan()
# }
# let coordinates = vec![
# (42.5833333, -71.8027778),
# (42.2791667, -71.4166667),
# (42.3458333, -71.5527778),
# (42.1513889, -71.6500000),
# (42.3055556, -71.5250000),
# (42.2694444, -71.6166667),
# ];
# let mut condensed = vec![];
# for row in 0..coordinates.len() - 1 {
# for col in row + 1..coordinates.len() {
# condensed.push(haversine(coordinates[row], coordinates[col]));
# }
# }
use kodama::{Method, linkage};
let dend = linkage(&mut condensed, coordinates.len(), Method::Average);
// The dendrogram always has `N - 1` steps, where each step corresponds to a
// newly formed cluster by merging two previous clusters. The last step creates
// a cluster that contains all observations.
assert_eq!(dend.len(), coordinates.len() - 1);
```
The output of `linkage` is a stepwise
[`Dendrogram`](struct.Dendrogram.html).
Each step corresponds to a merge between two previous clusters. Each step is
represented by a 4-tuple: a pair of cluster labels, the dissimilarity between
the two clusters that have been merged and the total number of observations
in the newly formed cluster. Here's what our dendrogram looks like:
```text
cluster1 cluster2 dissimilarity size
2 4 3.1237967760688776 2
5 6 5.757158112027513 3
1 7 8.1392602685723 4
3 8 12.483148228609206 5
0 9 25.589444117482433 6
```
Another way to look at a dendrogram is to visualize it (the following image was
created with matplotlib):

If you're familiar with the central Massachusetts region, then this dendrogram
is probably incredibly boring. But if you're not, then this visualization
immediately tells you which municipalities are closest to each other. For
example, you can tell right away that Fitchburg is quite far from any other
municipality!
# Testing
The testing in this crate is made up of unit tests on internal data structures
and quickcheck properties that check the consistency between the various
clustering algorithms. That is, quickcheck is used to test that, given the
same inputs, the `mst`, `nnchain`, `generic` and `primitive` implementations
all return the same output.
There are some caveats to this testing strategy:
1. Only the `generic` and `primitive` implementations support all linkage
criteria, which means some linkage criteria have worse test coverage.
2. Principally, this testing strategy assumes that at least one of the
implementations is correct.
3. The various implementations do not specify how ties are handled, which
occurs whenever the same dissimilarity value appears two or more times for
distinct pairs of observations. That means there are multiple correct
dendrograms depending on the input. This case is not tested, and instead,
all input matrices are forced to contain distinct dissimilarity values.
4. The output of both Müllner's and SciPy's implementations of hierarchical
clustering has been hand-checked with the output of this crate. It would
be better to test this automatically, but the scaffolding has not been
built.
Obviously, this is not ideal and there is a lot of room for improvement!
*/
#![deny(missing_docs)]
use std::error;
use std::fmt;
use std::io;
use std::result;
use std::str::FromStr;
pub use crate::chain::{nnchain, nnchain_with};
pub use crate::dendrogram::{Dendrogram, Step};
pub use crate::float::Float;
pub use crate::generic::{generic, generic_with};
pub use crate::primitive::{primitive, primitive_with};
pub use crate::spanning::{mst, mst_with};
use crate::active::Active;
use crate::queue::LinkageHeap;
use crate::union::LinkageUnionFind;
mod active;
mod chain;
mod condensed;
mod dendrogram;
mod float;
mod generic;
mod method;
mod primitive;
mod queue;
mod spanning;
#[cfg(test)]
mod test;
mod union;
/// A type alias for `Result<T, Error>`.
pub type Result<T> = result::Result<T, Error>;
/// An error.
#[derive(Clone, Debug)]
pub enum Error {
/// This error occurs when attempting to parse a method string that
/// doesn't correspond to a valid method.
InvalidMethod(String),
}
impl error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Error::InvalidMethod(ref name) => {
write!(f, "unrecognized method name: '{}'", name)
}
}
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> io::Error {
io::Error::new(io::ErrorKind::Other, err)
}
}
/// A method for computing the dissimilarities between clusters.
///
/// The method selected dictates how the dissimilarities are computed whenever
/// a new cluster is formed. In particular, when clusters `a` and `b` are
/// merged into a new cluster `ab`, then the pairwise dissimilarity between
/// `ab` and every other cluster is computed using one of the methods variants
/// in this type.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Method {
/// Assigns the minimum dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// min(d[ab, x] for ab in AB for x in X)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively.
Single,
/// Assigns the maximum dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// max(d[ab, x] for ab in AB for x in X)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively.
Complete,
/// Assigns the average dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// sum(d[ab, x] for ab in AB for x in X) / (|AB| * |X|)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively, and `|AB|` and `|X|` correspond to the total number of
/// observations in `AB` and `X`, respectively.
Average,
/// Assigns the weighted dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// 0.5 * (d(A, X) + d(B, X))
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Weighted,
/// Assigns the Ward dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// let t1 = d(A, X)^2 * (|A| + |X|);
/// let t2 = d(B, X)^2 * (|B| + |X|);
/// let t3 = d(A, B)^2 * |X|;
/// let T = |A| + |B| + |X|;
/// sqrt(t1/T + t2/T + t3/T)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Ward,
/// Assigns the centroid dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// let t1 = |A| * d(A, X)^2 + |B| * d(B, X)^2);
/// let t2 = |A| * |B| * d(A, B)^2;
/// let size = |A| + |B|;
/// sqrt(t1/size - t2/size^2)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Centroid,
/// Assigns the median dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// sqrt(d(A, X)^2/2 + d(B, X)^2/2 - d(A, B)^2/4)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Median,
}
impl Method {
/// Convert this linkage method into a nearest neighbor chain method.
///
/// More specifically, if this method is a method that the `nnchain`
/// algorithm can compute, then this returns the corresponding
/// `MethodChain` value. Otherwise, this returns `None`.
pub fn into_method_chain(self) -> Option<MethodChain> {
match self {
Method::Single => Some(MethodChain::Single),
Method::Complete => Some(MethodChain::Complete),
Method::Average => Some(MethodChain::Average),
Method::Weighted => Some(MethodChain::Weighted),
Method::Ward => Some(MethodChain::Ward),
Method::Centroid | Method::Median => None,
}
}
/// Returns true if and only if the dendrogram should be sorted before
/// generating cluster labels.
fn requires_sorting(&self) -> bool {
match *self {
Method::Centroid | Method::Median => false,
_ => true,
}
}
/// Square the given matrix if and only if this method must compute
/// dissimilarities between clusters on the squares of dissimilarities.
fn square<T: Float>(&self, condensed_matrix: &mut [T]) {
if self.on_squares() {
for x in condensed_matrix.iter_mut() {
*x = *x * *x;
}
}
}
/// Take the square-root of each step-wise dissimilarity in the given
/// dendrogram if this method operates on squares.
fn sqrt<T: Float>(&self, dend: &mut Dendrogram<T>) {
if self.on_squares() {
for step in dend.steps_mut() {
step.dissimilarity = step.dissimilarity.sqrt();
}
}
}
/// Return true if and only if this method computes dissimilarities on
/// squares.
fn on_squares(&self) -> bool {
match *self {
Method::Ward | Method::Centroid | Method::Median => true,
_ => false,
}
}
}
impl FromStr for Method {
type Err = Error;
fn from_str(s: &str) -> Result<Method> {
match s {
"single" => Ok(Method::Single),
"complete" => Ok(Method::Complete),
"average" => Ok(Method::Average),
"weighted" => Ok(Method::Weighted),
"centroid" => Ok(Method::Centroid),
"median" => Ok(Method::Median),
"ward" => Ok(Method::Ward),
_ => Err(Error::InvalidMethod(s.to_string())),
}
}
}
/// A method for computing dissimilarities between clusters in the `nnchain`
/// linkage algorithm.
///
/// The nearest-neighbor chain algorithm,
/// or [`nnchain`](fn.nnchain.html),
/// performs hierarchical clustering using a specialized algorithm that can
/// only compute linkage for methods that do not produce inversions in the
/// final dendrogram. As a result, the `nnchain` algorithm cannot be used
/// with the `Median` or `Centroid` methods. Therefore, `MethodChain`
/// identifies the subset of of methods that can be used with `nnchain`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum MethodChain {
/// See [`Method::Single`](enum.Method.html#variant.Single).
Single,
/// See [`Method::Complete`](enum.Method.html#variant.Complete).
Complete,
/// See [`Method::Average`](enum.Method.html#variant.Average).
Average,
/// See [`Method::Weighted`](enum.Method.html#variant.Weighted).
Weighted,
/// See [`Method::Ward`](enum.Method.html#variant.Ward).
Ward,
}
impl MethodChain {
/// Convert this `nnchain` linkage method into a general purpose
/// linkage method.
pub fn into_method(self) -> Method {
match self {
MethodChain::Single => Method::Single,
MethodChain::Complete => Method::Complete,
MethodChain::Average => Method::Average,
MethodChain::Weighted => Method::Weighted,
MethodChain::Ward => Method::Ward,
}
}
/// Square the given matrix if and only if this method must compute
/// dissimilarities between clusters on the squares of dissimilarities.
fn square<T: Float>(&self, condensed_matrix: &mut [T]) {
self.into_method().square(condensed_matrix);
}
/// Take the square-root of each step-wise dissimilarity in the given
/// dendrogram if this method operates on squares.
fn sqrt<T: Float>(&self, dend: &mut Dendrogram<T>) {
| impl FromStr for MethodChain {
type Err = Error;
fn from_str(s: &str) -> Result<MethodChain> {
match s {
"single" => Ok(MethodChain::Single),
"complete" => Ok(MethodChain::Complete),
"average" => Ok(MethodChain::Average),
"weighted" => Ok(MethodChain::Weighted),
"ward" => Ok(MethodChain::Ward),
_ => Err(Error::InvalidMethod(s.to_string())),
}
}
}
/// Return a hierarchical clustering of observations given their pairwise
/// dissimilarities.
///
/// The pairwise dissimilarities must be provided as a *condensed pairwise
/// dissimilarity matrix*, where only the values in the upper triangle are
/// explicitly represented, not including the diagonal. As a result, the given
/// matrix should have length `observations-choose-2` and only have values
/// defined for pairs of `(a, b)` where `a < b`.
///
/// `observations` is the total number of observations that are being
/// clustered. Every pair of observations must have a finite non-NaN
/// dissimilarity.
///
/// The return value is a
/// [`Dendrogram`](struct.Dendrogram.html),
/// which encodes the hierarchical clustering as a sequence of
/// `observations - 1` steps, where each step corresponds to the creation of
/// a cluster by merging exactly two previous clusters. The very last cluster
/// created contains all observations.
pub fn linkage<T: Float>(
condensed_dissimilarity_matrix: &mut [T],
observations: usize,
method: Method,
) -> Dendrogram<T> {
let matrix = condensed_dissimilarity_matrix;
let mut state = LinkageState::new();
let mut steps = Dendrogram::new(observations);
linkage_with(&mut state, matrix, observations, method, &mut steps);
steps
}
/// Like [`linkage`](fn.linkage.html), but amortizes allocation.
///
/// The `linkage` function is more ergonomic to use, but also potentially more
/// costly. Therefore, `linkage_with` exposes two key points for amortizing
/// allocation.
///
/// Firstly, [`LinkageState`](struct.LinkageState.html) corresponds to internal
/// mutable scratch space used by the clustering algorithms. It can be
/// reused in subsequent calls to `linkage_with` (or any of the other `with`
/// clustering functions).
///
/// Secondly, the caller must provide a
/// [`Dendrogram`](struct.Dendrogram.html)
/// that is mutated in place. This is in constrast to `linkage` where a
/// dendrogram is created and returned.
pub fn linkage_with<T: Float>(
state: &mut LinkageState<T>,
condensed_dissimilarity_matrix: &mut [T],
observations: usize,
method: Method,
steps: &mut Dendrogram<T>,
) {
let matrix = condensed_dissimilarity_matrix;
if let Method::Single = method {
mst_with(state, matrix, observations, steps);
} else if let Some(method) = method.into_method_chain() {
nnchain_with(state, matrix, observations, method, steps);
} else {
generic_with(state, matrix, observations, method, steps);
}
}
/// Mutable scratch space used by the linkage algorithms.
///
/// `LinkageState` is an opaque representation of mutable scratch space used
/// by the linkage algorithms. It is provided only for callers who wish to
/// amortize allocation using the `with` variants of the clustering functions.
/// This may be useful when your requirements call for rapidly running
/// hierarchical clustering on small dissimilarity matrices.
///
/// The memory used by `LinkageState` is proportional to the number of
/// observations being clustered.
///
/// The `T` type parameter refers to the type of dissimilarity used in the
/// pairwise matrix. In practice, `T` is a floating point type.
#[derive(Debug, Default)]
pub struct LinkageState<T> {
/// Maps a cluster index to the size of that cluster.
///
/// This mapping changes as clustering progresses. Namely, if `a` and `b`
/// are clusters with `a < b` and they are merged, then `a` is no longer a
/// valid cluster index and `b` now corresponds to the new cluster formed
/// by merging `a` and `b`.
sizes: Vec<usize>,
/// All active observations in the dissimilarity matrix.
///
/// When two clusters are merged, one of them is inactivated while the
/// other morphs to represent the merged cluster. This provides efficient
/// iteration over all active clusters.
active: Active,
/// A map from observation index to the minimal edge connecting another
/// observation that is not yet in the minimum spanning tree.
///
/// This is only used in the MST algorithm.
min_dists: Vec<T>,
/// A union-find set for merging clusters.
///
/// This is used for assigning labels to the dendrogram.
set: LinkageUnionFind,
/// A nearest-neighbor chain.
///
/// This is only used in the NN-chain algorithm.
chain: Vec<usize>,
/// A priority queue containing nearest-neighbor dissimilarities.
///
/// This is only used in the generic algorithm.
queue: LinkageHeap<T>,
/// A nearest neighbor candidate for each cluster.
///
/// This is only used in the generic algorithm.
nearest: Vec<usize>,
}
impl<T: Float> LinkageState<T> {
/// Create a new mutable scratch space for use in the `with` variants of
/// the clustering functions.
///
/// The clustering functions will automatically resize the scratch space
/// as needed based on the number of observations being clustered.
pub fn new() -> LinkageState<T> {
LinkageState {
sizes: vec![],
active: Active::new(),
min_dists: vec![],
set: LinkageUnionFind::new(),
chain: vec![],
queue: LinkageHeap::new(),
nearest: vec![],
}
}
/// Clear the scratch space and allocate enough room for `size`
/// observations.
fn reset(&mut self, size: usize) {
self.sizes.clear();
self.sizes.resize(size, 1);
self.active.reset(size);
self.min_dists.clear();
self.min_dists.resize(size, T::infinity());
self.set.reset(size);
self.chain.clear();
self.chain.resize(size, 0);
self.queue.reset(size);
self.nearest.clear();
self.nearest.resize(size, 0);
}
/// Merge `cluster1` and `cluster2` with the given `dissimilarity` into the
/// given dendrogram.
fn merge(
&mut self,
dend: &mut Dendrogram<T>,
cluster1: usize,
cluster2: usize,
dissimilarity: T,
) {
self.sizes[cluster2] = self.sizes[cluster1] + self.sizes[cluster2];
self.active.remove(cluster1);
dend.push(Step::new(
cluster1,
cluster2,
dissimilarity,
self.sizes[cluster2],
));
}
}
| self.into_method().sqrt(dend);
}
}
| identifier_body |
lib.rs | /*!
This crate provides a fast implementation of agglomerative
[hierarchical clustering](https://en.wikipedia.org/wiki/Hierarchical_clustering).
The ideas and implementation in this crate are heavily based on the work of
Daniel Müllner, and in particular, his 2011 paper,
[Modern hierarchical, agglomerative clustering algorithms](https://arxiv.org/pdf/1109.2378.pdf).
Parts of the implementation have also been inspired by his C++
library, [`fastcluster`](http://danifold.net/fastcluster.html).
Müllner's work, in turn, is based on the hierarchical clustering facilities
provided by MATLAB and
[SciPy](https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html).
The runtime performance of this library is on par with Müllner's `fastcluster`
implementation.
# Overview
The most important parts of this crate are as follows:
* [`linkage`](fn.linkage.html) performs hierarchical clustering on a pairwise
dissimilarity matrix.
* [`Method`](enum.Method.html) determines the linkage criteria.
* [`Dendrogram`](struct.Dendrogram.html) is a representation of a "stepwise"
dendrogram, which serves as the output of hierarchical clustering.
# Usage
Add this to your `Cargo.toml`:
```text
[dependencies]
kodama = "0.3"
```
and this to your crate root:
```
extern crate kodama;
```
# Example
Showing an example is tricky, because it's hard to motivate the use of
hierarchical clustering on small data sets, and especially hard without
domain specific details that suggest a hierarchical clustering may actually
be useful.
Instead of solving the hard problem of motivating a real use case, let's take
a look at a toy use case: a hierarchical clustering of a small number of
geographic points. We'll measure the distance (by way of the crow) between
these points using latitude/longitude coordinates with the
[Haversine formula](https://en.wikipedia.org/wiki/Haversine_formula).
We'll use a small collection of municipalities from central Massachusetts in
our example. Here's the data:
```text
Index Municipality Latitude Longitude
0 Fitchburg 42.5833333 -71.8027778
1 Framingham 42.2791667 -71.4166667
2 Marlborough 42.3458333 -71.5527778
3 Northbridge 42.1513889 -71.6500000
4 Southborough 42.3055556 -71.5250000
5 Westborough 42.2694444 -71.6166667
```
Each municipality in our data represents a single observation, and we'd like to
create a hierarchical clustering of them using [`linkage`](fn.linkage.html).
The input to `linkage` is a *condensed pairwise dissimilarity matrix*. This
matrix stores the dissimilarity between all pairs of observations. The
"condensed" aspect of it means that it only stores the upper triangle (not
including the diagonal) of the matrix. We can do this because hierarchical
clustering requires that our dissimilarities between observations are
reflexive. That is, the dissimilarity between `A` and `B` is the same as the
dissimilarity between `B` and `A`. This is certainly true in our case with the
Haversine formula.
So let's compute all of the pairwise dissimilarities and create our condensed
pairwise matrix:
```
// See: https://en.wikipedia.org/wiki/Haversine_formula
fn haversine((lat1, lon1): (f64, f64), (lat2, lon2): (f64, f64)) -> f64 {
const EARTH_RADIUS: f64 = 3958.756; // miles
let (lat1, lon1) = (lat1.to_radians(), lon1.to_radians());
let (lat2, lon2) = (lat2.to_radians(), lon2.to_radians());
let delta_lat = lat2 - lat1;
let delta_lon = lon2 - lon1;
let x =
(delta_lat / 2.0).sin().powi(2)
+ lat1.cos() * lat2.cos() * (delta_lon / 2.0).sin().powi(2);
2.0 * EARTH_RADIUS * x.sqrt().atan()
}
// From our data set. Each coordinate pair corresponds to a single observation.
let coordinates = vec![
(42.5833333, -71.8027778),
(42.2791667, -71.4166667),
(42.3458333, -71.5527778),
(42.1513889, -71.6500000),
(42.3055556, -71.5250000),
(42.2694444, -71.6166667),
];
// Build our condensed matrix by computing the dissimilarity between all
// possible coordinate pairs.
let mut condensed = vec![];
for row in 0..coordinates.len() - 1 {
for col in row + 1..coordinates.len() {
condensed.push(haversine(coordinates[row], coordinates[col]));
}
}
// The length of a condensed dissimilarity matrix is always equal to
// `N-choose-2`, where `N` is the number of observations.
assert_eq!(condensed.len(), (coordinates.len() * (coordinates.len() - 1)) / 2);
```
Now that we have our condensed dissimilarity matrix, all we need to do is
choose our *linkage criterion*. The linkage criterion refers to the formula
that is used during hierarchical clustering to compute the dissimilarity
between newly formed clusters and all other clusters. This crate provides
several choices, and the choice one makes depends both on the problem you're
trying to solve and your performance requirements. For example, "single"
linkage corresponds to using the minimum dissimilarity between all pairs of
observations between two clusters as the dissimilarity between those two
clusters. It turns out that doing single linkage hierarchical clustering has
a rough isomorphism to computing the minimum spanning tree, which means the
implementation can be quite fast (`O(n^2)`, to be precise). However, other
linkage criteria require more general purpose algorithms with higher constant
factors or even worse time complexity. For example, using median linkage has
worst case `O(n^3)` complexity, although it is often `n^2` in practice.
In this case, we'll choose average linkage (which is `O(n^2)`). With that
decision made, we can finally run linkage:
```
# fn haversine((lat1, lon1): (f64, f64), (lat2, lon2): (f64, f64)) -> f64 {
# const EARTH_RADIUS: f64 = 3958.756; // miles
#
# let (lat1, lon1) = (lat1.to_radians(), lon1.to_radians());
# let (lat2, lon2) = (lat2.to_radians(), lon2.to_radians());
#
# let delta_lat = lat2 - lat1;
# let delta_lon = lon2 - lon1;
# let x =
# (delta_lat / 2.0).sin().powi(2)
# + lat1.cos() * lat2.cos() * (delta_lon / 2.0).sin().powi(2);
# 2.0 * EARTH_RADIUS * x.sqrt().atan()
# }
# let coordinates = vec![
# (42.5833333, -71.8027778),
# (42.2791667, -71.4166667),
# (42.3458333, -71.5527778),
# (42.1513889, -71.6500000),
# (42.3055556, -71.5250000),
# (42.2694444, -71.6166667),
# ];
# let mut condensed = vec![];
# for row in 0..coordinates.len() - 1 {
# for col in row + 1..coordinates.len() {
# condensed.push(haversine(coordinates[row], coordinates[col]));
# }
# }
use kodama::{Method, linkage};
let dend = linkage(&mut condensed, coordinates.len(), Method::Average);
// The dendrogram always has `N - 1` steps, where each step corresponds to a
// newly formed cluster by merging two previous clusters. The last step creates
// a cluster that contains all observations.
assert_eq!(dend.len(), coordinates.len() - 1);
```
The output of `linkage` is a stepwise
[`Dendrogram`](struct.Dendrogram.html).
Each step corresponds to a merge between two previous clusters. Each step is
represented by a 4-tuple: a pair of cluster labels, the dissimilarity between
the two clusters that have been merged and the total number of observations
in the newly formed cluster. Here's what our dendrogram looks like:
```text
cluster1 cluster2 dissimilarity size
2 4 3.1237967760688776 2
5 6 5.757158112027513 3
1 7 8.1392602685723 4
3 8 12.483148228609206 5
0 9 25.589444117482433 6
```
Another way to look at a dendrogram is to visualize it (the following image was
created with matplotlib):

If you're familiar with the central Massachusetts region, then this dendrogram
is probably incredibly boring. But if you're not, then this visualization
immediately tells you which municipalities are closest to each other. For
example, you can tell right away that Fitchburg is quite far from any other
municipality!
# Testing
The testing in this crate is made up of unit tests on internal data structures
and quickcheck properties that check the consistency between the various
clustering algorithms. That is, quickcheck is used to test that, given the
same inputs, the `mst`, `nnchain`, `generic` and `primitive` implementations
all return the same output.
There are some caveats to this testing strategy:
1. Only the `generic` and `primitive` implementations support all linkage
criteria, which means some linkage criteria have worse test coverage.
2. Principally, this testing strategy assumes that at least one of the
implementations is correct.
3. The various implementations do not specify how ties are handled, which
occurs whenever the same dissimilarity value appears two or more times for
distinct pairs of observations. That means there are multiple correct
dendrograms depending on the input. This case is not tested, and instead,
all input matrices are forced to contain distinct dissimilarity values.
4. The output of both Müllner's and SciPy's implementations of hierarchical
clustering has been hand-checked with the output of this crate. It would
be better to test this automatically, but the scaffolding has not been
built.
Obviously, this is not ideal and there is a lot of room for improvement!
*/
#![deny(missing_docs)]
use std::error;
use std::fmt;
use std::io;
use std::result;
use std::str::FromStr;
pub use crate::chain::{nnchain, nnchain_with};
pub use crate::dendrogram::{Dendrogram, Step};
pub use crate::float::Float;
pub use crate::generic::{generic, generic_with};
pub use crate::primitive::{primitive, primitive_with};
pub use crate::spanning::{mst, mst_with};
use crate::active::Active;
use crate::queue::LinkageHeap;
use crate::union::LinkageUnionFind;
mod active;
mod chain;
mod condensed;
mod dendrogram;
mod float;
mod generic;
mod method;
mod primitive;
mod queue;
mod spanning;
#[cfg(test)]
mod test;
mod union;
/// A type alias for `Result<T, Error>`.
pub type Result<T> = result::Result<T, Error>;
/// An error.
#[derive(Clone, Debug)]
pub enum Error {
/// This error occurs when attempting to parse a method string that
/// doesn't correspond to a valid method.
InvalidMethod(String),
}
impl error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Error::InvalidMethod(ref name) => {
write!(f, "unrecognized method name: '{}'", name)
}
}
} | io::Error::new(io::ErrorKind::Other, err)
}
}
/// A method for computing the dissimilarities between clusters.
///
/// The method selected dictates how the dissimilarities are computed whenever
/// a new cluster is formed. In particular, when clusters `a` and `b` are
/// merged into a new cluster `ab`, then the pairwise dissimilarity between
/// `ab` and every other cluster is computed using one of the methods variants
/// in this type.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Method {
/// Assigns the minimum dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// min(d[ab, x] for ab in AB for x in X)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively.
Single,
/// Assigns the maximum dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// max(d[ab, x] for ab in AB for x in X)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively.
Complete,
/// Assigns the average dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// sum(d[ab, x] for ab in AB for x in X) / (|AB| * |X|)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively, and `|AB|` and `|X|` correspond to the total number of
/// observations in `AB` and `X`, respectively.
Average,
/// Assigns the weighted dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// 0.5 * (d(A, X) + d(B, X))
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Weighted,
/// Assigns the Ward dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// let t1 = d(A, X)^2 * (|A| + |X|);
/// let t2 = d(B, X)^2 * (|B| + |X|);
/// let t3 = d(A, B)^2 * |X|;
/// let T = |A| + |B| + |X|;
/// sqrt(t1/T + t2/T + t3/T)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Ward,
/// Assigns the centroid dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// let t1 = |A| * d(A, X)^2 + |B| * d(B, X)^2);
/// let t2 = |A| * |B| * d(A, B)^2;
/// let size = |A| + |B|;
/// sqrt(t1/size - t2/size^2)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Centroid,
/// Assigns the median dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// sqrt(d(A, X)^2/2 + d(B, X)^2/2 - d(A, B)^2/4)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Median,
}
impl Method {
/// Convert this linkage method into a nearest neighbor chain method.
///
/// More specifically, if this method is a method that the `nnchain`
/// algorithm can compute, then this returns the corresponding
/// `MethodChain` value. Otherwise, this returns `None`.
pub fn into_method_chain(self) -> Option<MethodChain> {
match self {
Method::Single => Some(MethodChain::Single),
Method::Complete => Some(MethodChain::Complete),
Method::Average => Some(MethodChain::Average),
Method::Weighted => Some(MethodChain::Weighted),
Method::Ward => Some(MethodChain::Ward),
Method::Centroid | Method::Median => None,
}
}
/// Returns true if and only if the dendrogram should be sorted before
/// generating cluster labels.
fn requires_sorting(&self) -> bool {
match *self {
Method::Centroid | Method::Median => false,
_ => true,
}
}
/// Square the given matrix if and only if this method must compute
/// dissimilarities between clusters on the squares of dissimilarities.
fn square<T: Float>(&self, condensed_matrix: &mut [T]) {
if self.on_squares() {
for x in condensed_matrix.iter_mut() {
*x = *x * *x;
}
}
}
/// Take the square-root of each step-wise dissimilarity in the given
/// dendrogram if this method operates on squares.
fn sqrt<T: Float>(&self, dend: &mut Dendrogram<T>) {
if self.on_squares() {
for step in dend.steps_mut() {
step.dissimilarity = step.dissimilarity.sqrt();
}
}
}
/// Return true if and only if this method computes dissimilarities on
/// squares.
fn on_squares(&self) -> bool {
match *self {
Method::Ward | Method::Centroid | Method::Median => true,
_ => false,
}
}
}
impl FromStr for Method {
type Err = Error;
fn from_str(s: &str) -> Result<Method> {
match s {
"single" => Ok(Method::Single),
"complete" => Ok(Method::Complete),
"average" => Ok(Method::Average),
"weighted" => Ok(Method::Weighted),
"centroid" => Ok(Method::Centroid),
"median" => Ok(Method::Median),
"ward" => Ok(Method::Ward),
_ => Err(Error::InvalidMethod(s.to_string())),
}
}
}
/// A method for computing dissimilarities between clusters in the `nnchain`
/// linkage algorithm.
///
/// The nearest-neighbor chain algorithm,
/// or [`nnchain`](fn.nnchain.html),
/// performs hierarchical clustering using a specialized algorithm that can
/// only compute linkage for methods that do not produce inversions in the
/// final dendrogram. As a result, the `nnchain` algorithm cannot be used
/// with the `Median` or `Centroid` methods. Therefore, `MethodChain`
/// identifies the subset of of methods that can be used with `nnchain`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum MethodChain {
/// See [`Method::Single`](enum.Method.html#variant.Single).
Single,
/// See [`Method::Complete`](enum.Method.html#variant.Complete).
Complete,
/// See [`Method::Average`](enum.Method.html#variant.Average).
Average,
/// See [`Method::Weighted`](enum.Method.html#variant.Weighted).
Weighted,
/// See [`Method::Ward`](enum.Method.html#variant.Ward).
Ward,
}
impl MethodChain {
/// Convert this `nnchain` linkage method into a general purpose
/// linkage method.
pub fn into_method(self) -> Method {
match self {
MethodChain::Single => Method::Single,
MethodChain::Complete => Method::Complete,
MethodChain::Average => Method::Average,
MethodChain::Weighted => Method::Weighted,
MethodChain::Ward => Method::Ward,
}
}
/// Square the given matrix if and only if this method must compute
/// dissimilarities between clusters on the squares of dissimilarities.
fn square<T: Float>(&self, condensed_matrix: &mut [T]) {
self.into_method().square(condensed_matrix);
}
/// Take the square-root of each step-wise dissimilarity in the given
/// dendrogram if this method operates on squares.
fn sqrt<T: Float>(&self, dend: &mut Dendrogram<T>) {
self.into_method().sqrt(dend);
}
}
impl FromStr for MethodChain {
type Err = Error;
fn from_str(s: &str) -> Result<MethodChain> {
match s {
"single" => Ok(MethodChain::Single),
"complete" => Ok(MethodChain::Complete),
"average" => Ok(MethodChain::Average),
"weighted" => Ok(MethodChain::Weighted),
"ward" => Ok(MethodChain::Ward),
_ => Err(Error::InvalidMethod(s.to_string())),
}
}
}
/// Return a hierarchical clustering of observations given their pairwise
/// dissimilarities.
///
/// The pairwise dissimilarities must be provided as a *condensed pairwise
/// dissimilarity matrix*, where only the values in the upper triangle are
/// explicitly represented, not including the diagonal. As a result, the given
/// matrix should have length `observations-choose-2` and only have values
/// defined for pairs of `(a, b)` where `a < b`.
///
/// `observations` is the total number of observations that are being
/// clustered. Every pair of observations must have a finite non-NaN
/// dissimilarity.
///
/// The return value is a
/// [`Dendrogram`](struct.Dendrogram.html),
/// which encodes the hierarchical clustering as a sequence of
/// `observations - 1` steps, where each step corresponds to the creation of
/// a cluster by merging exactly two previous clusters. The very last cluster
/// created contains all observations.
pub fn linkage<T: Float>(
condensed_dissimilarity_matrix: &mut [T],
observations: usize,
method: Method,
) -> Dendrogram<T> {
let matrix = condensed_dissimilarity_matrix;
let mut state = LinkageState::new();
let mut steps = Dendrogram::new(observations);
linkage_with(&mut state, matrix, observations, method, &mut steps);
steps
}
/// Like [`linkage`](fn.linkage.html), but amortizes allocation.
///
/// The `linkage` function is more ergonomic to use, but also potentially more
/// costly. Therefore, `linkage_with` exposes two key points for amortizing
/// allocation.
///
/// Firstly, [`LinkageState`](struct.LinkageState.html) corresponds to internal
/// mutable scratch space used by the clustering algorithms. It can be
/// reused in subsequent calls to `linkage_with` (or any of the other `with`
/// clustering functions).
///
/// Secondly, the caller must provide a
/// [`Dendrogram`](struct.Dendrogram.html)
/// that is mutated in place. This is in constrast to `linkage` where a
/// dendrogram is created and returned.
pub fn linkage_with<T: Float>(
state: &mut LinkageState<T>,
condensed_dissimilarity_matrix: &mut [T],
observations: usize,
method: Method,
steps: &mut Dendrogram<T>,
) {
let matrix = condensed_dissimilarity_matrix;
if let Method::Single = method {
mst_with(state, matrix, observations, steps);
} else if let Some(method) = method.into_method_chain() {
nnchain_with(state, matrix, observations, method, steps);
} else {
generic_with(state, matrix, observations, method, steps);
}
}
/// Mutable scratch space used by the linkage algorithms.
///
/// `LinkageState` is an opaque representation of mutable scratch space used
/// by the linkage algorithms. It is provided only for callers who wish to
/// amortize allocation using the `with` variants of the clustering functions.
/// This may be useful when your requirements call for rapidly running
/// hierarchical clustering on small dissimilarity matrices.
///
/// The memory used by `LinkageState` is proportional to the number of
/// observations being clustered.
///
/// The `T` type parameter refers to the type of dissimilarity used in the
/// pairwise matrix. In practice, `T` is a floating point type.
#[derive(Debug, Default)]
pub struct LinkageState<T> {
/// Maps a cluster index to the size of that cluster.
///
/// This mapping changes as clustering progresses. Namely, if `a` and `b`
/// are clusters with `a < b` and they are merged, then `a` is no longer a
/// valid cluster index and `b` now corresponds to the new cluster formed
/// by merging `a` and `b`.
sizes: Vec<usize>,
/// All active observations in the dissimilarity matrix.
///
/// When two clusters are merged, one of them is inactivated while the
/// other morphs to represent the merged cluster. This provides efficient
/// iteration over all active clusters.
active: Active,
/// A map from observation index to the minimal edge connecting another
/// observation that is not yet in the minimum spanning tree.
///
/// This is only used in the MST algorithm.
min_dists: Vec<T>,
/// A union-find set for merging clusters.
///
/// This is used for assigning labels to the dendrogram.
set: LinkageUnionFind,
/// A nearest-neighbor chain.
///
/// This is only used in the NN-chain algorithm.
chain: Vec<usize>,
/// A priority queue containing nearest-neighbor dissimilarities.
///
/// This is only used in the generic algorithm.
queue: LinkageHeap<T>,
/// A nearest neighbor candidate for each cluster.
///
/// This is only used in the generic algorithm.
nearest: Vec<usize>,
}
impl<T: Float> LinkageState<T> {
/// Create a new mutable scratch space for use in the `with` variants of
/// the clustering functions.
///
/// The clustering functions will automatically resize the scratch space
/// as needed based on the number of observations being clustered.
pub fn new() -> LinkageState<T> {
LinkageState {
sizes: vec![],
active: Active::new(),
min_dists: vec![],
set: LinkageUnionFind::new(),
chain: vec![],
queue: LinkageHeap::new(),
nearest: vec![],
}
}
/// Clear the scratch space and allocate enough room for `size`
/// observations.
fn reset(&mut self, size: usize) {
self.sizes.clear();
self.sizes.resize(size, 1);
self.active.reset(size);
self.min_dists.clear();
self.min_dists.resize(size, T::infinity());
self.set.reset(size);
self.chain.clear();
self.chain.resize(size, 0);
self.queue.reset(size);
self.nearest.clear();
self.nearest.resize(size, 0);
}
/// Merge `cluster1` and `cluster2` with the given `dissimilarity` into the
/// given dendrogram.
fn merge(
&mut self,
dend: &mut Dendrogram<T>,
cluster1: usize,
cluster2: usize,
dissimilarity: T,
) {
self.sizes[cluster2] = self.sizes[cluster1] + self.sizes[cluster2];
self.active.remove(cluster1);
dend.push(Step::new(
cluster1,
cluster2,
dissimilarity,
self.sizes[cluster2],
));
}
} | }
impl From<Error> for io::Error {
fn from(err: Error) -> io::Error { | random_line_split |
mesh.rs | use super::vertex::Vertex;
use super::face::Face;
use super::pair::{ Pair, PairInfo };
use std::collections::BinaryHeap;
use nalgebra::base::Matrix4;
use std::path::Path;
use ordered_float::OrderedFloat;
use std::io::LineWriter;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::BufRead;
use std::str::FromStr;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::sync::Arc;
use std::cell::RefCell;
#[derive(Default)]
pub struct Mesh {
vertices : Vec<Arc<RefCell<Vertex>>>, // 顶点实体
faces : Vec<Face>,
pairs : Vec<Pair>,
heap : BinaryHeap<PairInfo>,
pre_pairs : VecDeque<HashSet<usize>>, // 用于建立pair的数对
trash : Vec<usize>, // 可供使用的顶点实体集合
total : u32,
}
impl Mesh{
pub fn load(&mut self, file_path : &str) {
let f = File::open(file_path).unwrap();
let file = BufReader::new(&f);
let mut face_index = 0;
for (_, line) in file.lines().enumerate() {
let l = line.unwrap();
let args : Vec<&str> = l.split(' ').collect();
match args[0] {
"v" => {
assert!( args.len() == 4);
self.vertices.push(Arc::new(RefCell::new(Vertex::new(
f64::from_str(args[1]).unwrap(),
f64::from_str(args[2]).unwrap(),
f64::from_str(args[3]).unwrap()
))));
self.pre_pairs.push_back(HashSet::new());
}
"f" => {
// 取得三个顶点的编号
let idx1 = usize::from_str(args[1]).unwrap() - 1;
let idx2 = usize::from_str(args[2]).unwrap() - 1;
let idx3 = usize::from_str(args[3]).unwrap() - 1;
if idx1 < idx2 {
self.pre_pairs[idx1].insert(idx2);
} else {
assert!( idx1 != idx2);
self.pre_pairs[idx2].insert(idx1);
}
if idx1 < idx3 {
self.pre_pairs[idx1].insert(idx3);
} else {
assert!( idx1 != idx3);
self.pre_pairs[idx3].insert(idx1);
}
if idx2 < idx3 {
self.pre_pairs[idx2].insert(idx3);
} else {
assert!( idx2 != idx3);
self.pre_pairs[idx3].insert(idx2);
}
let mut vertex1 = self.vertices[idx1].borrow_mut();
let mut vertex2 = self.vertices[idx2].borrow_mut();
let mut vertex3 = self.vertices[idx3].borrow_mut();
// 为顶点和三角形面片建立连接
vertex1.add_face(face_index, 0);
vertex2.add_face(face_index, 1);
vertex3.add_face(face_index, 2);
face_index += 1;
// 计算三角形面片的Kp
let vec21 = vertex2.coord - vertex1.coord;
let vec31 = vertex3.coord - vertex1.coord;
let n = vec21.cross(&vec31).normalize(); // 法向量N
let d = n.dot(&vertex1.coord) * -1.0;
let k_p = Matrix4::from_fn(|r, c| {
let src1 = if r > 2 { d } else { n[r] };
let src2 = if c > 2 { d } else { n[c] };
src1 * src2
});
self.faces.push(Face::new(idx1, idx2, idx3, k_p));
}
_ => {
println!("other");
}
}
}
// 多出一个空闲顶点用于中转
self.vertices.push(Arc::new(RefCell::new(Vertex::new(0.0f64, 0.0f64, 0.0f64))));
println!("init vertices {}, faces {}", self.vertices.len(), self.faces.len());
}
pub fn cal_q_v(&mut self) {
// 计算顶点的Qv矩阵
for v in self.vertices.iter() {
let mut vertex = v.borrow_mut();
let mut q_v = Matrix4::zeros();
for (idx, _) in vertex.faces.iter() {
q_v += self.faces[*idx].k_p;
}
vertex.q_v = q_v;
}
}
fn add_pair(&mut self, id1 : usize, id2 : usize) {
assert!( id1 < id2);
// 创建一个pair
let mut vertex1 = self.vertices[id1].borrow_mut();
let mut vertex2 = self.vertices[id2].borrow_mut();
// TODO 阈值应当运行时指定
let dist = Vertex::distance(&vertex1.coord, &vertex2.coord);
if dist > 10.0 {
return;
}
let mut pair = Pair::new(id1, id2);
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
let idx = self.pairs.len();
let value = pair.get_value();
self.pairs.push(pair);
// 更新顶点信息,pairinfo信息
let pair_info = PairInfo { id : idx, value : OrderedFloat::from(value) };
self.heap.push(pair_info);
vertex1.add_pair(idx.clone(), 1);
vertex2.add_pair(idx.clone(), 2);
}
pub fn init_pairs(&mut self) {
// TODO 遍历所有面片,建立pair的列表以及pairinfo的堆
let len = self.pre_pairs.len();
for v_idx in 0..len {
let v_set = self.pre_pairs.pop_front().unwrap();
for idx in v_set.iter() {
assert!( v_idx < *idx );
self.add_pair( v_idx, idx.clone());
}
}
println!("number of pairs : {}", self.pairs.len());
}
pub fn run(&mut self, scale : f32) {
// 化简过程
self.total = self.faces.len() as u32;
let target = ( self.total as f32 * scale ) as u32;
println!("total is {}, target is {}", self.total, target);
while self.total > target {
if let Some(vectim) = self.heap.pop() {
self.del_pair(vectim.id);
} else { // empty
println!("to break!!!");
break;
}
loop {
// 检查堆头的pair是否为有效的pair,或者是否需要更新
let mut flag = HeadType::NORMAL;
if let Some(head) = self.heap.peek() {
let temp_pair = self.pairs.get(head.id).unwrap();
if !temp_pair.valid() {
flag = HeadType::DELETE;
}
if temp_pair.valid() && temp_pair.access() {
flag = HeadType::RENEW;
}
} else {
break;
}
// 更新堆头
match flag {
HeadType::DELETE => {
self.heap.pop();
continue;
}
HeadType::RENEW => {
let mut to_renew = self.heap.pop().unwrap();
let mut pair = self.pairs.get_mut(to_renew.id).unwrap();
let (first, second) = pair.get_vertex();
// 创建一个pair
let vertex1 = self.vertices[first].borrow();
let vertex2 = self.vertices[second].borrow();
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
pair.clear_access();
to_renew.value = OrderedFloat::from(pair.get_value());
self.heap.push(to_renew);
continue;
}
HeadType::NORMAL => {
break;
}
}
}
}
let mut real_idx : usize = 1;
for step_idx in 0..self.vertices.len() {
let v = self.vertices[step_idx].borrow();
if !v.valid() {
continue;
}
for (f_idx, pos) in v.faces.iter() {
self.faces[*f_idx].indices[*pos] = real_idx;
}
real_idx += 1;
}
let mut face_num = 0;
for f in self.faces.iter() {
if f.valid() {
face_num += 1;
}
}
println!("new face num {}", face_num);
}
fn del_pair(&mut self, id : usize) {
assert!( self.pairs[id].valid() );
self.pairs[id].destroy(); // 将该pair置为无效
// 获取旧顶点
let (first, second) = self.pairs[id].get_vertex();
assert!( first != second );
let first_ptr = self.vertices[first].clone();
let second_ptr = self.vertices[second].clone();
let mut vertex1 = first_ptr.borrow_mut();
let mut vertex2 = second_ptr.borrow_mut();
// 回收旧顶点编号
self.trash.push(first);
self.trash.push(second);
let new_pos = self.trash.pop().unwrap();
// 获取用于存放新顶点的顶点实体
let temp_pos = self.vertices.len() - 1;
assert!( temp_pos != second && temp_pos != first);
let temp_ptr = self.vertices[temp_pos].clone();
let mut new_v = temp_ptr.borrow_mut();
new_v.renew_state();
assert!( new_v.faces.is_empty() );
assert!( new_v.pairs.is_empty() );
let best_point = self.pairs[id].get_best_point();
new_v.coord.x = best_point.x;
new_v.coord.y = best_point.y;
new_v.coord.z = best_point.z;
new_v.q_v = vertex1.q_v + vertex2.q_v;
// 更新相关的面片
for (idx, pos) in vertex1.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
let both_in = self.faces[*idx].both_in(pos, &second);
if both_in {
self.faces[*idx].destroy();
self.total -= 1;
continue;
}
self.faces[*idx].indices[*pos] = new_pos; // 更新面片的对应顶点坐标
new_v.add_face(idx.clone(), pos.clone());
}
}
for (idx, pos) in vertex2.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
self.faces[*idx].indices[*pos] = new_pos;
new_v.add_face(idx.clone(), pos.clone());
}
}
// 更新相关的pair
for (idx, pos) in vertex1.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
for (idx, pos) in vertex2.pairs.iter() { | continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
vertex1.destroy();
vertex2.destroy();
self.vertices.swap(new_pos, temp_pos);
}
pub fn save(&mut self, path : &Path) {
let file = match File::create(&path) {
Err(why) => panic!("couldn't create {}", why.to_string()),
Ok(file) => file,
};
let mut file_writer = LineWriter::new(file);
for v_ptr in self.vertices.iter() {
let v = v_ptr.borrow();
if v.valid() {
file_writer.write_all(&v.get_string().into_bytes()).unwrap();
}
}
for f in self.faces.iter() {
if f.valid() {
file_writer.write_all(&f.get_string().into_bytes()).unwrap();
}
}
file_writer.flush().unwrap();
}
}
#[derive(Debug)]
enum HeadType {
DELETE,
RENEW,
NORMAL
} | if *idx == id { | random_line_split |
mesh.rs | use super::vertex::Vertex;
use super::face::Face;
use super::pair::{ Pair, PairInfo };
use std::collections::BinaryHeap;
use nalgebra::base::Matrix4;
use std::path::Path;
use ordered_float::OrderedFloat;
use std::io::LineWriter;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::BufRead;
use std::str::FromStr;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::sync::Arc;
use std::cell::RefCell;
#[derive(Default)]
pub struct Mesh {
vertices : Vec<Arc<RefCell<Vertex>>>, // 顶点实体
faces : Vec<Face>,
pairs : Vec<Pair>,
heap : BinaryHeap<PairInfo>,
pre_pairs : VecDeque<HashSet<usize>>, // 用于建立pair的数对
trash : Vec<usize>, // 可供使用的顶点实体集合
total : u32,
}
impl Mesh{
pub fn load(&mut self, file_path : &str) {
let f = File::open(file_path).unwrap();
let file = BufReader::new(&f);
let mut face_index = 0;
for (_, line) in file.lines().enumerate() {
let l = line.unwrap();
let args : Vec<&str> = l.split(' ').collect();
match args[0] {
"v" => {
assert!( args.len() == 4);
self.vertices.push(Arc::new(RefCell::new(Vertex::new(
f64::from_str(args[1]).unwrap(),
f64::from_str(args[2]).unwrap(),
f64::from_str(args[3]).unwrap()
))));
self.pre_pairs.push_back(HashSet::new());
}
"f" => {
// 取得三个顶点的编号
let idx1 = usize::from_str(args[1]).unwrap() - 1;
let idx2 = usize::from_str(args[2]).unwrap() - 1;
let idx3 = usize::from_str(args[3]).unwrap() - 1;
if idx1 < idx2 {
self.pre_pairs[idx1].insert(idx2);
} else {
assert!( idx1 != idx2);
self.pre_pairs[idx2].insert(idx1);
}
if idx1 < idx3 {
self.pre_pairs[idx1].insert(idx3);
} else {
assert!( idx1 != idx3);
self.pre_pairs[idx3].insert(idx1);
}
if idx2 < idx3 {
self.pre_pairs[idx2].insert(idx3);
} else {
assert!( idx2 != idx3);
self.pre_pairs[idx3].insert(idx2);
}
let mut vertex1 = self.vertices[idx1].borrow_mut();
let mut vertex2 = self.vertices[idx2].borrow_mut();
let mut vertex3 = self.vertices[idx3].borrow_mut();
// 为顶点和三角形面片建立连接
vertex1.add_face(face_index, 0);
vertex2.add_face(face_index, 1);
vertex3.add_face(face_index, 2);
face_index += 1;
// 计算三角形面片的Kp
let vec21 = vertex2.coord - vertex1.coord;
let vec31 = vertex3.coord - vertex1.coord;
let n = vec21.cross(&vec31).normalize(); // 法向量N
let d = n.dot(&vertex1.coord) * -1.0;
let k_p = Matrix4::from_fn(|r, c| {
let src1 = if r > 2 { d } else { n[r] };
let src2 = if c > 2 { d } else { n[c] };
src1 * src2
});
self.faces.push(Face::new(idx1, idx2, idx3, k_p));
}
_ => {
println!("other");
}
}
}
// 多出一个空闲顶点用于中转
self.vertices.push(Arc::new(RefCell::new(Vertex::new(0.0f64, 0.0f64, 0.0f64))));
println!("init vertices {}, faces {}", self.vertices.len(), self.faces.len());
}
pub fn cal_q_v(&mut self) {
// 计算顶点的Qv矩阵
for v in self.vertices.iter() {
let mut vertex = v.borrow_mut();
let mut q_v = Matrix4::zeros();
for (idx, _) in vertex.faces.iter() {
q_v += self.faces[*idx].k_p;
}
vertex.q_v = q_v;
}
}
fn add_pair(&mut self, id1 : usize, id2 : usize) {
assert!( id1 < id2);
// 创建一个pair
let mut vertex1 = self.vertices[id1].borrow_mut();
let mut vertex2 = self.vertices[id2].borrow_mut();
// TODO 阈值应当运行时指定
let dist = Vertex::distance(&vertex1.coord, &vertex2.coord);
if dist > 10.0 {
return;
}
let mut pair = Pair::new(id1, id2);
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
let idx = self.pairs.len();
let value = pair.get_value();
self.pairs.push(pair);
// 更新顶点信息,pairinfo信息
let pair_info = PairInfo { id : idx, value : OrderedFloat::from(value) };
self.heap.push(pair_info);
vertex1.add_pair(idx.clone(), 1);
vertex2.add_pair(idx.clone(), 2);
}
pub fn init_pairs(&mut self) {
// TODO 遍历所有面片,建立pair的列表以及pairinfo的堆
let len = self.pre_pairs.len();
for v_idx in 0..len {
let v_set = self.pre_pairs.pop_front().unwrap();
for idx in v_set.iter() {
assert!( v_idx < *idx );
self.add_pair( v_idx, idx.clone());
}
}
println!("number of pairs : {}", self.pairs.len());
}
pub fn run(&mut self, scale : f32) {
// 化简过程
self.total = self.faces.len() as u32;
let target = ( self.total as f32 * scale ) as u32;
println!("total is {}, target is {}", self.total, target);
while self.total > target {
if let Some(vectim) = self.heap.pop() {
self.del_pair(vectim.id);
} else { // empty
println!("to break!!!");
break;
}
loop {
// 检查堆头的pair是否为有效的pair,或者是否需要更新
let mut flag = HeadType::NORMAL;
if let Some(head) = self.heap.peek() {
let temp_pair = self.pairs.get(head.id).unwrap();
if !temp_pair.valid() {
flag = HeadType::DELETE;
}
if temp_pair.valid() && temp_pair.access() {
flag = HeadType::RENEW;
}
} else {
break;
}
// 更新堆头
match flag {
HeadType::DELETE => {
self.heap.pop();
continue;
}
HeadType::RENEW => {
let mut to_renew = self.heap.pop().unwrap();
let mut pair = self.pairs.get_mut(to_renew.id).unwrap();
let (first, second) = pair.get_vertex();
// 创建一个pair
let vertex1 = self.vertices[first].borrow();
let vertex2 = self.vertices[second].borrow();
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
pair.clear_access();
to_renew.value = OrderedFloat::from(pair.get_value());
self.heap.push(to_renew);
continue;
}
HeadType::NORMAL => {
break;
}
}
}
}
let mut real_idx : usize = 1;
for step_idx in 0..self.vertices.len() {
let v = self.vertices[step_idx].borrow();
if !v.valid() {
continue;
}
for (f_idx, pos) in v.faces.iter() {
self.faces[*f_idx].indices[*pos] = real_idx;
}
real_idx += 1;
}
let mut face_num = 0;
for f in self.faces.iter() {
if f.valid() {
face_num += 1;
}
}
println!("new face num {}", face_num);
}
fn del_pair(&mut self, id : usize) {
assert!( self.pairs[id].valid() );
self.pairs[id].destroy(); // 将该pair置为无效
// 获取旧顶点
let (first, second) = self.pairs[id].get_vertex();
assert!( first != second );
let first_ptr = self.vertices[first].clone();
let second_ptr = self.vertices[second].clone();
let mut vertex1 = first_ptr.borrow_mut();
let mut vertex2 = second_ptr.borrow_mut();
// 回收旧顶点编号
self.trash.push(first);
self.trash.push(second);
let new_pos = self.trash.pop().unwrap();
// 获取用于存放新顶点的顶点实体
let temp_pos = self.vertices.len() - 1;
assert!( temp_pos != second && temp_pos != first);
let temp_ptr = self.vertices[temp_pos].clone();
let mut new_v = temp_ptr.borrow_mut();
new_v.renew_state();
assert!( new_v.faces.is_empty() );
assert!( new_v.pairs.is_empty() );
let best_point = self.pairs[id].get_best_point();
new_v.coord.x = best_point.x;
new_v.coord.y = best_point.y;
new_v.coord.z = best_point.z;
new_v.q_v = vertex1.q_v + vertex2.q_v;
// 更新相关的面片
for (idx, pos) in vertex1.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
let both_in = self.faces[*idx].both_in(pos, &second);
if both_in {
self.faces[*idx].destroy();
self.total -= 1;
continue;
}
self.faces[*idx].indices[*pos] = new_pos; // 更新面片的对应顶点坐标
new_v.add_face(idx.clone(), pos.clone());
}
}
for (idx, pos) in vertex2.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
self.faces[*idx].indices[*pos] = new_pos;
new_v.add_face(idx.clone(), pos.clone());
}
}
// 更新相关的pair
for (idx, pos) in vertex1.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
for (idx, pos) in vertex2.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
vertex1.destroy();
vertex2.destroy();
self.vertices.swap(new_pos, temp_pos);
}
pub fn save(&mut self, path : &Path) {
let file = match File::create(&path) {
Err(why) => panic!("couldn't create {}", why.to_string()),
Ok(file) => file,
};
let mut file_writer = LineWriter::new(file);
for v_ptr in self.vertices.iter() {
let v = v_ptr.borrow();
if v.valid() {
file_writer.write_all(&v.get_string().into_bytes()).unwrap();
}
}
for f in self.faces.iter() {
if f.valid() {
file_writer.write_all(&f.get_string().into_bytes()).unwrap();
}
}
file_writer.flush().unwrap();
}
}
#[derive(Debug)]
enum HeadType {
DELETE,
RENEW,
NORMAL
}
| identifier_name | ||
mesh.rs | use super::vertex::Vertex;
use super::face::Face;
use super::pair::{ Pair, PairInfo };
use std::collections::BinaryHeap;
use nalgebra::base::Matrix4;
use std::path::Path;
use ordered_float::OrderedFloat;
use std::io::LineWriter;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::BufRead;
use std::str::FromStr;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::sync::Arc;
use std::cell::RefCell;
#[derive(Default)]
pub struct Mesh {
vertices : Vec<Arc<RefCell<Vertex>>>, // 顶点实体
faces : Vec<Face>,
pairs : Vec<Pair>,
heap : BinaryHeap<PairInfo>,
pre_pairs : VecDeque<HashSet<usize>>, // 用于建立pair的数对
trash : Vec<usize>, // 可供使用的顶点实体集合
total : u32,
}
impl Mesh{
pub fn load(&mut self, file_path : &str) {
let f = File::open(file_path).unwrap();
let file = BufReader::new(&f);
let mut face_index = 0;
for (_, line) in file.lines().enumerate() {
let l = line.unwrap();
let args : Vec<&str> = l.split(' ').collect();
match args[0] {
"v" => {
assert!( args.len() == 4);
self.vertices.push(Arc::new(RefCell::new(Vertex::new(
f64::from_str(args[1]).unwrap(),
f64::from_str(args[2]).unwrap(),
f64::from_str(args[3]).unwrap()
))));
self.pre_pairs.push_back(HashSet::new());
}
"f" => {
// 取得三个顶点的编号
let idx1 = usize::from_str(args[1]).unwrap() - 1;
let idx2 = usize::from_str(args[2]).unwrap() - 1;
let idx3 = usize::from_str(args[3]).unwrap() - 1;
if idx1 < idx2 {
self.pre_pairs[idx1].insert(idx2);
} else {
assert!( idx1 != idx2);
self.pre_pairs[idx2].insert(idx1);
}
if idx1 < idx3 {
self.pre_pairs[idx1].insert(idx3);
} else {
assert!( idx1 != idx3);
self.pre_pairs[idx3].insert(idx1);
}
if idx2 < idx3 {
self.pre_pairs[idx2].insert(idx3);
} else {
assert!( idx2 != idx3);
self.pre_pairs[idx3].insert(idx2);
}
let mut vertex1 = self.vertices[idx1].borrow_mut();
let mut vertex2 = self.vertices[idx2].borrow_mut();
let mut vertex3 = self.vertices[idx3].borrow_mut();
// 为顶点和三角形面片建立连接
vertex1.add_face(face_index, 0);
vertex2.add_face(face_index, 1);
vertex3.add_face(face_index, 2);
face_index += 1;
// 计算三角形面片的Kp
let vec21 = vertex2.coord - vertex1.coord;
let vec31 = vertex3.coord - vertex1.coord;
let n = vec21.cross(&vec31).normalize(); // 法向量N
let d = n.dot(&vertex1.coord) * -1.0;
let k_p = Matrix4::from_fn(|r, c| {
let src1 = if r > 2 { d } else { n[r] };
let src2 = if c > 2 { d } else { n[c] };
src1 * src2
});
self.faces.push(Face::new(idx1, idx2, idx3, k_p));
}
_ => {
println!("other");
}
}
}
// 多出一个空闲顶点用于中转
self.vertices.push(Arc::new(RefCell::new(Vertex::new(0.0f64, 0.0f64, 0.0f64))));
println!("init vertices {}, faces {}", self.vertices.len(), self.faces.len());
}
pub fn cal_q_v(&mut self) {
// 计算顶点的Qv矩阵
for v in self.vertices.iter() {
let mut vertex = v.borrow_mut();
let mut q_v = Matrix4::zeros();
for (idx, _) in vertex.faces.iter() {
q_v += self.faces[*idx].k_p;
}
vertex.q_v = q_v;
}
}
fn add_pair(&mut self, id1 : usize, id2 : usize) {
assert!( id1 < id2);
// 创建一个pair
let mut vertex1 = self.vertices[id1].borrow_mut();
let mut vertex2 = self.vertices[id2].borrow_mut();
// TODO 阈值应当运行时指定
let dist = Vertex::distance(&vertex1.coord, &vertex2.coord);
if dist > 10.0 {
return;
}
let mut pair = Pair::new(id1, id2);
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
let idx = self.pairs.len();
let value = pair.get_value();
self.pairs.push(pair);
// 更新顶点信息,pairinfo信息
let pair_info = PairInfo { id : idx, value : OrderedFloat::from(value) };
self.heap.push(pair_info);
vertex1.add_pair(idx.clone(), 1);
vertex2.add_pair(idx.clone(), 2);
}
pub fn init_pairs(&mut self) {
// TODO 遍历所有面片,建立pair的列表以及pairinfo的堆
let len = self.pre_pairs.len();
for v_idx in 0..len {
let v_set = self.pre_pairs.pop_front().unwrap();
for idx in v_set.iter() {
assert!( v_idx < *idx );
self.add_pair( v_idx, idx.clone());
}
}
println!("number of pairs : {}", self.pairs.len());
}
pub fn run(&mut self, scale : f32) {
// 化简过程
self.total = self.faces.len() as u32;
let target = ( self.total as f32 * scale ) as u32;
println!("total is {}, target is {}", self.total, target);
while self.total > target {
if let Some(vectim) = self.heap.pop() {
self.del_pair(vectim.id);
} else { // empty
println!("to break!!!");
break;
}
loop {
// 检查堆头的pair是否为有效的pair,或者是否需要更新
let mut flag = HeadType::NORMAL;
if let Some(head) = self.heap.peek() {
let temp_pair = self.pairs.get(head.id).unwrap();
if !temp_pair.valid() {
flag = HeadType::DELETE;
}
if temp_pair.valid() && temp_pair.access() {
flag = HeadType::RENEW;
}
} else {
break;
}
// 更新堆头
match flag {
HeadType::DELETE => {
self.heap.pop();
continue;
}
HeadType::RENEW => {
let mut to_renew = self.heap.pop().unwrap();
let mut pair = self.pairs.get_mut(to_renew.id).unwrap();
let (first, second) = pair.get_vertex();
// 创建一个pair
let vertex1 = self.vertices[first].borrow();
let vertex2 = self.vertices[second].borrow();
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
pair.clear_access();
to_renew.value = OrderedFloat::from(pair.get_value());
self.heap.push(to_renew);
continue;
}
HeadType::NORMAL => {
break;
}
}
}
}
let mut real_idx : usize = 1;
for step_idx in 0..self.vertices.len() {
let v = self.vertices[step_idx].borrow();
if !v.valid() {
continue;
}
for (f_idx, pos) in v.faces.iter() {
self.faces[*f_idx].indices[*pos] = real_idx;
}
real_idx += 1;
}
let mut face_num = 0;
for f in self.faces.iter() {
if f.valid() {
face_num += 1;
}
}
println!("new face num {}", face_num);
}
fn del_pair(&mut self, id : usize) {
assert!( self.pairs[id].valid() );
self.pairs[id].destroy(); // 将该pair置为无效
// 获取旧顶点
let (first, second) = self.pairs[id].get_vertex();
assert!( first != second );
let first_ptr = self.vertices[first].clone();
let second_ptr = | e_all(&v.get_string().into_bytes()).unwrap();
}
}
for f in self.faces.iter() {
if f.valid() {
file_writer.write_all(&f.get_string().into_bytes()).unwrap();
}
}
file_writer.flush().unwrap();
}
}
#[derive(Debug)]
enum HeadType {
DELETE,
RENEW,
NORMAL
}
| self.vertices[second].clone();
let mut vertex1 = first_ptr.borrow_mut();
let mut vertex2 = second_ptr.borrow_mut();
// 回收旧顶点编号
self.trash.push(first);
self.trash.push(second);
let new_pos = self.trash.pop().unwrap();
// 获取用于存放新顶点的顶点实体
let temp_pos = self.vertices.len() - 1;
assert!( temp_pos != second && temp_pos != first);
let temp_ptr = self.vertices[temp_pos].clone();
let mut new_v = temp_ptr.borrow_mut();
new_v.renew_state();
assert!( new_v.faces.is_empty() );
assert!( new_v.pairs.is_empty() );
let best_point = self.pairs[id].get_best_point();
new_v.coord.x = best_point.x;
new_v.coord.y = best_point.y;
new_v.coord.z = best_point.z;
new_v.q_v = vertex1.q_v + vertex2.q_v;
// 更新相关的面片
for (idx, pos) in vertex1.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
let both_in = self.faces[*idx].both_in(pos, &second);
if both_in {
self.faces[*idx].destroy();
self.total -= 1;
continue;
}
self.faces[*idx].indices[*pos] = new_pos; // 更新面片的对应顶点坐标
new_v.add_face(idx.clone(), pos.clone());
}
}
for (idx, pos) in vertex2.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
self.faces[*idx].indices[*pos] = new_pos;
new_v.add_face(idx.clone(), pos.clone());
}
}
// 更新相关的pair
for (idx, pos) in vertex1.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
for (idx, pos) in vertex2.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
vertex1.destroy();
vertex2.destroy();
self.vertices.swap(new_pos, temp_pos);
}
pub fn save(&mut self, path : &Path) {
let file = match File::create(&path) {
Err(why) => panic!("couldn't create {}", why.to_string()),
Ok(file) => file,
};
let mut file_writer = LineWriter::new(file);
for v_ptr in self.vertices.iter() {
let v = v_ptr.borrow();
if v.valid() {
file_writer.writ | identifier_body |
mesh.rs | use super::vertex::Vertex;
use super::face::Face;
use super::pair::{ Pair, PairInfo };
use std::collections::BinaryHeap;
use nalgebra::base::Matrix4;
use std::path::Path;
use ordered_float::OrderedFloat;
use std::io::LineWriter;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::BufRead;
use std::str::FromStr;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::sync::Arc;
use std::cell::RefCell;
#[derive(Default)]
pub struct Mesh {
vertices : Vec<Arc<RefCell<Vertex>>>, // 顶点实体
faces : Vec<Face>,
pairs : Vec<Pair>,
heap : BinaryHeap<PairInfo>,
pre_pairs : VecDeque<HashSet<usize>>, // 用于建立pair的数对
trash : Vec<usize>, // 可供使用的顶点实体集合
total : u32,
}
impl Mesh{
pub fn load(&mut self, file_path : &str) {
let f = File::open(file_path).unwrap();
let file = BufReader::new(&f);
let mut face_index = 0;
for (_, line) in file.lines().enumerate() {
let l = line.unwrap();
let args : Vec<&str> = l.split(' ').collect();
match args[0] {
"v" => {
assert!( args.len() == 4);
self.vertices.push(Arc::new(RefCell::new(Vertex::new(
f64::from_str(args[1]).unwrap(),
f64::from_str(args[2]).unwrap(),
f64::from_str(args[3]).unwrap()
))));
self.pre_pairs.push_back(HashSet::new());
}
"f" => {
// 取得三个顶点的编号
let idx1 = usize::from_str(args[1]).unwrap() - 1;
let idx2 = usize::from_str(args[2]).unwrap() - 1;
let idx3 = usize::from_str(args[3]).unwrap() - 1;
if idx1 < idx2 {
self.pre_pairs[idx1].insert(idx2);
| self.pre_pairs[idx2].insert(idx1);
}
if idx1 < idx3 {
self.pre_pairs[idx1].insert(idx3);
} else {
assert!( idx1 != idx3);
self.pre_pairs[idx3].insert(idx1);
}
if idx2 < idx3 {
self.pre_pairs[idx2].insert(idx3);
} else {
assert!( idx2 != idx3);
self.pre_pairs[idx3].insert(idx2);
}
let mut vertex1 = self.vertices[idx1].borrow_mut();
let mut vertex2 = self.vertices[idx2].borrow_mut();
let mut vertex3 = self.vertices[idx3].borrow_mut();
// 为顶点和三角形面片建立连接
vertex1.add_face(face_index, 0);
vertex2.add_face(face_index, 1);
vertex3.add_face(face_index, 2);
face_index += 1;
// 计算三角形面片的Kp
let vec21 = vertex2.coord - vertex1.coord;
let vec31 = vertex3.coord - vertex1.coord;
let n = vec21.cross(&vec31).normalize(); // 法向量N
let d = n.dot(&vertex1.coord) * -1.0;
let k_p = Matrix4::from_fn(|r, c| {
let src1 = if r > 2 { d } else { n[r] };
let src2 = if c > 2 { d } else { n[c] };
src1 * src2
});
self.faces.push(Face::new(idx1, idx2, idx3, k_p));
}
_ => {
println!("other");
}
}
}
// 多出一个空闲顶点用于中转
self.vertices.push(Arc::new(RefCell::new(Vertex::new(0.0f64, 0.0f64, 0.0f64))));
println!("init vertices {}, faces {}", self.vertices.len(), self.faces.len());
}
pub fn cal_q_v(&mut self) {
// 计算顶点的Qv矩阵
for v in self.vertices.iter() {
let mut vertex = v.borrow_mut();
let mut q_v = Matrix4::zeros();
for (idx, _) in vertex.faces.iter() {
q_v += self.faces[*idx].k_p;
}
vertex.q_v = q_v;
}
}
fn add_pair(&mut self, id1 : usize, id2 : usize) {
assert!( id1 < id2);
// 创建一个pair
let mut vertex1 = self.vertices[id1].borrow_mut();
let mut vertex2 = self.vertices[id2].borrow_mut();
// TODO 阈值应当运行时指定
let dist = Vertex::distance(&vertex1.coord, &vertex2.coord);
if dist > 10.0 {
return;
}
let mut pair = Pair::new(id1, id2);
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
let idx = self.pairs.len();
let value = pair.get_value();
self.pairs.push(pair);
// 更新顶点信息,pairinfo信息
let pair_info = PairInfo { id : idx, value : OrderedFloat::from(value) };
self.heap.push(pair_info);
vertex1.add_pair(idx.clone(), 1);
vertex2.add_pair(idx.clone(), 2);
}
pub fn init_pairs(&mut self) {
// TODO 遍历所有面片,建立pair的列表以及pairinfo的堆
let len = self.pre_pairs.len();
for v_idx in 0..len {
let v_set = self.pre_pairs.pop_front().unwrap();
for idx in v_set.iter() {
assert!( v_idx < *idx );
self.add_pair( v_idx, idx.clone());
}
}
println!("number of pairs : {}", self.pairs.len());
}
pub fn run(&mut self, scale : f32) {
// 化简过程
self.total = self.faces.len() as u32;
let target = ( self.total as f32 * scale ) as u32;
println!("total is {}, target is {}", self.total, target);
while self.total > target {
if let Some(vectim) = self.heap.pop() {
self.del_pair(vectim.id);
} else { // empty
println!("to break!!!");
break;
}
loop {
// 检查堆头的pair是否为有效的pair,或者是否需要更新
let mut flag = HeadType::NORMAL;
if let Some(head) = self.heap.peek() {
let temp_pair = self.pairs.get(head.id).unwrap();
if !temp_pair.valid() {
flag = HeadType::DELETE;
}
if temp_pair.valid() && temp_pair.access() {
flag = HeadType::RENEW;
}
} else {
break;
}
// 更新堆头
match flag {
HeadType::DELETE => {
self.heap.pop();
continue;
}
HeadType::RENEW => {
let mut to_renew = self.heap.pop().unwrap();
let mut pair = self.pairs.get_mut(to_renew.id).unwrap();
let (first, second) = pair.get_vertex();
// 创建一个pair
let vertex1 = self.vertices[first].borrow();
let vertex2 = self.vertices[second].borrow();
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
pair.clear_access();
to_renew.value = OrderedFloat::from(pair.get_value());
self.heap.push(to_renew);
continue;
}
HeadType::NORMAL => {
break;
}
}
}
}
let mut real_idx : usize = 1;
for step_idx in 0..self.vertices.len() {
let v = self.vertices[step_idx].borrow();
if !v.valid() {
continue;
}
for (f_idx, pos) in v.faces.iter() {
self.faces[*f_idx].indices[*pos] = real_idx;
}
real_idx += 1;
}
let mut face_num = 0;
for f in self.faces.iter() {
if f.valid() {
face_num += 1;
}
}
println!("new face num {}", face_num);
}
fn del_pair(&mut self, id : usize) {
assert!( self.pairs[id].valid() );
self.pairs[id].destroy(); // 将该pair置为无效
// 获取旧顶点
let (first, second) = self.pairs[id].get_vertex();
assert!( first != second );
let first_ptr = self.vertices[first].clone();
let second_ptr = self.vertices[second].clone();
let mut vertex1 = first_ptr.borrow_mut();
let mut vertex2 = second_ptr.borrow_mut();
// 回收旧顶点编号
self.trash.push(first);
self.trash.push(second);
let new_pos = self.trash.pop().unwrap();
// 获取用于存放新顶点的顶点实体
let temp_pos = self.vertices.len() - 1;
assert!( temp_pos != second && temp_pos != first);
let temp_ptr = self.vertices[temp_pos].clone();
let mut new_v = temp_ptr.borrow_mut();
new_v.renew_state();
assert!( new_v.faces.is_empty() );
assert!( new_v.pairs.is_empty() );
let best_point = self.pairs[id].get_best_point();
new_v.coord.x = best_point.x;
new_v.coord.y = best_point.y;
new_v.coord.z = best_point.z;
new_v.q_v = vertex1.q_v + vertex2.q_v;
// 更新相关的面片
for (idx, pos) in vertex1.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
let both_in = self.faces[*idx].both_in(pos, &second);
if both_in {
self.faces[*idx].destroy();
self.total -= 1;
continue;
}
self.faces[*idx].indices[*pos] = new_pos; // 更新面片的对应顶点坐标
new_v.add_face(idx.clone(), pos.clone());
}
}
for (idx, pos) in vertex2.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
self.faces[*idx].indices[*pos] = new_pos;
new_v.add_face(idx.clone(), pos.clone());
}
}
// 更新相关的pair
for (idx, pos) in vertex1.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
for (idx, pos) in vertex2.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
vertex1.destroy();
vertex2.destroy();
self.vertices.swap(new_pos, temp_pos);
}
pub fn save(&mut self, path : &Path) {
let file = match File::create(&path) {
Err(why) => panic!("couldn't create {}", why.to_string()),
Ok(file) => file,
};
let mut file_writer = LineWriter::new(file);
for v_ptr in self.vertices.iter() {
let v = v_ptr.borrow();
if v.valid() {
file_writer.write_all(&v.get_string().into_bytes()).unwrap();
}
}
for f in self.faces.iter() {
if f.valid() {
file_writer.write_all(&f.get_string().into_bytes()).unwrap();
}
}
file_writer.flush().unwrap();
}
}
#[derive(Debug)]
enum HeadType {
DELETE,
RENEW,
NORMAL
}
| } else {
assert!( idx1 != idx2);
| conditional_block |
main.rs | // Copyright 2020 Sean Kelleher. All rights reserved.
// Use of this source code is governed by a MIT
// licence that can be found in the LICENCE file.
use std::convert::TryInto;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Write;
extern crate alacritty;
extern crate pancurses;
use alacritty::ansi::{Color, NamedColor, Processor};
use alacritty::cli::Options;
use alacritty::config::Config;
use alacritty::index::{Point, Line, Column};
use alacritty::Term;
use alacritty::term::SizeInfo;
use alacritty::tty;
use pancurses::colorpair::ColorPair;
use pancurses::Input;
use pancurses::ToChtype;
use pancurses::Window;
const OS_IO_ERROR: i32 = 5;
fn main() {
let win = pancurses::initscr();
// Characters are not rendered when they're typed, instead they're sent to
// the underlying terminal, which decides whether to echo them or not (by
// writing new characters from `ptyf`, below). An example scenario of when
// this comes in handy is in the case of typing backspace. Using `noecho`
// prevents `^?` being briefly echoed before the cursor in between the time
// that the backspace key was pressed and the time when the new rendering of
// the terminal state is received and output.
pancurses::noecho();
pancurses::start_color();
for i in 0..COLOUR_INDEXES.len()-1 {
pancurses::init_pair(i as i16, COLOUR_INDEXES[i], pancurses::COLOR_BLACK);
}
// We put the window input into non-blocking mode so that `win.getch()`
// returns `None` immediately if there is no input. This allows us to read
// from the PTY and the the window in the same thread. Note that this
// results in a busy loop, which should ideally be replaced by blocking
// reads on separate threads for efficiency.
win.nodelay(true);
let (y, x) = win.get_max_yx();
let size = new_size_info(x - 2, y - 2);
let conf = Config::default();
// `pty` provides methods for manipulating the PTY.
let pty = tty::new(&conf, &Options::default(), &&size, None);
// `ptyf` is a `File` interface to the server end of the PTY client/server
// pair.
let mut ptyf = pty.reader();
// `parser` reads and parses the data read from `pty`, and updates the state
// of the terminal "display" that is maintained in `term`.
let mut parser = Processor::new();
let mut term = Term::new(&conf, size);
let border_chars = ['*', '+', '-'];
let mut cur_border_char = 0;
let mut exit_reason: Option<String> = None;
let mut buf = [0u8; 0x1000];
// We would ideally avoid using labels for loop termination but we use one
// here for simplicity.
'evt_loop: loop {
match ptyf.read(&mut buf[..]) {
Ok(0) => {
// End-of-file.
break 'evt_loop;
},
Ok(n) => {
for byte in &buf[..n] {
parser.advance(&mut term, *byte, &mut ptyf);
}
let result = render_term_to_win(&term, &win, border_chars[cur_border_char]);
if let Err(err) = result {
let colour_type =
match err {
RenderError::ColourSpecFound => "specification",
RenderError::ColourIndexFound => "index",
};
exit_reason = Some(format!(
"encountered a colour {}, which isn't currently supported",
colour_type,
));
break 'evt_loop;
}
},
Err(e) => {
let k = e.kind();
if k == ErrorKind::Other && e.raw_os_error() == Some(OS_IO_ERROR) {
// We interpret an `OS_IO_ERROR` as the PTY process having
// terminated, as it corresponds with this during
// experimentation.
break 'evt_loop;
}
if k != ErrorKind::Interrupted && k != ErrorKind::WouldBlock {
exit_reason = Some(format!(
"couldn't read from PTY (error kind: {:?}, os error: {:?}): {}",
e.kind(),
e.raw_os_error(),
e,
));
break 'evt_loop;
};
},
}
if let Some(input) = win.getch() {
match input {
Input::Character(c) => {
let utf8_len = c.len_utf8();
let mut bytes = Vec::with_capacity(utf8_len);
unsafe {
bytes.set_len(utf8_len);
c.encode_utf8(&mut bytes[..]);
}
if utf8_len == 1 && bytes[0] == 4 {
// We use `^D` as a trigger to change the border style.
cur_border_char = (cur_border_char + 1) % border_chars.len();
let result = render_term_to_win(&term, &win, border_chars[cur_border_char]);
if let Err(err) = result {
let colour_type =
match err {
RenderError::ColourSpecFound => "specification",
RenderError::ColourIndexFound => "index",
};
exit_reason = Some(format!(
"encountered a colour {}, which isn't currently supported",
colour_type,
));
break 'evt_loop;
}
} else {
let mut i = 0;
while i < utf8_len {
match ptyf.write(&bytes[..]) {
Ok(0) => {
exit_reason = Some(format!("PTY is unable to accept bytes"));
break 'evt_loop;
},
Ok(n) => {
i += n;
},
Err(e) => {
let k = e.kind();
if k != ErrorKind::Interrupted && k != ErrorKind::WouldBlock {
exit_reason = Some(format!(
"couldn't read from PTY (error kind: {:?}, os error: {:?}): {}",
e.kind(),
e.raw_os_error(),
e,
));
break 'evt_loop;
};
},
}
}
}
},
Input::KeyResize => {
let (y, x) = win.get_max_yx();
let size = new_size_info(x - 2, y - 2);
term.resize(&size);
pty.resize(&&size);
},
_ => {
exit_reason = Some(format!("unhandled input: {:?}", input));
break 'evt_loop;
},
}
}
}
pancurses::endwin();
if let Some(s) = exit_reason {
println!("process exited: {}", s);
}
}
const COLOUR_INDEXES: [i16; 8] = [
pancurses::COLOR_WHITE,
pancurses::COLOR_RED,
pancurses::COLOR_GREEN,
pancurses::COLOR_BLUE,
pancurses::COLOR_CYAN,
pancurses::COLOR_MAGENTA,
pancurses::COLOR_YELLOW,
pancurses::COLOR_BLACK,
];
fn get_colour_index(c: i16) -> usize {
for i in 1..COLOUR_INDEXES.len()-1 {
if c == COLOUR_INDEXES[i] {
return i
}
}
0
}
fn | (w: i32, h: i32) -> SizeInfo {
SizeInfo {
width: w as f32,
height: h as f32,
cell_width: 1.0,
cell_height: 1.0,
padding_x: 0.0,
padding_y: 0.0,
}
}
fn render_term_to_win(term: &Term, win: &Window, border_char: char) -> RenderResult {
win.clear();
let (y, x) = win.get_max_yx();
for i in 0..y {
win.mvaddch(i, 0, border_char);
win.mvaddch(i, x-1, border_char);
}
for i in 0..x {
win.mvaddch(0, i, border_char);
win.mvaddch(y-1, i, border_char);
}
let grid = term.grid();
let mut line = Line(0);
while line < grid.num_lines() {
let mut col = Column(0);
while col < grid.num_cols() {
let cell = grid[line][col];
match cell.fg {
Color::Named(name) => {
let c = match name {
NamedColor::Background => pancurses::COLOR_BLACK,
NamedColor::Black => pancurses::COLOR_BLACK,
NamedColor::Blue => pancurses::COLOR_BLUE,
NamedColor::BrightBlack => pancurses::COLOR_BLACK,
NamedColor::BrightBlue => pancurses::COLOR_BLUE,
NamedColor::BrightCyan => pancurses::COLOR_CYAN,
NamedColor::BrightGreen => pancurses::COLOR_GREEN,
NamedColor::BrightMagenta => pancurses::COLOR_MAGENTA,
NamedColor::BrightRed => pancurses::COLOR_RED,
NamedColor::BrightWhite => pancurses::COLOR_WHITE,
NamedColor::BrightYellow => pancurses::COLOR_YELLOW,
NamedColor::Cursor => pancurses::COLOR_BLACK,
NamedColor::CursorText => pancurses::COLOR_WHITE,
NamedColor::Cyan => pancurses::COLOR_CYAN,
NamedColor::DimBlack => pancurses::COLOR_BLACK,
NamedColor::DimBlue => pancurses::COLOR_BLUE,
NamedColor::DimCyan => pancurses::COLOR_CYAN,
NamedColor::DimGreen => pancurses::COLOR_GREEN,
NamedColor::DimMagenta => pancurses::COLOR_MAGENTA,
NamedColor::DimRed => pancurses::COLOR_RED,
NamedColor::DimWhite => pancurses::COLOR_WHITE,
NamedColor::DimYellow => pancurses::COLOR_YELLOW,
NamedColor::Foreground => pancurses::COLOR_WHITE,
NamedColor::Green => pancurses::COLOR_GREEN,
NamedColor::Magenta => pancurses::COLOR_MAGENTA,
NamedColor::Red => pancurses::COLOR_RED,
NamedColor::White => pancurses::COLOR_WHITE,
NamedColor::Yellow => pancurses::COLOR_YELLOW,
};
win.attrset(ColorPair(get_colour_index(c) as u8));
win.mvaddch(
(line.0 as i32) + 1,
(col.0 as i32) + 1,
cell.c.to_chtype(),
);
},
Color::Spec(_) => {
return Err(RenderError::ColourSpecFound);
},
Color::Indexed(_) => {
return Err(RenderError::ColourIndexFound);
},
};
col += 1;
}
line += 1;
}
let Point{line: Line(row), col: Column(col)} = term.cursor().point;
win.mv(
((row + 1) as usize).try_into().unwrap(),
((col + 1) as usize).try_into().unwrap(),
);
win.refresh();
Ok(())
}
type RenderResult = Result<(), RenderError>;
enum RenderError {
// These colour types aren't currently supported.
ColourSpecFound,
ColourIndexFound,
}
| new_size_info | identifier_name |
main.rs | // Copyright 2020 Sean Kelleher. All rights reserved.
// Use of this source code is governed by a MIT
// licence that can be found in the LICENCE file.
use std::convert::TryInto;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Write;
extern crate alacritty;
extern crate pancurses;
use alacritty::ansi::{Color, NamedColor, Processor}; | use alacritty::term::SizeInfo;
use alacritty::tty;
use pancurses::colorpair::ColorPair;
use pancurses::Input;
use pancurses::ToChtype;
use pancurses::Window;
const OS_IO_ERROR: i32 = 5;
fn main() {
let win = pancurses::initscr();
// Characters are not rendered when they're typed, instead they're sent to
// the underlying terminal, which decides whether to echo them or not (by
// writing new characters from `ptyf`, below). An example scenario of when
// this comes in handy is in the case of typing backspace. Using `noecho`
// prevents `^?` being briefly echoed before the cursor in between the time
// that the backspace key was pressed and the time when the new rendering of
// the terminal state is received and output.
pancurses::noecho();
pancurses::start_color();
for i in 0..COLOUR_INDEXES.len()-1 {
pancurses::init_pair(i as i16, COLOUR_INDEXES[i], pancurses::COLOR_BLACK);
}
// We put the window input into non-blocking mode so that `win.getch()`
// returns `None` immediately if there is no input. This allows us to read
// from the PTY and the the window in the same thread. Note that this
// results in a busy loop, which should ideally be replaced by blocking
// reads on separate threads for efficiency.
win.nodelay(true);
let (y, x) = win.get_max_yx();
let size = new_size_info(x - 2, y - 2);
let conf = Config::default();
// `pty` provides methods for manipulating the PTY.
let pty = tty::new(&conf, &Options::default(), &&size, None);
// `ptyf` is a `File` interface to the server end of the PTY client/server
// pair.
let mut ptyf = pty.reader();
// `parser` reads and parses the data read from `pty`, and updates the state
// of the terminal "display" that is maintained in `term`.
let mut parser = Processor::new();
let mut term = Term::new(&conf, size);
let border_chars = ['*', '+', '-'];
let mut cur_border_char = 0;
let mut exit_reason: Option<String> = None;
let mut buf = [0u8; 0x1000];
// We would ideally avoid using labels for loop termination but we use one
// here for simplicity.
'evt_loop: loop {
match ptyf.read(&mut buf[..]) {
Ok(0) => {
// End-of-file.
break 'evt_loop;
},
Ok(n) => {
for byte in &buf[..n] {
parser.advance(&mut term, *byte, &mut ptyf);
}
let result = render_term_to_win(&term, &win, border_chars[cur_border_char]);
if let Err(err) = result {
let colour_type =
match err {
RenderError::ColourSpecFound => "specification",
RenderError::ColourIndexFound => "index",
};
exit_reason = Some(format!(
"encountered a colour {}, which isn't currently supported",
colour_type,
));
break 'evt_loop;
}
},
Err(e) => {
let k = e.kind();
if k == ErrorKind::Other && e.raw_os_error() == Some(OS_IO_ERROR) {
// We interpret an `OS_IO_ERROR` as the PTY process having
// terminated, as it corresponds with this during
// experimentation.
break 'evt_loop;
}
if k != ErrorKind::Interrupted && k != ErrorKind::WouldBlock {
exit_reason = Some(format!(
"couldn't read from PTY (error kind: {:?}, os error: {:?}): {}",
e.kind(),
e.raw_os_error(),
e,
));
break 'evt_loop;
};
},
}
if let Some(input) = win.getch() {
match input {
Input::Character(c) => {
let utf8_len = c.len_utf8();
let mut bytes = Vec::with_capacity(utf8_len);
unsafe {
bytes.set_len(utf8_len);
c.encode_utf8(&mut bytes[..]);
}
if utf8_len == 1 && bytes[0] == 4 {
// We use `^D` as a trigger to change the border style.
cur_border_char = (cur_border_char + 1) % border_chars.len();
let result = render_term_to_win(&term, &win, border_chars[cur_border_char]);
if let Err(err) = result {
let colour_type =
match err {
RenderError::ColourSpecFound => "specification",
RenderError::ColourIndexFound => "index",
};
exit_reason = Some(format!(
"encountered a colour {}, which isn't currently supported",
colour_type,
));
break 'evt_loop;
}
} else {
let mut i = 0;
while i < utf8_len {
match ptyf.write(&bytes[..]) {
Ok(0) => {
exit_reason = Some(format!("PTY is unable to accept bytes"));
break 'evt_loop;
},
Ok(n) => {
i += n;
},
Err(e) => {
let k = e.kind();
if k != ErrorKind::Interrupted && k != ErrorKind::WouldBlock {
exit_reason = Some(format!(
"couldn't read from PTY (error kind: {:?}, os error: {:?}): {}",
e.kind(),
e.raw_os_error(),
e,
));
break 'evt_loop;
};
},
}
}
}
},
Input::KeyResize => {
let (y, x) = win.get_max_yx();
let size = new_size_info(x - 2, y - 2);
term.resize(&size);
pty.resize(&&size);
},
_ => {
exit_reason = Some(format!("unhandled input: {:?}", input));
break 'evt_loop;
},
}
}
}
pancurses::endwin();
if let Some(s) = exit_reason {
println!("process exited: {}", s);
}
}
const COLOUR_INDEXES: [i16; 8] = [
pancurses::COLOR_WHITE,
pancurses::COLOR_RED,
pancurses::COLOR_GREEN,
pancurses::COLOR_BLUE,
pancurses::COLOR_CYAN,
pancurses::COLOR_MAGENTA,
pancurses::COLOR_YELLOW,
pancurses::COLOR_BLACK,
];
fn get_colour_index(c: i16) -> usize {
for i in 1..COLOUR_INDEXES.len()-1 {
if c == COLOUR_INDEXES[i] {
return i
}
}
0
}
fn new_size_info(w: i32, h: i32) -> SizeInfo {
SizeInfo {
width: w as f32,
height: h as f32,
cell_width: 1.0,
cell_height: 1.0,
padding_x: 0.0,
padding_y: 0.0,
}
}
fn render_term_to_win(term: &Term, win: &Window, border_char: char) -> RenderResult {
win.clear();
let (y, x) = win.get_max_yx();
for i in 0..y {
win.mvaddch(i, 0, border_char);
win.mvaddch(i, x-1, border_char);
}
for i in 0..x {
win.mvaddch(0, i, border_char);
win.mvaddch(y-1, i, border_char);
}
let grid = term.grid();
let mut line = Line(0);
while line < grid.num_lines() {
let mut col = Column(0);
while col < grid.num_cols() {
let cell = grid[line][col];
match cell.fg {
Color::Named(name) => {
let c = match name {
NamedColor::Background => pancurses::COLOR_BLACK,
NamedColor::Black => pancurses::COLOR_BLACK,
NamedColor::Blue => pancurses::COLOR_BLUE,
NamedColor::BrightBlack => pancurses::COLOR_BLACK,
NamedColor::BrightBlue => pancurses::COLOR_BLUE,
NamedColor::BrightCyan => pancurses::COLOR_CYAN,
NamedColor::BrightGreen => pancurses::COLOR_GREEN,
NamedColor::BrightMagenta => pancurses::COLOR_MAGENTA,
NamedColor::BrightRed => pancurses::COLOR_RED,
NamedColor::BrightWhite => pancurses::COLOR_WHITE,
NamedColor::BrightYellow => pancurses::COLOR_YELLOW,
NamedColor::Cursor => pancurses::COLOR_BLACK,
NamedColor::CursorText => pancurses::COLOR_WHITE,
NamedColor::Cyan => pancurses::COLOR_CYAN,
NamedColor::DimBlack => pancurses::COLOR_BLACK,
NamedColor::DimBlue => pancurses::COLOR_BLUE,
NamedColor::DimCyan => pancurses::COLOR_CYAN,
NamedColor::DimGreen => pancurses::COLOR_GREEN,
NamedColor::DimMagenta => pancurses::COLOR_MAGENTA,
NamedColor::DimRed => pancurses::COLOR_RED,
NamedColor::DimWhite => pancurses::COLOR_WHITE,
NamedColor::DimYellow => pancurses::COLOR_YELLOW,
NamedColor::Foreground => pancurses::COLOR_WHITE,
NamedColor::Green => pancurses::COLOR_GREEN,
NamedColor::Magenta => pancurses::COLOR_MAGENTA,
NamedColor::Red => pancurses::COLOR_RED,
NamedColor::White => pancurses::COLOR_WHITE,
NamedColor::Yellow => pancurses::COLOR_YELLOW,
};
win.attrset(ColorPair(get_colour_index(c) as u8));
win.mvaddch(
(line.0 as i32) + 1,
(col.0 as i32) + 1,
cell.c.to_chtype(),
);
},
Color::Spec(_) => {
return Err(RenderError::ColourSpecFound);
},
Color::Indexed(_) => {
return Err(RenderError::ColourIndexFound);
},
};
col += 1;
}
line += 1;
}
let Point{line: Line(row), col: Column(col)} = term.cursor().point;
win.mv(
((row + 1) as usize).try_into().unwrap(),
((col + 1) as usize).try_into().unwrap(),
);
win.refresh();
Ok(())
}
type RenderResult = Result<(), RenderError>;
enum RenderError {
// These colour types aren't currently supported.
ColourSpecFound,
ColourIndexFound,
} | use alacritty::cli::Options;
use alacritty::config::Config;
use alacritty::index::{Point, Line, Column};
use alacritty::Term; | random_line_split |
main.rs | // Copyright 2020 Sean Kelleher. All rights reserved.
// Use of this source code is governed by a MIT
// licence that can be found in the LICENCE file.
use std::convert::TryInto;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Write;
extern crate alacritty;
extern crate pancurses;
use alacritty::ansi::{Color, NamedColor, Processor};
use alacritty::cli::Options;
use alacritty::config::Config;
use alacritty::index::{Point, Line, Column};
use alacritty::Term;
use alacritty::term::SizeInfo;
use alacritty::tty;
use pancurses::colorpair::ColorPair;
use pancurses::Input;
use pancurses::ToChtype;
use pancurses::Window;
const OS_IO_ERROR: i32 = 5;
fn main() {
let win = pancurses::initscr();
// Characters are not rendered when they're typed, instead they're sent to
// the underlying terminal, which decides whether to echo them or not (by
// writing new characters from `ptyf`, below). An example scenario of when
// this comes in handy is in the case of typing backspace. Using `noecho`
// prevents `^?` being briefly echoed before the cursor in between the time
// that the backspace key was pressed and the time when the new rendering of
// the terminal state is received and output.
pancurses::noecho();
pancurses::start_color();
for i in 0..COLOUR_INDEXES.len()-1 {
pancurses::init_pair(i as i16, COLOUR_INDEXES[i], pancurses::COLOR_BLACK);
}
// We put the window input into non-blocking mode so that `win.getch()`
// returns `None` immediately if there is no input. This allows us to read
// from the PTY and the the window in the same thread. Note that this
// results in a busy loop, which should ideally be replaced by blocking
// reads on separate threads for efficiency.
win.nodelay(true);
let (y, x) = win.get_max_yx();
let size = new_size_info(x - 2, y - 2);
let conf = Config::default();
// `pty` provides methods for manipulating the PTY.
let pty = tty::new(&conf, &Options::default(), &&size, None);
// `ptyf` is a `File` interface to the server end of the PTY client/server
// pair.
let mut ptyf = pty.reader();
// `parser` reads and parses the data read from `pty`, and updates the state
// of the terminal "display" that is maintained in `term`.
let mut parser = Processor::new();
let mut term = Term::new(&conf, size);
let border_chars = ['*', '+', '-'];
let mut cur_border_char = 0;
let mut exit_reason: Option<String> = None;
let mut buf = [0u8; 0x1000];
// We would ideally avoid using labels for loop termination but we use one
// here for simplicity.
'evt_loop: loop {
match ptyf.read(&mut buf[..]) {
Ok(0) => {
// End-of-file.
break 'evt_loop;
},
Ok(n) => {
for byte in &buf[..n] {
parser.advance(&mut term, *byte, &mut ptyf);
}
let result = render_term_to_win(&term, &win, border_chars[cur_border_char]);
if let Err(err) = result {
let colour_type =
match err {
RenderError::ColourSpecFound => "specification",
RenderError::ColourIndexFound => "index",
};
exit_reason = Some(format!(
"encountered a colour {}, which isn't currently supported",
colour_type,
));
break 'evt_loop;
}
},
Err(e) => {
let k = e.kind();
if k == ErrorKind::Other && e.raw_os_error() == Some(OS_IO_ERROR) {
// We interpret an `OS_IO_ERROR` as the PTY process having
// terminated, as it corresponds with this during
// experimentation.
break 'evt_loop;
}
if k != ErrorKind::Interrupted && k != ErrorKind::WouldBlock {
exit_reason = Some(format!(
"couldn't read from PTY (error kind: {:?}, os error: {:?}): {}",
e.kind(),
e.raw_os_error(),
e,
));
break 'evt_loop;
};
},
}
if let Some(input) = win.getch() {
match input {
Input::Character(c) => {
let utf8_len = c.len_utf8();
let mut bytes = Vec::with_capacity(utf8_len);
unsafe {
bytes.set_len(utf8_len);
c.encode_utf8(&mut bytes[..]);
}
if utf8_len == 1 && bytes[0] == 4 {
// We use `^D` as a trigger to change the border style.
cur_border_char = (cur_border_char + 1) % border_chars.len();
let result = render_term_to_win(&term, &win, border_chars[cur_border_char]);
if let Err(err) = result {
let colour_type =
match err {
RenderError::ColourSpecFound => "specification",
RenderError::ColourIndexFound => "index",
};
exit_reason = Some(format!(
"encountered a colour {}, which isn't currently supported",
colour_type,
));
break 'evt_loop;
}
} else {
let mut i = 0;
while i < utf8_len {
match ptyf.write(&bytes[..]) {
Ok(0) => {
exit_reason = Some(format!("PTY is unable to accept bytes"));
break 'evt_loop;
},
Ok(n) => {
i += n;
},
Err(e) => {
let k = e.kind();
if k != ErrorKind::Interrupted && k != ErrorKind::WouldBlock {
exit_reason = Some(format!(
"couldn't read from PTY (error kind: {:?}, os error: {:?}): {}",
e.kind(),
e.raw_os_error(),
e,
));
break 'evt_loop;
};
},
}
}
}
},
Input::KeyResize => {
let (y, x) = win.get_max_yx();
let size = new_size_info(x - 2, y - 2);
term.resize(&size);
pty.resize(&&size);
},
_ => {
exit_reason = Some(format!("unhandled input: {:?}", input));
break 'evt_loop;
},
}
}
}
pancurses::endwin();
if let Some(s) = exit_reason {
println!("process exited: {}", s);
}
}
const COLOUR_INDEXES: [i16; 8] = [
pancurses::COLOR_WHITE,
pancurses::COLOR_RED,
pancurses::COLOR_GREEN,
pancurses::COLOR_BLUE,
pancurses::COLOR_CYAN,
pancurses::COLOR_MAGENTA,
pancurses::COLOR_YELLOW,
pancurses::COLOR_BLACK,
];
fn get_colour_index(c: i16) -> usize |
fn new_size_info(w: i32, h: i32) -> SizeInfo {
SizeInfo {
width: w as f32,
height: h as f32,
cell_width: 1.0,
cell_height: 1.0,
padding_x: 0.0,
padding_y: 0.0,
}
}
fn render_term_to_win(term: &Term, win: &Window, border_char: char) -> RenderResult {
win.clear();
let (y, x) = win.get_max_yx();
for i in 0..y {
win.mvaddch(i, 0, border_char);
win.mvaddch(i, x-1, border_char);
}
for i in 0..x {
win.mvaddch(0, i, border_char);
win.mvaddch(y-1, i, border_char);
}
let grid = term.grid();
let mut line = Line(0);
while line < grid.num_lines() {
let mut col = Column(0);
while col < grid.num_cols() {
let cell = grid[line][col];
match cell.fg {
Color::Named(name) => {
let c = match name {
NamedColor::Background => pancurses::COLOR_BLACK,
NamedColor::Black => pancurses::COLOR_BLACK,
NamedColor::Blue => pancurses::COLOR_BLUE,
NamedColor::BrightBlack => pancurses::COLOR_BLACK,
NamedColor::BrightBlue => pancurses::COLOR_BLUE,
NamedColor::BrightCyan => pancurses::COLOR_CYAN,
NamedColor::BrightGreen => pancurses::COLOR_GREEN,
NamedColor::BrightMagenta => pancurses::COLOR_MAGENTA,
NamedColor::BrightRed => pancurses::COLOR_RED,
NamedColor::BrightWhite => pancurses::COLOR_WHITE,
NamedColor::BrightYellow => pancurses::COLOR_YELLOW,
NamedColor::Cursor => pancurses::COLOR_BLACK,
NamedColor::CursorText => pancurses::COLOR_WHITE,
NamedColor::Cyan => pancurses::COLOR_CYAN,
NamedColor::DimBlack => pancurses::COLOR_BLACK,
NamedColor::DimBlue => pancurses::COLOR_BLUE,
NamedColor::DimCyan => pancurses::COLOR_CYAN,
NamedColor::DimGreen => pancurses::COLOR_GREEN,
NamedColor::DimMagenta => pancurses::COLOR_MAGENTA,
NamedColor::DimRed => pancurses::COLOR_RED,
NamedColor::DimWhite => pancurses::COLOR_WHITE,
NamedColor::DimYellow => pancurses::COLOR_YELLOW,
NamedColor::Foreground => pancurses::COLOR_WHITE,
NamedColor::Green => pancurses::COLOR_GREEN,
NamedColor::Magenta => pancurses::COLOR_MAGENTA,
NamedColor::Red => pancurses::COLOR_RED,
NamedColor::White => pancurses::COLOR_WHITE,
NamedColor::Yellow => pancurses::COLOR_YELLOW,
};
win.attrset(ColorPair(get_colour_index(c) as u8));
win.mvaddch(
(line.0 as i32) + 1,
(col.0 as i32) + 1,
cell.c.to_chtype(),
);
},
Color::Spec(_) => {
return Err(RenderError::ColourSpecFound);
},
Color::Indexed(_) => {
return Err(RenderError::ColourIndexFound);
},
};
col += 1;
}
line += 1;
}
let Point{line: Line(row), col: Column(col)} = term.cursor().point;
win.mv(
((row + 1) as usize).try_into().unwrap(),
((col + 1) as usize).try_into().unwrap(),
);
win.refresh();
Ok(())
}
type RenderResult = Result<(), RenderError>;
enum RenderError {
// These colour types aren't currently supported.
ColourSpecFound,
ColourIndexFound,
}
| {
for i in 1..COLOUR_INDEXES.len()-1 {
if c == COLOUR_INDEXES[i] {
return i
}
}
0
} | identifier_body |
main.go | package main
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/bwmarrin/discordgo"
_ "github.com/go-sql-driver/mysql"
"os"
"os/signal"
"strings"
"syscall"
"time"
)
var Config struct {
Token string `json:"Token"`
Prefix string `json:"Prefix"`
Owner_id string `json:"Owner_id"`
database_host string `json:"database_host"`
database_port string `json:"database_port"`
database_connect string `json:"database_connect"`
database_socket string `json:"database_socket"`
database_user string `json:"database_user"`
database_password string `json:"database_password"`
database_table string `json:"database_table"`
}
type Channel struct {
id int
name string
owner_id string
category_id string
voicechannel_id string
textchannel_id string
settingmessage_id string
options string
}
var DB *sql.DB
func main() {
// Logo
//goland:noinspection GoPrintFunctions to make my IDE not shit itself
fmt.Println(" _______ _____ _ \n" +
" |__ __| / ____| | \n" +
" | | ___ _ __ ___ _ __ | | | |__ __ _ _ __ \n" +
" | |/ _ \\ '_ ` _ \\| '_ \\| | | '_ \\ / _` | '_ \\ \n" +
" | | __/ | | | | | |_) | |____| | | | (_| | | | | \n" +
" |_|\\___|_| |_| |_| .__/ \\_____|_| |_|\\__,_|_| |_| \n" +
" | | \n" +
" |_| \n")
//Read config file
fmt.Println("INIT: Config file")
configFile, err := os.Open("config.json")
if err != nil {
fmt.Println("Unable to read the config file!")
panic(err)
}
jsonParser := json.NewDecoder(configFile)
if err = jsonParser.Decode(&Config); err != nil {
fmt.Println("Unable to parse Json.")
panic(err)
}
fmt.Println("Config read successfully.")
//Init Database
//TODO: implement Unix socket connections
fmt.Println("INIT: Database in TCP mode")
//TODO:Need to use the json file because for some fucking reason it doesnt want to work and error out.
//db, err := sql.Open("mysql", Config.database_user + ":" + Config.database_password + "@tcp(" + Config.database_host + ":" + Config.database_port + ")/" + Config.database_table)
db, err := sql.Open("mysql", "root:root@tcp(127.0.0.1:3306)/TempChan")
if err != nil {
panic(err)
} else {
fmt.Println("Databast init success!")
}
db.SetConnMaxLifetime(time.Minute * 3)
db.SetMaxOpenConns(10)
db.SetMaxIdleConns(10)
//Setting global variable...
DB = db
//New Discord session
fmt.Println("INIT: Discord Connection")
dg, err := discordgo.New("Bot " + Config.Token)
if err != nil {
fmt.Println("Error creating discord session. Discord down Lulz", err)
return
} else {
fmt.Println("Discord session created.")
}
//Register Handlers
fmt.Println("Registering handlers...")
dg.AddHandler(MessageCreate)
dg.AddHandler(MessageReactions)
fmt.Println("Setting Intents...")
dg.Identify.Intents = discordgo.MakeIntent(discordgo.IntentsAllWithoutPrivileged)
err = dg.Open()
if err != nil {
fmt.Println("Error opening connection. ", err)
return
}
fmt.Println("Done. TempChan is running. Logged in as: " + dg.State.User.Username + " Prefix set to: " + Config.Prefix)
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
dg.Close()
db.Close()
}
func MessageReactions(s *discordgo.Session, r *discordgo.MessageReactionAdd) {
//Prevent bot reactions to trigger itself.
if r.UserID == s.State.User.ID {
return
}
//Check the message where the reaction is made, if someone reacted to a message not from the bot, just return it and dont do a SQL query.
OrigMessage, err := s.ChannelMessage(r.ChannelID, r.MessageID)
if OrigMessage.Author.ID != s.State.User.ID {
return
}
var channel = Channel{}
_ = DB.QueryRow("SELECT * from channels WHERE settingmessage_id = " + r.MessageID).Scan(&channel.id, &channel.name, &channel.owner_id, &channel.category_id, &channel.voicechannel_id, &channel.textchannel_id, &channel.settingmessage_id, &channel.options)
if r.MessageReaction.Emoji.Name == "❌" && r.MessageReaction.UserID == "218310787289186304" && r.MessageID == channel.settingmessage_id {
s.ChannelDelete(channel.voicechannel_id)
s.ChannelDelete(channel.textchannel_id)
s.ChannelDelete(channel.category_id)
stmt, _ := DB.Prepare("DELETE from channels where settingmessage_id = ?")
_, _ = stmt.Exec(r.MessageID)
}
if r.MessageReaction.Emoji.Name == "🔞" && r.MessageReaction.UserID != s.State.User.ID && r.MessageID == channel.settingmessage_id {
s.ChannelEditComplex(channel.textchannel_id, &discordgo.ChannelEdit{NSFW: true})
s.MessageReactionRemove(channel.textchannel_id, channel.settingmessage_id, "🔞", r.UserID);
s.MessageReactionRemove(channel.textchannel_id, channel.settingmessage_id, "🔞", s.State.User.ID)
}
if err != nil {
fmt.Println(err)
}
}
func MessageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
// Igno | re all messages created by the bot itself
// This isn't required in this specific example but it's a good practice.
if m.Author.ID == s.State.User.ID {
return
}
if strings.Contains(m.Content, "iOS") {
_, err := s.ChannelMessageSend(m.ChannelID, "> " + m.Content + "\n" + m.Author.Mention() + " Its `Ios` Not `iOS` :)")
if err != nil {
fmt.Println("Error: ", err)
}
}
// Show a simple help list.
if m.Content == Config.Prefix + "help" {
s.ChannelMessageSend(m.ChannelID, "To create Channels: ]cc <name>\n" +
"Setting a Limit ]climit <number of people>")
}
if m.Content == Config.Prefix + "channel delete all" && m.Author.ID == "218310787289186304" {
s.ChannelMessageSend(m.ChannelID, "Deleting all channels currently in database.")
channelrows, err := DB.Query("SELECT * from channels")
if err != nil {
s.ChannelMessageSend(m.ChannelID, "Error! Was unable to get data from database.")
fmt.Println(err)
}
var channel = Channel{}
for channelrows.Next() {
_ = channelrows.Scan(&channel.id, &channel.name, &channel.owner_id, &channel.category_id, &channel.voicechannel_id, &channel.textchannel_id, &channel.settingmessage_id, &channel.options)
fmt.Println("Deleting: " + channel.name)
s.ChannelDelete(channel.voicechannel_id)
s.ChannelDelete(channel.textchannel_id)
s.ChannelDelete(channel.category_id)
stmt, _ := DB.Prepare("DELETE from channels where id = ?")
_, _ = stmt.Exec(channel.id)
}
}
if m.Content == Config.Prefix + "exit" && m.Author.ID == "218310787289186304" {
s.ChannelMessageSend(m.ChannelID, "Shutting down.")
fmt.Println("Got exit call from discord command.")
s.Close()
DB.Close()
os.Exit(0)
} else if m.Content == Config.Prefix + "exit" && m.Author.ID != "218310787289186304" {
s.ChannelMessageSend(m.ChannelID, "Watch this: <https://www.youtube.com/watch?v=dQw4w9WgXcQ>")
}
if m.Content == Config.Prefix + "cc" {
s.ChannelMessageSend(m.ChannelID, "Error missing channel name!")
}
//todo: make a command framework?
if strings.HasPrefix(m.Content, Config.Prefix + "cc ") {
tempchan, err := s.ChannelMessageSend(m.ChannelID, "Creating temporay channels for you...")
//Create the first category first.
channelCategory, err := s.GuildChannelCreate(m.GuildID, strings.Trim(m.Content, Config.Prefix + "cc "), discordgo.ChannelTypeGuildCategory)
channelText, err := s.GuildChannelCreateComplex(m.GuildID, discordgo.GuildChannelCreateData{
Name: strings.Trim(m.Content, Config.Prefix + "cc "),
Type: discordgo.ChannelTypeGuildText,
Topic: "Created by: " + m.Author.Username + ". This is a temporary channel.",
Position: 0,
ParentID: channelCategory.ID,
NSFW: false,
})
if err != nil {
s.ChannelMessageSend(m.ChannelID, "⚠ Sorry, I was unable to create the text channel. <@218310787289186304> Logged error to console.")
fmt.Println("Error: ", err)
}
// Create the text channel
channelVoice, err := s.GuildChannelCreateComplex(m.GuildID, discordgo.GuildChannelCreateData{
Name: strings.Trim(m.Content, Config.Prefix + "cc "),
Type: discordgo.ChannelTypeGuildVoice,
ParentID: channelCategory.ID,
})
if err != nil {
s.ChannelMessageSend(m.ChannelID, "⚠ Sorry, I was unable to create the voice channel. <@218310787289186304> Logged error to console.")
fmt.Println("Error: ", err)
}
channelSettings, err := s.ChannelMessageSend(channelText.ID, "```*** Channel Settings ***```\nThis is your newly created channel.\n" +
"The channel owner is: " + m.Author.Mention() + "\n" +
"The Channel owner and staff are able to change the settings via the emojis. You are also still required to obey the server rules!\n\n" +
"❌ = Delete the channel\n" +
"🔒 = Disable this Chat channel.\n" +
"🔞 = Set Chat as NSFW\n\n" +
"For more commands enter ]channel help\n\n\n" +
"⚠ Note: Staff of this server is also able to change every setting.")
//Saving TO DB
fmt.Println("Saving to DB...")
stmt, err := DB.Prepare("INSERT INTO `channels` (`name`, `owner_id`, `category_id`, `voicechannel_id`, `textchannel_id`, `settingmessage_id`, `options`) VALUES (?, ?, ?, ?, ?, ?, ?)")
_, err = stmt.Exec(strings.Trim(m.Content, Config.Prefix + "cc "), m.Author.ID, channelCategory.ID, channelVoice.ID, channelText.ID, channelSettings.ID, nil)
if err != nil {
s.ChannelMessageSend(m.ChannelID, "⚠ Sorry, I was unable to create the channel. <@218310787289186304> Logged error to console.")
fmt.Println("Error: ", err)
} else {
s.ChannelMessageDelete(m.ChannelID, tempchan.ID)
s.ChannelMessageSend(m.ChannelID, "Channel created. please join the channel within 30 seconds, or it will be deleted")
fmt.Println("Created a new temporary channel to watch on... ")
}
fmt.Println("Adding reactions...")
err = s.ChannelMessagePin(channelText.ID, channelSettings.ID)
err = s.MessageReactionAdd(channelText.ID, channelSettings.ID, "❌")
err = s.MessageReactionAdd(channelText.ID, channelSettings.ID, "🔒")
err = s.MessageReactionAdd(channelText.ID, channelSettings.ID, "🔞")
if err != nil {
fmt.Println("Unable to add a reaction...")
}
fmt.Println("Channel creation Done.")
}
}
| identifier_body | |
main.go | package main
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/bwmarrin/discordgo"
_ "github.com/go-sql-driver/mysql"
"os"
"os/signal"
"strings"
"syscall"
"time"
)
var Config struct {
Token string `json:"Token"`
Prefix string `json:"Prefix"`
Owner_id string `json:"Owner_id"`
database_host string `json:"database_host"`
database_port string `json:"database_port"`
database_connect string `json:"database_connect"`
database_socket string `json:"database_socket"`
database_user string `json:"database_user"`
database_password string `json:"database_password"`
database_table string `json:"database_table"`
}
type Channel struct {
id int
name string
owner_id string
category_id string
voicechannel_id string
textchannel_id string
settingmessage_id string
options string
}
var DB *sql.DB
func main() {
// Logo
//goland:noinspection GoPrintFunctions to make my IDE not shit itself
fmt.Println(" _______ _____ _ \n" +
" |__ __| / ____| | \n" +
" | | ___ _ __ ___ _ __ | | | |__ __ _ _ __ \n" +
" | |/ _ \\ '_ ` _ \\| '_ \\| | | '_ \\ / _` | '_ \\ \n" +
" | | __/ | | | | | |_) | |____| | | | (_| | | | | \n" +
" |_|\\___|_| |_| |_| .__/ \\_____|_| |_|\\__,_|_| |_| \n" +
" | | \n" +
" |_| \n")
//Read config file
fmt.Println("INIT: Config file")
configFile, err := os.Open("config.json")
if err != nil {
fmt.Println("Unable to read the config file!")
panic(err)
}
jsonParser := json.NewDecoder(configFile)
if err = jsonParser.Decode(&Config); err != nil {
fmt.Println("Unable to parse Json.")
panic(err)
}
fmt.Println("Config read successfully.")
//Init Database
//TODO: implement Unix socket connections
fmt.Println("INIT: Database in TCP mode")
//TODO:Need to use the json file because for some fucking reason it doesnt want to work and error out.
//db, err := sql.Open("mysql", Config.database_user + ":" + Config.database_password + "@tcp(" + Config.database_host + ":" + Config.database_port + ")/" + Config.database_table)
db, err := sql.Open("mysql", "root:root@tcp(127.0.0.1:3306)/TempChan")
if err != nil {
panic(err)
} else {
fmt.Println("Databast init success!")
}
db.SetConnMaxLifetime(time.Minute * 3)
db.SetMaxOpenConns(10)
db.SetMaxIdleConns(10)
//Setting global variable...
DB = db
//New Discord session
fmt.Println("INIT: Discord Connection")
dg, err := discordgo.New("Bot " + Config.Token)
if err != nil {
fmt.Println("Error creating discord session. Discord down Lulz", err)
return
} else {
fmt.Println("Discord session created.")
}
//Register Handlers
fmt.Println("Registering handlers...")
dg.AddHandler(MessageCreate)
dg.AddHandler(MessageReactions)
fmt.Println("Setting Intents...")
dg.Identify.Intents = discordgo.MakeIntent(discordgo.IntentsAllWithoutPrivileged)
err = dg.Open()
if err != nil {
fmt.Println("Error opening connection. ", err)
return
}
fmt.Println("Done. TempChan is running. Logged in as: " + dg.State.User.Username + " Prefix set to: " + Config.Prefix)
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
dg.Close()
db.Close()
}
func MessageReactions(s *discordgo.Session, r *discordgo.MessageReactionAdd) {
//Prevent bot reactions to trigger itself.
if r.UserID == s.State.User.ID {
return
}
//Check the message where the reaction is made, if someone reacted to a message not from the bot, just return it and dont do a SQL query.
OrigMessage, err := s.ChannelMessage(r.ChannelID, r.MessageID)
if OrigMessage.Author.ID != s.State.User.ID {
return
}
var channel = Channel{}
_ = DB.QueryRow("SELECT * from channels WHERE settingmessage_id = " + r.MessageID).Scan(&channel.id, &channel.name, &channel.owner_id, &channel.category_id, &channel.voicechannel_id, &channel.textchannel_id, &channel.settingmessage_id, &channel.options)
if r.MessageReaction.Emoji.Name == "❌" && r.MessageReaction.UserID == "218310787289186304" && r.MessageID == channel.settingmessage_id {
s.ChannelDelete(channel.voicechannel_id)
s.ChannelDelete(channel.textchannel_id)
s.ChannelDelete(channel.category_id)
stmt, _ := DB.Prepare("DELETE from channels where settingmessage_id = ?")
_, _ = stmt.Exec(r.MessageID)
}
if r.MessageReaction.Emoji.Name == "🔞" && r.MessageReaction.UserID != s.State.User.ID && r.MessageID == channel.settingmessage_id {
s.ChannelEditComplex(channel.textchannel_id, &discordgo.ChannelEdit{NSFW: true})
s.MessageReactionRemove(channel.textchannel_id, channel.settingmessage_id, "🔞", r.UserID);
s.MessageReactionRemove(channel.textchannel_id, channel.settingmessage_id, "🔞", s.State.User.ID)
}
if err != nil {
fmt.Println(err)
}
}
func MessageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
// Ignore all messages created by the bot itself
// This isn't required in this specific example but it's a good practice.
if m.Author.ID == s.State.User.ID {
return
}
if strings.Contains(m.Content, "iOS") {
_, err := s.ChannelMessageSend(m.ChannelID, "> " + m.Content + "\n" + m.Author.Mention() + " Its `Ios` Not `iOS` :)")
if err != nil {
fmt.Println("Error: ", err)
}
}
// Show a simple help list.
if m.Content == Config.Prefix + "help" {
s.ChannelMessageSend(m.ChannelID, "To create Channels: ]cc <name>\n" +
"Setting a Limit ]climit <number of people>")
}
if m.Content == Config.Prefix + "channel delete all" && m.Author.ID == "218310787289186304" {
s.ChannelMessageSend(m.ChannelID, "Deleting all channels currently in database.")
channelrows, err := DB.Query("SELECT * from channels")
if err != nil {
s.ChannelMessageSend(m.ChannelID, "Error! Was unable to get data from database.")
fmt.Println(err)
}
var channel = Channel{}
for channelrows.Next() {
_ = channelrows.Scan(&channel.id, &channel.name, &channel.owner_id, &channel.category_id, &channel.voicechannel_id, &channel.textchannel_id, &channel.settingmessage_id, &channel.options)
fmt.Println("Deleting: " + channel.name)
s.ChannelDelete(channel.voicechannel_id)
s.ChannelDelete(channel.textchannel_id)
s.ChannelDelete(channel.category_id)
stmt, _ := DB.Prepare("DELETE from channels where id = ?")
_, _ = stmt.Exec(channel.id)
}
}
if m.Content == Config.Prefix + "exit" && m.Author.ID == "218310787289186304" {
s.ChannelMessageSend(m.ChannelID, "Shutting down.")
fmt.Println("Got exit call from discord command.")
s.Close()
DB.Close()
os.Exit(0)
} else if m.Content == Config.Prefix + "exit" && m.Author.ID != "218310787289186304" {
s.ChannelMessageSend(m.ChannelID, "Watch this: <https://www.youtube.com/watch?v=dQw4w9WgXcQ>")
}
if m.Content == Config.Prefix + "cc" {
s.Chann | make a command framework?
if strings.HasPrefix(m.Content, Config.Prefix + "cc ") {
tempchan, err := s.ChannelMessageSend(m.ChannelID, "Creating temporay channels for you...")
//Create the first category first.
channelCategory, err := s.GuildChannelCreate(m.GuildID, strings.Trim(m.Content, Config.Prefix + "cc "), discordgo.ChannelTypeGuildCategory)
channelText, err := s.GuildChannelCreateComplex(m.GuildID, discordgo.GuildChannelCreateData{
Name: strings.Trim(m.Content, Config.Prefix + "cc "),
Type: discordgo.ChannelTypeGuildText,
Topic: "Created by: " + m.Author.Username + ". This is a temporary channel.",
Position: 0,
ParentID: channelCategory.ID,
NSFW: false,
})
if err != nil {
s.ChannelMessageSend(m.ChannelID, "⚠ Sorry, I was unable to create the text channel. <@218310787289186304> Logged error to console.")
fmt.Println("Error: ", err)
}
// Create the text channel
channelVoice, err := s.GuildChannelCreateComplex(m.GuildID, discordgo.GuildChannelCreateData{
Name: strings.Trim(m.Content, Config.Prefix + "cc "),
Type: discordgo.ChannelTypeGuildVoice,
ParentID: channelCategory.ID,
})
if err != nil {
s.ChannelMessageSend(m.ChannelID, "⚠ Sorry, I was unable to create the voice channel. <@218310787289186304> Logged error to console.")
fmt.Println("Error: ", err)
}
channelSettings, err := s.ChannelMessageSend(channelText.ID, "```*** Channel Settings ***```\nThis is your newly created channel.\n" +
"The channel owner is: " + m.Author.Mention() + "\n" +
"The Channel owner and staff are able to change the settings via the emojis. You are also still required to obey the server rules!\n\n" +
"❌ = Delete the channel\n" +
"🔒 = Disable this Chat channel.\n" +
"🔞 = Set Chat as NSFW\n\n" +
"For more commands enter ]channel help\n\n\n" +
"⚠ Note: Staff of this server is also able to change every setting.")
//Saving TO DB
fmt.Println("Saving to DB...")
stmt, err := DB.Prepare("INSERT INTO `channels` (`name`, `owner_id`, `category_id`, `voicechannel_id`, `textchannel_id`, `settingmessage_id`, `options`) VALUES (?, ?, ?, ?, ?, ?, ?)")
_, err = stmt.Exec(strings.Trim(m.Content, Config.Prefix + "cc "), m.Author.ID, channelCategory.ID, channelVoice.ID, channelText.ID, channelSettings.ID, nil)
if err != nil {
s.ChannelMessageSend(m.ChannelID, "⚠ Sorry, I was unable to create the channel. <@218310787289186304> Logged error to console.")
fmt.Println("Error: ", err)
} else {
s.ChannelMessageDelete(m.ChannelID, tempchan.ID)
s.ChannelMessageSend(m.ChannelID, "Channel created. please join the channel within 30 seconds, or it will be deleted")
fmt.Println("Created a new temporary channel to watch on... ")
}
fmt.Println("Adding reactions...")
err = s.ChannelMessagePin(channelText.ID, channelSettings.ID)
err = s.MessageReactionAdd(channelText.ID, channelSettings.ID, "❌")
err = s.MessageReactionAdd(channelText.ID, channelSettings.ID, "🔒")
err = s.MessageReactionAdd(channelText.ID, channelSettings.ID, "🔞")
if err != nil {
fmt.Println("Unable to add a reaction...")
}
fmt.Println("Channel creation Done.")
}
}
| elMessageSend(m.ChannelID, "Error missing channel name!")
}
//todo: | conditional_block |
main.go | package main
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/bwmarrin/discordgo"
_ "github.com/go-sql-driver/mysql"
"os"
"os/signal"
"strings"
"syscall"
"time"
)
var Config struct {
Token string `json:"Token"`
Prefix string `json:"Prefix"`
Owner_id string `json:"Owner_id"`
database_host string `json:"database_host"`
database_port string `json:"database_port"`
database_connect string `json:"database_connect"`
database_socket string `json:"database_socket"`
database_user string `json:"database_user"`
database_password string `json:"database_password"`
database_table string `json:"database_table"`
}
type Channel struct {
id int
name string
owner_id string
category_id string
voicechannel_id string
textchannel_id string
settingmessage_id string
options string
}
var DB *sql.DB
func main() {
// Logo
//goland:noinspection GoPrintFunctions to make my IDE not shit itself
fmt.Println(" _______ _____ _ \n" +
" |__ __| / ____| | \n" +
" | | ___ _ __ ___ _ __ | | | |__ __ _ _ __ \n" +
" | |/ _ \\ '_ ` _ \\| '_ \\| | | '_ \\ / _` | '_ \\ \n" +
" | | __/ | | | | | |_) | |____| | | | (_| | | | | \n" +
" |_|\\___|_| |_| |_| .__/ \\_____|_| |_|\\__,_|_| |_| \n" +
" | | \n" +
" |_| \n")
//Read config file
fmt.Println("INIT: Config file")
configFile, err := os.Open("config.json")
if err != nil {
fmt.Println("Unable to read the config file!")
panic(err)
}
jsonParser := json.NewDecoder(configFile)
if err = jsonParser.Decode(&Config); err != nil {
fmt.Println("Unable to parse Json.")
panic(err)
}
fmt.Println("Config read successfully.")
//Init Database
//TODO: implement Unix socket connections
fmt.Println("INIT: Database in TCP mode")
//TODO:Need to use the json file because for some fucking reason it doesnt want to work and error out.
//db, err := sql.Open("mysql", Config.database_user + ":" + Config.database_password + "@tcp(" + Config.database_host + ":" + Config.database_port + ")/" + Config.database_table)
db, err := sql.Open("mysql", "root:root@tcp(127.0.0.1:3306)/TempChan")
if err != nil {
panic(err)
} else {
fmt.Println("Databast init success!")
}
db.SetConnMaxLifetime(time.Minute * 3)
db.SetMaxOpenConns(10)
db.SetMaxIdleConns(10)
//Setting global variable...
DB = db
//New Discord session
fmt.Println("INIT: Discord Connection")
dg, err := discordgo.New("Bot " + Config.Token)
if err != nil {
fmt.Println("Error creating discord session. Discord down Lulz", err)
return
} else {
fmt.Println("Discord session created.")
}
//Register Handlers
fmt.Println("Registering handlers...")
dg.AddHandler(MessageCreate)
dg.AddHandler(MessageReactions)
fmt.Println("Setting Intents...")
dg.Identify.Intents = discordgo.MakeIntent(discordgo.IntentsAllWithoutPrivileged)
err = dg.Open()
if err != nil {
fmt.Println("Error opening connection. ", err)
return
}
fmt.Println("Done. TempChan is running. Logged in as: " + dg.State.User.Username + " Prefix set to: " + Config.Prefix)
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
dg.Close()
db.Close()
}
func MessageReactions(s *discordgo.Session, r *discordgo.MessageReactionAdd) {
//Prevent bot reactions to trigger itself.
if r.UserID == s.State.User.ID {
return
}
//Check the message where the reaction is made, if someone reacted to a message not from the bot, just return it and dont do a SQL query.
OrigMessage, err := s.ChannelMessage(r.ChannelID, r.MessageID)
if OrigMessage.Author.ID != s.State.User.ID {
return
}
var channel = Channel{}
_ = DB.QueryRow("SELECT * from channels WHERE settingmessage_id = " + r.MessageID).Scan(&channel.id, &channel.name, &channel.owner_id, &channel.category_id, &channel.voicechannel_id, &channel.textchannel_id, &channel.settingmessage_id, &channel.options)
if r.MessageReaction.Emoji.Name == "❌" && r.MessageReaction.UserID == "218310787289186304" && r.MessageID == channel.settingmessage_id {
s.ChannelDelete(channel.voicechannel_id)
s.ChannelDelete(channel.textchannel_id)
s.ChannelDelete(channel.category_id)
stmt, _ := DB.Prepare("DELETE from channels where settingmessage_id = ?")
_, _ = stmt.Exec(r.MessageID)
}
if r.MessageReaction.Emoji.Name == "🔞" && r.MessageReaction.UserID != s.State.User.ID && r.MessageID == channel.settingmessage_id {
s.ChannelEditComplex(channel.textchannel_id, &discordgo.ChannelEdit{NSFW: true})
s.MessageReactionRemove(channel.textchannel_id, channel.settingmessage_id, "🔞", r.UserID);
s.MessageReactionRemove(channel.textchannel_id, channel.settingmessage_id, "🔞", s.State.User.ID)
}
if err != nil {
fmt.Println(err)
}
}
func MessageCrea | go.Session, m *discordgo.MessageCreate) {
// Ignore all messages created by the bot itself
// This isn't required in this specific example but it's a good practice.
if m.Author.ID == s.State.User.ID {
return
}
if strings.Contains(m.Content, "iOS") {
_, err := s.ChannelMessageSend(m.ChannelID, "> " + m.Content + "\n" + m.Author.Mention() + " Its `Ios` Not `iOS` :)")
if err != nil {
fmt.Println("Error: ", err)
}
}
// Show a simple help list.
if m.Content == Config.Prefix + "help" {
s.ChannelMessageSend(m.ChannelID, "To create Channels: ]cc <name>\n" +
"Setting a Limit ]climit <number of people>")
}
if m.Content == Config.Prefix + "channel delete all" && m.Author.ID == "218310787289186304" {
s.ChannelMessageSend(m.ChannelID, "Deleting all channels currently in database.")
channelrows, err := DB.Query("SELECT * from channels")
if err != nil {
s.ChannelMessageSend(m.ChannelID, "Error! Was unable to get data from database.")
fmt.Println(err)
}
var channel = Channel{}
for channelrows.Next() {
_ = channelrows.Scan(&channel.id, &channel.name, &channel.owner_id, &channel.category_id, &channel.voicechannel_id, &channel.textchannel_id, &channel.settingmessage_id, &channel.options)
fmt.Println("Deleting: " + channel.name)
s.ChannelDelete(channel.voicechannel_id)
s.ChannelDelete(channel.textchannel_id)
s.ChannelDelete(channel.category_id)
stmt, _ := DB.Prepare("DELETE from channels where id = ?")
_, _ = stmt.Exec(channel.id)
}
}
if m.Content == Config.Prefix + "exit" && m.Author.ID == "218310787289186304" {
s.ChannelMessageSend(m.ChannelID, "Shutting down.")
fmt.Println("Got exit call from discord command.")
s.Close()
DB.Close()
os.Exit(0)
} else if m.Content == Config.Prefix + "exit" && m.Author.ID != "218310787289186304" {
s.ChannelMessageSend(m.ChannelID, "Watch this: <https://www.youtube.com/watch?v=dQw4w9WgXcQ>")
}
if m.Content == Config.Prefix + "cc" {
s.ChannelMessageSend(m.ChannelID, "Error missing channel name!")
}
//todo: make a command framework?
if strings.HasPrefix(m.Content, Config.Prefix + "cc ") {
tempchan, err := s.ChannelMessageSend(m.ChannelID, "Creating temporay channels for you...")
//Create the first category first.
channelCategory, err := s.GuildChannelCreate(m.GuildID, strings.Trim(m.Content, Config.Prefix + "cc "), discordgo.ChannelTypeGuildCategory)
channelText, err := s.GuildChannelCreateComplex(m.GuildID, discordgo.GuildChannelCreateData{
Name: strings.Trim(m.Content, Config.Prefix + "cc "),
Type: discordgo.ChannelTypeGuildText,
Topic: "Created by: " + m.Author.Username + ". This is a temporary channel.",
Position: 0,
ParentID: channelCategory.ID,
NSFW: false,
})
if err != nil {
s.ChannelMessageSend(m.ChannelID, "⚠ Sorry, I was unable to create the text channel. <@218310787289186304> Logged error to console.")
fmt.Println("Error: ", err)
}
// Create the text channel
channelVoice, err := s.GuildChannelCreateComplex(m.GuildID, discordgo.GuildChannelCreateData{
Name: strings.Trim(m.Content, Config.Prefix + "cc "),
Type: discordgo.ChannelTypeGuildVoice,
ParentID: channelCategory.ID,
})
if err != nil {
s.ChannelMessageSend(m.ChannelID, "⚠ Sorry, I was unable to create the voice channel. <@218310787289186304> Logged error to console.")
fmt.Println("Error: ", err)
}
channelSettings, err := s.ChannelMessageSend(channelText.ID, "```*** Channel Settings ***```\nThis is your newly created channel.\n" +
"The channel owner is: " + m.Author.Mention() + "\n" +
"The Channel owner and staff are able to change the settings via the emojis. You are also still required to obey the server rules!\n\n" +
"❌ = Delete the channel\n" +
"🔒 = Disable this Chat channel.\n" +
"🔞 = Set Chat as NSFW\n\n" +
"For more commands enter ]channel help\n\n\n" +
"⚠ Note: Staff of this server is also able to change every setting.")
//Saving TO DB
fmt.Println("Saving to DB...")
stmt, err := DB.Prepare("INSERT INTO `channels` (`name`, `owner_id`, `category_id`, `voicechannel_id`, `textchannel_id`, `settingmessage_id`, `options`) VALUES (?, ?, ?, ?, ?, ?, ?)")
_, err = stmt.Exec(strings.Trim(m.Content, Config.Prefix + "cc "), m.Author.ID, channelCategory.ID, channelVoice.ID, channelText.ID, channelSettings.ID, nil)
if err != nil {
s.ChannelMessageSend(m.ChannelID, "⚠ Sorry, I was unable to create the channel. <@218310787289186304> Logged error to console.")
fmt.Println("Error: ", err)
} else {
s.ChannelMessageDelete(m.ChannelID, tempchan.ID)
s.ChannelMessageSend(m.ChannelID, "Channel created. please join the channel within 30 seconds, or it will be deleted")
fmt.Println("Created a new temporary channel to watch on... ")
}
fmt.Println("Adding reactions...")
err = s.ChannelMessagePin(channelText.ID, channelSettings.ID)
err = s.MessageReactionAdd(channelText.ID, channelSettings.ID, "❌")
err = s.MessageReactionAdd(channelText.ID, channelSettings.ID, "🔒")
err = s.MessageReactionAdd(channelText.ID, channelSettings.ID, "🔞")
if err != nil {
fmt.Println("Unable to add a reaction...")
}
fmt.Println("Channel creation Done.")
}
}
| te(s *discord | identifier_name |
main.go | package main
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/bwmarrin/discordgo"
_ "github.com/go-sql-driver/mysql"
"os"
"os/signal"
"strings"
"syscall"
"time"
)
var Config struct {
Token string `json:"Token"`
Prefix string `json:"Prefix"`
Owner_id string `json:"Owner_id"`
database_host string `json:"database_host"`
database_port string `json:"database_port"`
database_connect string `json:"database_connect"`
database_socket string `json:"database_socket"`
database_user string `json:"database_user"`
database_password string `json:"database_password"`
database_table string `json:"database_table"`
}
type Channel struct {
id int
name string
owner_id string
category_id string
voicechannel_id string
textchannel_id string
settingmessage_id string
options string
}
var DB *sql.DB
func main() {
// Logo
//goland:noinspection GoPrintFunctions to make my IDE not shit itself
fmt.Println(" _______ _____ _ \n" +
" |__ __| / ____| | \n" +
" | | ___ _ __ ___ _ __ | | | |__ __ _ _ __ \n" +
" | |/ _ \\ '_ ` _ \\| '_ \\| | | '_ \\ / _` | '_ \\ \n" +
" | | __/ | | | | | |_) | |____| | | | (_| | | | | \n" +
" |_|\\___|_| |_| |_| .__/ \\_____|_| |_|\\__,_|_| |_| \n" +
" | | \n" +
" |_| \n")
//Read config file
fmt.Println("INIT: Config file")
configFile, err := os.Open("config.json")
if err != nil {
fmt.Println("Unable to read the config file!")
panic(err)
}
jsonParser := json.NewDecoder(configFile)
if err = jsonParser.Decode(&Config); err != nil {
fmt.Println("Unable to parse Json.")
panic(err)
}
fmt.Println("Config read successfully.")
//Init Database
//TODO: implement Unix socket connections
fmt.Println("INIT: Database in TCP mode")
//TODO:Need to use the json file because for some fucking reason it doesnt want to work and error out.
//db, err := sql.Open("mysql", Config.database_user + ":" + Config.database_password + "@tcp(" + Config.database_host + ":" + Config.database_port + ")/" + Config.database_table)
db, err := sql.Open("mysql", "root:root@tcp(127.0.0.1:3306)/TempChan")
if err != nil {
panic(err)
} else {
fmt.Println("Databast init success!")
}
db.SetConnMaxLifetime(time.Minute * 3)
db.SetMaxOpenConns(10)
db.SetMaxIdleConns(10)
//Setting global variable...
DB = db
//New Discord session
fmt.Println("INIT: Discord Connection")
dg, err := discordgo.New("Bot " + Config.Token)
if err != nil {
fmt.Println("Error creating discord session. Discord down Lulz", err)
return
} else {
fmt.Println("Discord session created.")
}
//Register Handlers
fmt.Println("Registering handlers...")
dg.AddHandler(MessageCreate)
dg.AddHandler(MessageReactions)
fmt.Println("Setting Intents...")
dg.Identify.Intents = discordgo.MakeIntent(discordgo.IntentsAllWithoutPrivileged)
err = dg.Open()
if err != nil {
fmt.Println("Error opening connection. ", err)
return
}
fmt.Println("Done. TempChan is running. Logged in as: " + dg.State.User.Username + " Prefix set to: " + Config.Prefix)
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
dg.Close()
db.Close()
}
func MessageReactions(s *discordgo.Session, r *discordgo.MessageReactionAdd) {
//Prevent bot reactions to trigger itself.
if r.UserID == s.State.User.ID {
return
}
//Check the message where the reaction is made, if someone reacted to a message not from the bot, just return it and dont do a SQL query.
OrigMessage, err := s.ChannelMessage(r.ChannelID, r.MessageID)
if OrigMessage.Author.ID != s.State.User.ID {
return
}
var channel = Channel{}
_ = DB.QueryRow("SELECT * from channels WHERE settingmessage_id = " + r.MessageID).Scan(&channel.id, &channel.name, &channel.owner_id, &channel.category_id, &channel.voicechannel_id, &channel.textchannel_id, &channel.settingmessage_id, &channel.options)
if r.MessageReaction.Emoji.Name == "❌" && r.MessageReaction.UserID == "218310787289186304" && r.MessageID == channel.settingmessage_id {
s.ChannelDelete(channel.voicechannel_id)
s.ChannelDelete(channel.textchannel_id)
s.ChannelDelete(channel.category_id)
stmt, _ := DB.Prepare("DELETE from channels where settingmessage_id = ?")
_, _ = stmt.Exec(r.MessageID)
}
if r.MessageReaction.Emoji.Name == "🔞" && r.MessageReaction.UserID != s.State.User.ID && r.MessageID == channel.settingmessage_id {
s.ChannelEditComplex(channel.textchannel_id, &discordgo.ChannelEdit{NSFW: true})
s.MessageReactionRemove(channel.textchannel_id, channel.settingmessage_id, "🔞", r.UserID);
s.MessageReactionRemove(channel.textchannel_id, channel.settingmessage_id, "🔞", s.State.User.ID)
}
if err != nil {
fmt.Println(err)
}
}
func MessageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
// Ignore all messages created by the bot itself
// This isn't required in this specific example but it's a good practice.
if m.Author.ID == s.State.User.ID {
return
}
if strings.Contains(m.Content, "iOS") {
_, err := s.ChannelMessageSend(m.ChannelID, "> " + m.Content + "\n" + m.Author.Mention() + " Its `Ios` Not `iOS` :)")
if err != nil {
fmt.Println("Error: ", err)
}
}
// Show a simple help list.
if m.Content == Config.Prefix + "help" {
s.ChannelMessageSend(m.ChannelID, "To create Channels: ]cc <name>\n" +
"Setting a Limit ]climit <number of people>")
}
if m.Content == Config.Prefix + "channel delete all" && m.Author.ID == "218310787289186304" {
s.ChannelMessageSend(m.ChannelID, "Deleting all channels currently in database.")
channelrows, err := DB.Query("SELECT * from channels")
if err != nil {
s.ChannelMessageSend(m.ChannelID, "Error! Was unable to get data from database.")
fmt.Println(err)
}
var channel = Channel{}
for channelrows.Next() {
_ = channelrows.Scan(&channel.id, &channel.name, &channel.owner_id, &channel.category_id, &channel.voicechannel_id, &channel.textchannel_id, &channel.settingmessage_id, &channel.options)
fmt.Println("Deleting: " + channel.name)
s.ChannelDelete(channel.voicechannel_id)
s.ChannelDelete(channel.textchannel_id)
s.ChannelDelete(channel.category_id)
stmt, _ := DB.Prepare("DELETE from channels where id = ?")
_, _ = stmt.Exec(channel.id)
}
}
if m.Content == Config.Prefix + "exit" && m.Author.ID == "218310787289186304" {
s.ChannelMessageSend(m.ChannelID, "Shutting down.")
fmt.Println("Got exit call from discord command.")
s.Close()
DB.Close()
os.Exit(0)
} else if m.Content == Config.Prefix + "exit" && m.Author.ID != "218310787289186304" {
s.ChannelMessageSend(m.ChannelID, "Watch this: <https://www.youtube.com/watch?v=dQw4w9WgXcQ>")
}
if m.Content == Config.Prefix + "cc" {
s.ChannelMessageSend(m.ChannelID, "Error missing channel name!")
}
//todo: make a command framework?
if strings.HasPrefix(m.Content, Config.Prefix + "cc ") {
tempchan, err := s.ChannelMessageSend(m.ChannelID, "Creating temporay channels for you...")
//Create the first category first.
channelCategory, err := s.GuildChannelCreate(m.GuildID, strings.Trim(m.Content, Config.Prefix + "cc "), discordgo.ChannelTypeGuildCategory)
channelText, err := s.GuildChannelCreateComplex(m.GuildID, discordgo.GuildChannelCreateData{
Name: strings.Trim(m.Content, Config.Prefix + "cc "),
Type: discordgo.ChannelTypeGuildText,
Topic: "Created by: " + m.Author.Username + ". This is a temporary channel.",
Position: 0,
ParentID: channelCategory.ID,
NSFW: false,
})
if err != nil {
s.ChannelMessageSend(m.ChannelID, "⚠ Sorry, I was unable to create the text channel. <@218310787289186304> Logged error to console.")
fmt.Println("Error: ", err)
}
// Create the text channel
channelVoice, err := s.GuildChannelCreateComplex(m.GuildID, discordgo.GuildChannelCreateData{
Name: strings.Trim(m.Content, Config.Prefix + "cc "),
Type: discordgo.ChannelTypeGuildVoice,
ParentID: channelCategory.ID,
})
if err != nil {
s.ChannelMessageSend(m.ChannelID, "⚠ Sorry, I was unable to create the voice channel. <@218310787289186304> Logged error to console.")
fmt.Println("Error: ", err)
}
channelSettings, err := s.ChannelMessageSend(channelText.ID, "```*** Channel Settings ***```\nThis is your newly created channel.\n" +
"The channel owner is: " + m.Author.Mention() + "\n" +
"The Channel owner and staff are able to change the settings via the emojis. You are also still required to obey the server rules!\n\n" +
"❌ = Delete the channel\n" +
"🔒 = Disable this Chat channel.\n" +
"🔞 = Set Chat as NSFW\n\n" +
"For more commands enter ]channel help\n\n\n" +
"⚠ Note: Staff of this server is also able to change every setting.")
//Saving TO DB
fmt.Println("Saving to DB...")
stmt, err := DB.Prepare("INSERT INTO `channels` (`name`, `owner_id`, `category_id`, `voicechannel_id`, `textchannel_id`, `settingmessage_id`, `options`) VALUES (?, ?, ?, ?, ?, ?, ?)")
_, err = stmt.Exec(strings.Trim(m.Content, Config.Prefix + "cc "), m.Author.ID, channelCategory.ID, channelVoice.ID, channelText.ID, channelSettings.ID, nil)
| s.ChannelMessageSend(m.ChannelID, "⚠ Sorry, I was unable to create the channel. <@218310787289186304> Logged error to console.")
fmt.Println("Error: ", err)
} else {
s.ChannelMessageDelete(m.ChannelID, tempchan.ID)
s.ChannelMessageSend(m.ChannelID, "Channel created. please join the channel within 30 seconds, or it will be deleted")
fmt.Println("Created a new temporary channel to watch on... ")
}
fmt.Println("Adding reactions...")
err = s.ChannelMessagePin(channelText.ID, channelSettings.ID)
err = s.MessageReactionAdd(channelText.ID, channelSettings.ID, "❌")
err = s.MessageReactionAdd(channelText.ID, channelSettings.ID, "🔒")
err = s.MessageReactionAdd(channelText.ID, channelSettings.ID, "🔞")
if err != nil {
fmt.Println("Unable to add a reaction...")
}
fmt.Println("Channel creation Done.")
}
} | if err != nil { | random_line_split |
fetch_github_projects.py | #!/usr/bin/env python3
import sqlite3
import argparse
import urllib.request, json
import math
import os
import time
import sys
import datetime
import re
import subprocess
dir = os.path.dirname(os.path.realpath(__file__))
project_dir = os.path.join(dir, '../projects')
conn = sqlite3.connect(os.path.join(dir, "../database.db"))
conn.execute("PRAGMA journal_mode=WAL")
c = conn.cursor()
per_page = 100
github_login_user = os.environ.get('GITHUB_CLIENT_ID', None)
github_login_password = os.environ.get('GITHUB_CLIENT_PASSWORD', None)
if github_login_user is None or github_login_password is None:
print('Please set up the Github credentials (environment variables GITHUB_CLIENT_ID and GITHUB_CLIENT_PASSWORD) due to the rate limit (see https://developer.github.com/v3/#authentication)!')
credentials_set = False
github_template = "https://api.github.com/search/repositories?q=+language:c+stars:%d&page=%d&per_page=" + str(per_page)
else:
credentials_set = True
github_template = "https://api.github.com/search/repositories?q=+language:c+stars:%d&page=%d&per_page=" + str(per_page) + "&client_id=%s&client_secret=%s"
def get_last_commit_hash(path):
process = subprocess.Popen(['git', 'rev-parse', 'HEAD'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
hash = stdout.decode().strip('\n')
return hash
def get_git_commit_count(path):
""" Gets the number of commits without merges from a Git repository. """
process = subprocess.Popen(['git', 'rev-list', 'HEAD', '--count', '--no-merges'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
number = stdout.decode().strip("\n")
return int(number)
def get_git_commiter_count(path):
""" Gets the number of committers from a Git repository. """
process = subprocess.Popen(['git', 'shortlog', '-sn'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
committers = stdout.decode("ISO-8859-1")
return len(committers.split('\n'))
def get_first_last_commit_date(path):
""" Gets the first and repository commit as a timestamp. """
# %at specifies a UNIX time stamp
process = subprocess.Popen(['git', 'log', '--format=%at'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
log = stdout.decode().strip('\n').split('\n')
last = int(log[0])
first = int(log[-1])
return (first, last)
def get_c_cpp_h_assembly_loc(path):
""" Gets the LOC of header and C files using cloc. """
try:
process = subprocess.Popen(['cloc', '.'], cwd=path, stdout=subprocess.PIPE)
except FileNotFoundError:
print("Failed to call cloc (see https://github.com/AlDanial/cloc), please install.")
exit(-1)
stdout, _ = process.communicate()
lines = stdout.decode().split('\n')
c_lines = 0
h_lines = 0
cpp_lines = 0
assembly_lines = 0
for line in lines:
c_match = re.match(r'C \s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if c_match:
c_lines = int(c_match.groups()[0])
h_match = re.match(r'C/C\+\+\sHeader\s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if h_match:
h_lines = int(h_match.groups()[0])
cpp_match = re.match(r'C\+\+\s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if cpp_match:
cpp_lines = int(cpp_match.groups()[0])
assembly_match = re.match(r'Assembly\s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if assembly_match:
assembly_lines = int(assembly_match.groups()[0])
return (c_lines, cpp_lines, h_lines, assembly_lines)
def owner_project_from_github_url(url):
""" Extracts owner and project name from a Github URL. For example, for
https://github.com/graalvm/sulong it returns the tuple (graalvm, sulong). """
if not re.match('https://github.com/([a-zA-Z0-9-_]*)/[a-zA-Z0-9-_]*', url):
print(str(url) + "is not a valid url!")
exit(-1)
elements = url.split('/')
project_name = elements[-1]
organization_name = elements[-2]
return (organization_name, project_name)
def get_project_dir(url):
""" Map a Github URL to the local Github project directory. """
(project_owner, project_name) = owner_project_from_github_url(url)
project_dir_name = project_owner + '-' + project_name
project_dir_name = os.path.join(project_dir, project_dir_name)
return project_dir_name
def download_project(url):
project_dir_name = get_project_dir(url)
process = subprocess.Popen(['git', 'clone', url, project_dir_name], cwd=project_dir)
process.communicate()
return project_dir_name
def exists(url):
query = """select COUNT(*) FROM GithubProjectUnfiltered WHERE GITHUB_URL = ? """
return c.execute(query, (url,)).fetchone()[0] == 1
def insert_project_entry(data):
github_url = data[str("html_url")]
dirname = download_project(github_url)
dirs = dirname.rstrip(os.sep).split(os.sep)
commit_count = get_git_commit_count(dirname)
committers_count = get_git_commiter_count(dirname)
(first_date, last_date) = get_first_last_commit_date(dirname)
(organization_name, project_name) = owner_project_from_github_url(github_url)
(c_loc, cpp_loc, h_loc, assembly_loc) = get_c_cpp_h_assembly_loc(dirname)
last_hash = get_last_commit_hash(dirname)
project_name = data['name']
owner_name = data['owner']['login']
stargazers = data['stargazers_count']
forks = data['forks_count']
open_issues = data['open_issues_count']
description = data['description']
watchers = data['watchers_count']
fork = data['fork']
creation_date = datetime.datetime.strptime(data['created_at'], "%Y-%m-%dT%H:%M:%SZ").timestamp()
language = data['language']
query = """insert into GithubProjectUnfiltered(
GITHUB_OWNER_NAME,
GITHUB_PROJECT_NAME,
GITHUB_URL,
GITHUB_DESCRIPTION,
GITHUB_NR_STARGAZERS,
GITHUB_NR_WATCHERS,
GITHUB_NR_FORKS,
GITHUB_NR_OPEN_ISSUES,
GITHUB_REPO_CREATION_DATE,
GITHUB_LANGUAGE,
GITHUB_FORK,
PULL_HASH,
PULL_DATE,
CLOC_LOC_C,
CLOC_LOC_H,
CLOC_LOC_ASSEMBLY,
CLOC_LOC_CPP,
GIT_NR_COMMITS,
GIT_NR_COMMITTERS,
GIT_FIRST_COMMIT_DATE,
GIT_LAST_COMMIT_DATE,
PROCESSED
)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 0)
"""
try:
c.execute(query,
(owner_name,
project_name,
github_url,
description,
stargazers,
watchers,
forks,
open_issues,
datetime.datetime.fromtimestamp(creation_date).strftime('%Y-%m-%d'),
language,
fork,
last_hash,
datetime.datetime.now().strftime('%Y-%m-%d'),
c_loc,
h_loc,
assembly_loc,
cpp_loc,
commit_count,
committers_count,
datetime.datetime.fromtimestamp(first_date).strftime('%Y-%m-%d'),
datetime.datetime.fromtimestamp(last_date).strftime('%Y-%m-%d'))
)
conn.commit()
except sqlite3.IntegrityError as e:
print(github_url + " is a duplicate!")
def download_projects(stars_from, stars_to):
minutes = 5
current_stars = stars_from
while current_stars <= stars_to:
page = 1
hasResults = True
while hasResults:
if credentials_set:
github_url = github_template % (current_stars, page, github_login_user, github_login_password)
else:
github_url = github_template % (current_stars, page)
print("parsing projects with %d stars" % (current_stars, ))
try:
req = urllib.request.Request(github_url)
req.add_header('Cache-Control', 'max-age=0')
resp = urllib.request.urlopen(req)
json_file = json.loads(resp.read().decode())
#data = json.loads(url.read().decode())
projects = json_file['items']
count = json_file['total_count']
hasResults = count != 0
if hasResults:
print("starting to process " + str(count) + " items")
for project in projects:
|
if count < per_page:
hasResults = False
except urllib.error.HTTPError as e:
#if e.code == 422:
# print('reached page limit ' + str(page))
# hasResults = False
if e.code == 403:
print('exceeded rate limit!')
print('waiting for ' + str(minutes) + 'minutes...')
for minute in range(minutes):
print('.', end="")
sys.stdout.flush()
time.sleep(60)
print()
continue
else:
print(github_url + " " + str(e))
exit(-1)
page += 1
current_stars += 1
if hasResults:
print("...waiting for one minute before continuing with " + str(current_stars) + " stars")
for minute in range(4):
print(".", end="")
time.sleep(15)
#download_projects(140, 145)
#download_projects(145, 150)
#download_projects(150, 155)
#download_projects(155, 160)
#download_projects(160, 170)
#download_projects(170, 175)
#download_projects(175, 180)
#download_projects(180, 185)
#download_projects(85, 190)
#download_projects(185, 220)
#download_projects(210, 230)
#download_projects(220, 250)
#download_projects(245, 255)
#download_projects(250, 260)
#download_projects(260, 270)
#download_projects(270, 290)
#download_projects(290, 300)
#download_projects(300, 310)
#download_projects(310, 320)
#download_projects(320, 330)
#download_projects(330, 340)
#download_projects(340, 360)
#download_projects(360, 400)
#download_projects(400, 410)
#download_projects(410, 420)
#download_projects(420, 430)
#download_projects(430, 450)
#download_projects(450, 490)
#download_projects(490, 495)
#download_projects(495, 500)
#download_projects(500, 520)
#download_projects(521, 550)
#download_projects(551, 600)
#download_projects(601, 650)
#download_projects(651, 700)
#download_projects(701, 750)
#download_projects(751, 800)
#download_projects(800, 820)
#download_projects(821, 850)
#download_projects(850, 900)
#download_projects(900, 920)
#download_projects(920, 940)
#download_projects(940, 960)
#download_projects(960, 1000)
#download_projects(1000, 1100)
#download_projects(1100, 1300)
#download_projects(1300, 1500)
#download_projects(1500, 1600)
#download_projects(1600, 1700)
#download_projects(1700, 1800)
#download_projects(1800, 1900)
#download_projects(1900, 1950)
#download_projects(1950, 2000)
#download_projects(2000, 2100)
#download_projects(2100, 2200)
#download_projects(2200, 2300)
#download_projects(2300, 2330)
#download_projects(2330, 3000)
#download_projects(3000, 3670)
#download_projects(3670, 5000)
#download_projects(5000, 8000)
# <placeholder>
from include.sync_views_to_tables import *
| if not exists(project[str("html_url")]):
insert_project_entry(project) | conditional_block |
fetch_github_projects.py | #!/usr/bin/env python3
import sqlite3
import argparse
import urllib.request, json
import math
import os
import time
import sys
import datetime
import re
import subprocess
dir = os.path.dirname(os.path.realpath(__file__))
project_dir = os.path.join(dir, '../projects')
conn = sqlite3.connect(os.path.join(dir, "../database.db"))
conn.execute("PRAGMA journal_mode=WAL")
c = conn.cursor()
per_page = 100
github_login_user = os.environ.get('GITHUB_CLIENT_ID', None)
github_login_password = os.environ.get('GITHUB_CLIENT_PASSWORD', None)
if github_login_user is None or github_login_password is None:
print('Please set up the Github credentials (environment variables GITHUB_CLIENT_ID and GITHUB_CLIENT_PASSWORD) due to the rate limit (see https://developer.github.com/v3/#authentication)!')
credentials_set = False
github_template = "https://api.github.com/search/repositories?q=+language:c+stars:%d&page=%d&per_page=" + str(per_page)
else:
credentials_set = True
github_template = "https://api.github.com/search/repositories?q=+language:c+stars:%d&page=%d&per_page=" + str(per_page) + "&client_id=%s&client_secret=%s"
def get_last_commit_hash(path):
process = subprocess.Popen(['git', 'rev-parse', 'HEAD'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
hash = stdout.decode().strip('\n')
return hash
def get_git_commit_count(path):
""" Gets the number of commits without merges from a Git repository. """
process = subprocess.Popen(['git', 'rev-list', 'HEAD', '--count', '--no-merges'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
number = stdout.decode().strip("\n")
return int(number)
def get_git_commiter_count(path):
""" Gets the number of committers from a Git repository. """
process = subprocess.Popen(['git', 'shortlog', '-sn'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
committers = stdout.decode("ISO-8859-1")
return len(committers.split('\n'))
def get_first_last_commit_date(path):
""" Gets the first and repository commit as a timestamp. """
# %at specifies a UNIX time stamp
process = subprocess.Popen(['git', 'log', '--format=%at'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
log = stdout.decode().strip('\n').split('\n')
last = int(log[0])
first = int(log[-1])
return (first, last)
def get_c_cpp_h_assembly_loc(path):
""" Gets the LOC of header and C files using cloc. """
try:
process = subprocess.Popen(['cloc', '.'], cwd=path, stdout=subprocess.PIPE)
except FileNotFoundError:
print("Failed to call cloc (see https://github.com/AlDanial/cloc), please install.")
exit(-1)
stdout, _ = process.communicate()
lines = stdout.decode().split('\n')
c_lines = 0
h_lines = 0
cpp_lines = 0
assembly_lines = 0
for line in lines:
c_match = re.match(r'C \s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if c_match:
c_lines = int(c_match.groups()[0])
h_match = re.match(r'C/C\+\+\sHeader\s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if h_match:
h_lines = int(h_match.groups()[0])
cpp_match = re.match(r'C\+\+\s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if cpp_match:
cpp_lines = int(cpp_match.groups()[0])
assembly_match = re.match(r'Assembly\s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if assembly_match:
assembly_lines = int(assembly_match.groups()[0])
return (c_lines, cpp_lines, h_lines, assembly_lines)
def owner_project_from_github_url(url):
""" Extracts owner and project name from a Github URL. For example, for
https://github.com/graalvm/sulong it returns the tuple (graalvm, sulong). """
if not re.match('https://github.com/([a-zA-Z0-9-_]*)/[a-zA-Z0-9-_]*', url):
print(str(url) + "is not a valid url!")
exit(-1)
elements = url.split('/')
project_name = elements[-1]
organization_name = elements[-2]
return (organization_name, project_name)
def get_project_dir(url):
""" Map a Github URL to the local Github project directory. """
(project_owner, project_name) = owner_project_from_github_url(url)
project_dir_name = project_owner + '-' + project_name
project_dir_name = os.path.join(project_dir, project_dir_name)
return project_dir_name
def download_project(url):
project_dir_name = get_project_dir(url)
process = subprocess.Popen(['git', 'clone', url, project_dir_name], cwd=project_dir)
process.communicate()
return project_dir_name
def exists(url):
|
def insert_project_entry(data):
github_url = data[str("html_url")]
dirname = download_project(github_url)
dirs = dirname.rstrip(os.sep).split(os.sep)
commit_count = get_git_commit_count(dirname)
committers_count = get_git_commiter_count(dirname)
(first_date, last_date) = get_first_last_commit_date(dirname)
(organization_name, project_name) = owner_project_from_github_url(github_url)
(c_loc, cpp_loc, h_loc, assembly_loc) = get_c_cpp_h_assembly_loc(dirname)
last_hash = get_last_commit_hash(dirname)
project_name = data['name']
owner_name = data['owner']['login']
stargazers = data['stargazers_count']
forks = data['forks_count']
open_issues = data['open_issues_count']
description = data['description']
watchers = data['watchers_count']
fork = data['fork']
creation_date = datetime.datetime.strptime(data['created_at'], "%Y-%m-%dT%H:%M:%SZ").timestamp()
language = data['language']
query = """insert into GithubProjectUnfiltered(
GITHUB_OWNER_NAME,
GITHUB_PROJECT_NAME,
GITHUB_URL,
GITHUB_DESCRIPTION,
GITHUB_NR_STARGAZERS,
GITHUB_NR_WATCHERS,
GITHUB_NR_FORKS,
GITHUB_NR_OPEN_ISSUES,
GITHUB_REPO_CREATION_DATE,
GITHUB_LANGUAGE,
GITHUB_FORK,
PULL_HASH,
PULL_DATE,
CLOC_LOC_C,
CLOC_LOC_H,
CLOC_LOC_ASSEMBLY,
CLOC_LOC_CPP,
GIT_NR_COMMITS,
GIT_NR_COMMITTERS,
GIT_FIRST_COMMIT_DATE,
GIT_LAST_COMMIT_DATE,
PROCESSED
)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 0)
"""
try:
c.execute(query,
(owner_name,
project_name,
github_url,
description,
stargazers,
watchers,
forks,
open_issues,
datetime.datetime.fromtimestamp(creation_date).strftime('%Y-%m-%d'),
language,
fork,
last_hash,
datetime.datetime.now().strftime('%Y-%m-%d'),
c_loc,
h_loc,
assembly_loc,
cpp_loc,
commit_count,
committers_count,
datetime.datetime.fromtimestamp(first_date).strftime('%Y-%m-%d'),
datetime.datetime.fromtimestamp(last_date).strftime('%Y-%m-%d'))
)
conn.commit()
except sqlite3.IntegrityError as e:
print(github_url + " is a duplicate!")
def download_projects(stars_from, stars_to):
minutes = 5
current_stars = stars_from
while current_stars <= stars_to:
page = 1
hasResults = True
while hasResults:
if credentials_set:
github_url = github_template % (current_stars, page, github_login_user, github_login_password)
else:
github_url = github_template % (current_stars, page)
print("parsing projects with %d stars" % (current_stars, ))
try:
req = urllib.request.Request(github_url)
req.add_header('Cache-Control', 'max-age=0')
resp = urllib.request.urlopen(req)
json_file = json.loads(resp.read().decode())
#data = json.loads(url.read().decode())
projects = json_file['items']
count = json_file['total_count']
hasResults = count != 0
if hasResults:
print("starting to process " + str(count) + " items")
for project in projects:
if not exists(project[str("html_url")]):
insert_project_entry(project)
if count < per_page:
hasResults = False
except urllib.error.HTTPError as e:
#if e.code == 422:
# print('reached page limit ' + str(page))
# hasResults = False
if e.code == 403:
print('exceeded rate limit!')
print('waiting for ' + str(minutes) + 'minutes...')
for minute in range(minutes):
print('.', end="")
sys.stdout.flush()
time.sleep(60)
print()
continue
else:
print(github_url + " " + str(e))
exit(-1)
page += 1
current_stars += 1
if hasResults:
print("...waiting for one minute before continuing with " + str(current_stars) + " stars")
for minute in range(4):
print(".", end="")
time.sleep(15)
#download_projects(140, 145)
#download_projects(145, 150)
#download_projects(150, 155)
#download_projects(155, 160)
#download_projects(160, 170)
#download_projects(170, 175)
#download_projects(175, 180)
#download_projects(180, 185)
#download_projects(85, 190)
#download_projects(185, 220)
#download_projects(210, 230)
#download_projects(220, 250)
#download_projects(245, 255)
#download_projects(250, 260)
#download_projects(260, 270)
#download_projects(270, 290)
#download_projects(290, 300)
#download_projects(300, 310)
#download_projects(310, 320)
#download_projects(320, 330)
#download_projects(330, 340)
#download_projects(340, 360)
#download_projects(360, 400)
#download_projects(400, 410)
#download_projects(410, 420)
#download_projects(420, 430)
#download_projects(430, 450)
#download_projects(450, 490)
#download_projects(490, 495)
#download_projects(495, 500)
#download_projects(500, 520)
#download_projects(521, 550)
#download_projects(551, 600)
#download_projects(601, 650)
#download_projects(651, 700)
#download_projects(701, 750)
#download_projects(751, 800)
#download_projects(800, 820)
#download_projects(821, 850)
#download_projects(850, 900)
#download_projects(900, 920)
#download_projects(920, 940)
#download_projects(940, 960)
#download_projects(960, 1000)
#download_projects(1000, 1100)
#download_projects(1100, 1300)
#download_projects(1300, 1500)
#download_projects(1500, 1600)
#download_projects(1600, 1700)
#download_projects(1700, 1800)
#download_projects(1800, 1900)
#download_projects(1900, 1950)
#download_projects(1950, 2000)
#download_projects(2000, 2100)
#download_projects(2100, 2200)
#download_projects(2200, 2300)
#download_projects(2300, 2330)
#download_projects(2330, 3000)
#download_projects(3000, 3670)
#download_projects(3670, 5000)
#download_projects(5000, 8000)
# <placeholder>
from include.sync_views_to_tables import *
| query = """select COUNT(*) FROM GithubProjectUnfiltered WHERE GITHUB_URL = ? """
return c.execute(query, (url,)).fetchone()[0] == 1 | identifier_body |
fetch_github_projects.py | #!/usr/bin/env python3
import sqlite3
import argparse
import urllib.request, json
import math
import os
import time
import sys
import datetime
import re
import subprocess
dir = os.path.dirname(os.path.realpath(__file__))
project_dir = os.path.join(dir, '../projects')
conn = sqlite3.connect(os.path.join(dir, "../database.db"))
conn.execute("PRAGMA journal_mode=WAL")
c = conn.cursor()
per_page = 100
github_login_user = os.environ.get('GITHUB_CLIENT_ID', None)
github_login_password = os.environ.get('GITHUB_CLIENT_PASSWORD', None)
if github_login_user is None or github_login_password is None:
print('Please set up the Github credentials (environment variables GITHUB_CLIENT_ID and GITHUB_CLIENT_PASSWORD) due to the rate limit (see https://developer.github.com/v3/#authentication)!')
credentials_set = False
github_template = "https://api.github.com/search/repositories?q=+language:c+stars:%d&page=%d&per_page=" + str(per_page)
else:
credentials_set = True
github_template = "https://api.github.com/search/repositories?q=+language:c+stars:%d&page=%d&per_page=" + str(per_page) + "&client_id=%s&client_secret=%s"
def get_last_commit_hash(path):
process = subprocess.Popen(['git', 'rev-parse', 'HEAD'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
hash = stdout.decode().strip('\n')
return hash
def get_git_commit_count(path):
""" Gets the number of commits without merges from a Git repository. """
process = subprocess.Popen(['git', 'rev-list', 'HEAD', '--count', '--no-merges'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
number = stdout.decode().strip("\n")
return int(number)
def get_git_commiter_count(path):
""" Gets the number of committers from a Git repository. """
process = subprocess.Popen(['git', 'shortlog', '-sn'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
committers = stdout.decode("ISO-8859-1")
return len(committers.split('\n'))
def get_first_last_commit_date(path):
""" Gets the first and repository commit as a timestamp. """
# %at specifies a UNIX time stamp
process = subprocess.Popen(['git', 'log', '--format=%at'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
log = stdout.decode().strip('\n').split('\n')
last = int(log[0])
first = int(log[-1])
return (first, last)
def get_c_cpp_h_assembly_loc(path):
""" Gets the LOC of header and C files using cloc. """
try:
process = subprocess.Popen(['cloc', '.'], cwd=path, stdout=subprocess.PIPE)
except FileNotFoundError:
print("Failed to call cloc (see https://github.com/AlDanial/cloc), please install.")
exit(-1)
stdout, _ = process.communicate()
lines = stdout.decode().split('\n')
c_lines = 0
h_lines = 0
cpp_lines = 0
assembly_lines = 0
for line in lines:
c_match = re.match(r'C \s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if c_match:
c_lines = int(c_match.groups()[0])
h_match = re.match(r'C/C\+\+\sHeader\s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if h_match:
h_lines = int(h_match.groups()[0])
cpp_match = re.match(r'C\+\+\s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if cpp_match:
cpp_lines = int(cpp_match.groups()[0])
assembly_match = re.match(r'Assembly\s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if assembly_match:
assembly_lines = int(assembly_match.groups()[0])
return (c_lines, cpp_lines, h_lines, assembly_lines)
def | (url):
""" Extracts owner and project name from a Github URL. For example, for
https://github.com/graalvm/sulong it returns the tuple (graalvm, sulong). """
if not re.match('https://github.com/([a-zA-Z0-9-_]*)/[a-zA-Z0-9-_]*', url):
print(str(url) + "is not a valid url!")
exit(-1)
elements = url.split('/')
project_name = elements[-1]
organization_name = elements[-2]
return (organization_name, project_name)
def get_project_dir(url):
""" Map a Github URL to the local Github project directory. """
(project_owner, project_name) = owner_project_from_github_url(url)
project_dir_name = project_owner + '-' + project_name
project_dir_name = os.path.join(project_dir, project_dir_name)
return project_dir_name
def download_project(url):
project_dir_name = get_project_dir(url)
process = subprocess.Popen(['git', 'clone', url, project_dir_name], cwd=project_dir)
process.communicate()
return project_dir_name
def exists(url):
query = """select COUNT(*) FROM GithubProjectUnfiltered WHERE GITHUB_URL = ? """
return c.execute(query, (url,)).fetchone()[0] == 1
def insert_project_entry(data):
github_url = data[str("html_url")]
dirname = download_project(github_url)
dirs = dirname.rstrip(os.sep).split(os.sep)
commit_count = get_git_commit_count(dirname)
committers_count = get_git_commiter_count(dirname)
(first_date, last_date) = get_first_last_commit_date(dirname)
(organization_name, project_name) = owner_project_from_github_url(github_url)
(c_loc, cpp_loc, h_loc, assembly_loc) = get_c_cpp_h_assembly_loc(dirname)
last_hash = get_last_commit_hash(dirname)
project_name = data['name']
owner_name = data['owner']['login']
stargazers = data['stargazers_count']
forks = data['forks_count']
open_issues = data['open_issues_count']
description = data['description']
watchers = data['watchers_count']
fork = data['fork']
creation_date = datetime.datetime.strptime(data['created_at'], "%Y-%m-%dT%H:%M:%SZ").timestamp()
language = data['language']
query = """insert into GithubProjectUnfiltered(
GITHUB_OWNER_NAME,
GITHUB_PROJECT_NAME,
GITHUB_URL,
GITHUB_DESCRIPTION,
GITHUB_NR_STARGAZERS,
GITHUB_NR_WATCHERS,
GITHUB_NR_FORKS,
GITHUB_NR_OPEN_ISSUES,
GITHUB_REPO_CREATION_DATE,
GITHUB_LANGUAGE,
GITHUB_FORK,
PULL_HASH,
PULL_DATE,
CLOC_LOC_C,
CLOC_LOC_H,
CLOC_LOC_ASSEMBLY,
CLOC_LOC_CPP,
GIT_NR_COMMITS,
GIT_NR_COMMITTERS,
GIT_FIRST_COMMIT_DATE,
GIT_LAST_COMMIT_DATE,
PROCESSED
)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 0)
"""
try:
c.execute(query,
(owner_name,
project_name,
github_url,
description,
stargazers,
watchers,
forks,
open_issues,
datetime.datetime.fromtimestamp(creation_date).strftime('%Y-%m-%d'),
language,
fork,
last_hash,
datetime.datetime.now().strftime('%Y-%m-%d'),
c_loc,
h_loc,
assembly_loc,
cpp_loc,
commit_count,
committers_count,
datetime.datetime.fromtimestamp(first_date).strftime('%Y-%m-%d'),
datetime.datetime.fromtimestamp(last_date).strftime('%Y-%m-%d'))
)
conn.commit()
except sqlite3.IntegrityError as e:
print(github_url + " is a duplicate!")
def download_projects(stars_from, stars_to):
minutes = 5
current_stars = stars_from
while current_stars <= stars_to:
page = 1
hasResults = True
while hasResults:
if credentials_set:
github_url = github_template % (current_stars, page, github_login_user, github_login_password)
else:
github_url = github_template % (current_stars, page)
print("parsing projects with %d stars" % (current_stars, ))
try:
req = urllib.request.Request(github_url)
req.add_header('Cache-Control', 'max-age=0')
resp = urllib.request.urlopen(req)
json_file = json.loads(resp.read().decode())
#data = json.loads(url.read().decode())
projects = json_file['items']
count = json_file['total_count']
hasResults = count != 0
if hasResults:
print("starting to process " + str(count) + " items")
for project in projects:
if not exists(project[str("html_url")]):
insert_project_entry(project)
if count < per_page:
hasResults = False
except urllib.error.HTTPError as e:
#if e.code == 422:
# print('reached page limit ' + str(page))
# hasResults = False
if e.code == 403:
print('exceeded rate limit!')
print('waiting for ' + str(minutes) + 'minutes...')
for minute in range(minutes):
print('.', end="")
sys.stdout.flush()
time.sleep(60)
print()
continue
else:
print(github_url + " " + str(e))
exit(-1)
page += 1
current_stars += 1
if hasResults:
print("...waiting for one minute before continuing with " + str(current_stars) + " stars")
for minute in range(4):
print(".", end="")
time.sleep(15)
#download_projects(140, 145)
#download_projects(145, 150)
#download_projects(150, 155)
#download_projects(155, 160)
#download_projects(160, 170)
#download_projects(170, 175)
#download_projects(175, 180)
#download_projects(180, 185)
#download_projects(85, 190)
#download_projects(185, 220)
#download_projects(210, 230)
#download_projects(220, 250)
#download_projects(245, 255)
#download_projects(250, 260)
#download_projects(260, 270)
#download_projects(270, 290)
#download_projects(290, 300)
#download_projects(300, 310)
#download_projects(310, 320)
#download_projects(320, 330)
#download_projects(330, 340)
#download_projects(340, 360)
#download_projects(360, 400)
#download_projects(400, 410)
#download_projects(410, 420)
#download_projects(420, 430)
#download_projects(430, 450)
#download_projects(450, 490)
#download_projects(490, 495)
#download_projects(495, 500)
#download_projects(500, 520)
#download_projects(521, 550)
#download_projects(551, 600)
#download_projects(601, 650)
#download_projects(651, 700)
#download_projects(701, 750)
#download_projects(751, 800)
#download_projects(800, 820)
#download_projects(821, 850)
#download_projects(850, 900)
#download_projects(900, 920)
#download_projects(920, 940)
#download_projects(940, 960)
#download_projects(960, 1000)
#download_projects(1000, 1100)
#download_projects(1100, 1300)
#download_projects(1300, 1500)
#download_projects(1500, 1600)
#download_projects(1600, 1700)
#download_projects(1700, 1800)
#download_projects(1800, 1900)
#download_projects(1900, 1950)
#download_projects(1950, 2000)
#download_projects(2000, 2100)
#download_projects(2100, 2200)
#download_projects(2200, 2300)
#download_projects(2300, 2330)
#download_projects(2330, 3000)
#download_projects(3000, 3670)
#download_projects(3670, 5000)
#download_projects(5000, 8000)
# <placeholder>
from include.sync_views_to_tables import *
| owner_project_from_github_url | identifier_name |
fetch_github_projects.py | #!/usr/bin/env python3
import sqlite3
import argparse
import urllib.request, json
import math
import os
import time
import sys
import datetime
import re
import subprocess
dir = os.path.dirname(os.path.realpath(__file__))
project_dir = os.path.join(dir, '../projects')
conn = sqlite3.connect(os.path.join(dir, "../database.db"))
conn.execute("PRAGMA journal_mode=WAL")
c = conn.cursor()
per_page = 100
github_login_user = os.environ.get('GITHUB_CLIENT_ID', None)
github_login_password = os.environ.get('GITHUB_CLIENT_PASSWORD', None)
if github_login_user is None or github_login_password is None:
print('Please set up the Github credentials (environment variables GITHUB_CLIENT_ID and GITHUB_CLIENT_PASSWORD) due to the rate limit (see https://developer.github.com/v3/#authentication)!')
credentials_set = False
github_template = "https://api.github.com/search/repositories?q=+language:c+stars:%d&page=%d&per_page=" + str(per_page)
else:
credentials_set = True
github_template = "https://api.github.com/search/repositories?q=+language:c+stars:%d&page=%d&per_page=" + str(per_page) + "&client_id=%s&client_secret=%s"
def get_last_commit_hash(path):
process = subprocess.Popen(['git', 'rev-parse', 'HEAD'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
hash = stdout.decode().strip('\n')
return hash
def get_git_commit_count(path):
""" Gets the number of commits without merges from a Git repository. """
process = subprocess.Popen(['git', 'rev-list', 'HEAD', '--count', '--no-merges'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
number = stdout.decode().strip("\n")
return int(number)
def get_git_commiter_count(path):
""" Gets the number of committers from a Git repository. """
process = subprocess.Popen(['git', 'shortlog', '-sn'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
committers = stdout.decode("ISO-8859-1")
return len(committers.split('\n'))
def get_first_last_commit_date(path):
""" Gets the first and repository commit as a timestamp. """
# %at specifies a UNIX time stamp
process = subprocess.Popen(['git', 'log', '--format=%at'], cwd=path, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
log = stdout.decode().strip('\n').split('\n')
last = int(log[0])
first = int(log[-1])
return (first, last)
def get_c_cpp_h_assembly_loc(path):
""" Gets the LOC of header and C files using cloc. """
try:
process = subprocess.Popen(['cloc', '.'], cwd=path, stdout=subprocess.PIPE)
except FileNotFoundError:
print("Failed to call cloc (see https://github.com/AlDanial/cloc), please install.")
exit(-1)
stdout, _ = process.communicate()
lines = stdout.decode().split('\n')
c_lines = 0
h_lines = 0
cpp_lines = 0
assembly_lines = 0
for line in lines:
c_match = re.match(r'C \s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if c_match:
c_lines = int(c_match.groups()[0])
h_match = re.match(r'C/C\+\+\sHeader\s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if h_match:
h_lines = int(h_match.groups()[0])
cpp_match = re.match(r'C\+\+\s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if cpp_match:
cpp_lines = int(cpp_match.groups()[0])
assembly_match = re.match(r'Assembly\s+\d+\s+\d+\s+\d+\s+(\d+)', line, re.X)
if assembly_match:
assembly_lines = int(assembly_match.groups()[0])
return (c_lines, cpp_lines, h_lines, assembly_lines)
def owner_project_from_github_url(url):
""" Extracts owner and project name from a Github URL. For example, for
https://github.com/graalvm/sulong it returns the tuple (graalvm, sulong). """
if not re.match('https://github.com/([a-zA-Z0-9-_]*)/[a-zA-Z0-9-_]*', url):
print(str(url) + "is not a valid url!")
exit(-1)
elements = url.split('/')
project_name = elements[-1]
organization_name = elements[-2]
return (organization_name, project_name)
def get_project_dir(url):
""" Map a Github URL to the local Github project directory. """
(project_owner, project_name) = owner_project_from_github_url(url)
project_dir_name = project_owner + '-' + project_name
project_dir_name = os.path.join(project_dir, project_dir_name)
return project_dir_name
def download_project(url):
project_dir_name = get_project_dir(url)
process = subprocess.Popen(['git', 'clone', url, project_dir_name], cwd=project_dir)
process.communicate()
return project_dir_name
def exists(url):
query = """select COUNT(*) FROM GithubProjectUnfiltered WHERE GITHUB_URL = ? """
return c.execute(query, (url,)).fetchone()[0] == 1
def insert_project_entry(data):
github_url = data[str("html_url")]
dirname = download_project(github_url)
dirs = dirname.rstrip(os.sep).split(os.sep)
commit_count = get_git_commit_count(dirname)
committers_count = get_git_commiter_count(dirname)
(first_date, last_date) = get_first_last_commit_date(dirname)
(organization_name, project_name) = owner_project_from_github_url(github_url)
(c_loc, cpp_loc, h_loc, assembly_loc) = get_c_cpp_h_assembly_loc(dirname)
last_hash = get_last_commit_hash(dirname)
project_name = data['name']
owner_name = data['owner']['login']
stargazers = data['stargazers_count']
forks = data['forks_count']
open_issues = data['open_issues_count']
description = data['description']
watchers = data['watchers_count']
fork = data['fork']
creation_date = datetime.datetime.strptime(data['created_at'], "%Y-%m-%dT%H:%M:%SZ").timestamp()
language = data['language']
query = """insert into GithubProjectUnfiltered(
GITHUB_OWNER_NAME,
GITHUB_PROJECT_NAME,
GITHUB_URL,
GITHUB_DESCRIPTION,
GITHUB_NR_STARGAZERS,
GITHUB_NR_WATCHERS,
GITHUB_NR_FORKS,
GITHUB_NR_OPEN_ISSUES,
GITHUB_REPO_CREATION_DATE,
GITHUB_LANGUAGE,
GITHUB_FORK,
PULL_HASH,
PULL_DATE,
CLOC_LOC_C,
CLOC_LOC_H,
CLOC_LOC_ASSEMBLY,
CLOC_LOC_CPP,
GIT_NR_COMMITS,
GIT_NR_COMMITTERS,
GIT_FIRST_COMMIT_DATE,
GIT_LAST_COMMIT_DATE,
PROCESSED
)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 0)
"""
try:
c.execute(query,
(owner_name,
project_name,
github_url,
description,
stargazers,
watchers,
forks,
open_issues,
datetime.datetime.fromtimestamp(creation_date).strftime('%Y-%m-%d'),
language,
fork,
last_hash,
datetime.datetime.now().strftime('%Y-%m-%d'),
c_loc,
h_loc,
assembly_loc,
cpp_loc,
commit_count,
committers_count,
datetime.datetime.fromtimestamp(first_date).strftime('%Y-%m-%d'),
datetime.datetime.fromtimestamp(last_date).strftime('%Y-%m-%d'))
)
conn.commit()
except sqlite3.IntegrityError as e:
print(github_url + " is a duplicate!")
def download_projects(stars_from, stars_to):
minutes = 5
current_stars = stars_from
while current_stars <= stars_to:
page = 1
hasResults = True
while hasResults:
if credentials_set:
github_url = github_template % (current_stars, page, github_login_user, github_login_password)
else:
github_url = github_template % (current_stars, page)
print("parsing projects with %d stars" % (current_stars, ))
try:
req = urllib.request.Request(github_url)
req.add_header('Cache-Control', 'max-age=0')
resp = urllib.request.urlopen(req)
json_file = json.loads(resp.read().decode())
#data = json.loads(url.read().decode())
projects = json_file['items']
count = json_file['total_count']
hasResults = count != 0
if hasResults:
print("starting to process " + str(count) + " items")
for project in projects:
if not exists(project[str("html_url")]):
insert_project_entry(project)
if count < per_page:
hasResults = False
except urllib.error.HTTPError as e:
#if e.code == 422:
# print('reached page limit ' + str(page))
# hasResults = False
if e.code == 403:
print('exceeded rate limit!')
print('waiting for ' + str(minutes) + 'minutes...')
for minute in range(minutes):
print('.', end="")
sys.stdout.flush()
time.sleep(60)
print()
continue
else:
print(github_url + " " + str(e))
exit(-1)
page += 1
current_stars += 1
if hasResults:
print("...waiting for one minute before continuing with " + str(current_stars) + " stars")
for minute in range(4):
print(".", end="")
time.sleep(15)
#download_projects(140, 145)
#download_projects(145, 150)
#download_projects(150, 155)
#download_projects(155, 160)
#download_projects(160, 170)
#download_projects(170, 175)
#download_projects(175, 180)
#download_projects(180, 185)
#download_projects(85, 190)
#download_projects(185, 220)
#download_projects(210, 230)
#download_projects(220, 250)
#download_projects(245, 255)
#download_projects(250, 260)
#download_projects(260, 270)
#download_projects(270, 290)
#download_projects(290, 300)
#download_projects(300, 310)
#download_projects(310, 320)
#download_projects(320, 330)
#download_projects(330, 340)
#download_projects(340, 360)
#download_projects(360, 400)
#download_projects(400, 410)
#download_projects(410, 420)
#download_projects(420, 430)
#download_projects(430, 450)
#download_projects(450, 490)
#download_projects(490, 495)
#download_projects(495, 500)
#download_projects(500, 520) | #download_projects(521, 550)
#download_projects(551, 600)
#download_projects(601, 650)
#download_projects(651, 700)
#download_projects(701, 750)
#download_projects(751, 800)
#download_projects(800, 820)
#download_projects(821, 850)
#download_projects(850, 900)
#download_projects(900, 920)
#download_projects(920, 940)
#download_projects(940, 960)
#download_projects(960, 1000)
#download_projects(1000, 1100)
#download_projects(1100, 1300)
#download_projects(1300, 1500)
#download_projects(1500, 1600)
#download_projects(1600, 1700)
#download_projects(1700, 1800)
#download_projects(1800, 1900)
#download_projects(1900, 1950)
#download_projects(1950, 2000)
#download_projects(2000, 2100)
#download_projects(2100, 2200)
#download_projects(2200, 2300)
#download_projects(2300, 2330)
#download_projects(2330, 3000)
#download_projects(3000, 3670)
#download_projects(3670, 5000)
#download_projects(5000, 8000)
# <placeholder>
from include.sync_views_to_tables import * | random_line_split | |
partition_fsm.go | // Copyright 2018 The Chubao Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
package metanode
import (
"encoding/binary"
"encoding/json"
"fmt"
"io"
"net"
"sync/atomic"
"time"
"io/ioutil"
"os"
"path"
"github.com/chubaofs/chubaofs/proto"
"github.com/chubaofs/chubaofs/util/exporter"
"github.com/chubaofs/chubaofs/util/log"
"github.com/tiglabs/raft"
raftproto "github.com/tiglabs/raft/proto"
)
// Apply applies the given operational commands.
func (mp *metaPartition) Apply(command []byte, index uint64) (resp interface{}, err error) {
msg := &MetaItem{}
defer func() {
if err == nil {
mp.uploadApplyID(index)
}
}()
if err = msg.UnmarshalJson(command); err != nil {
return
}
switch msg.Op {
case opFSMCreateInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
if mp.config.Cursor < ino.Inode {
mp.config.Cursor = ino.Inode
}
resp = mp.fsmCreateInode(ino)
case opFSMUnlinkInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmUnlinkInode(ino)
case opFSMUnlinkInodeBatch:
inodes, err := InodeBatchUnmarshal(msg.V)
if err != nil {
return nil, err
}
resp = mp.fsmUnlinkInodeBatch(inodes)
case opFSMExtentTruncate:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmExtentsTruncate(ino)
case opFSMCreateLinkInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmCreateLinkInode(ino)
case opFSMEvictInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmEvictInode(ino)
case opFSMEvictInodeBatch:
inodes, err := InodeBatchUnmarshal(msg.V)
if err != nil {
return nil, err
}
resp = mp.fsmBatchEvictInode(inodes)
case opFSMSetAttr:
req := &SetattrRequest{}
err = json.Unmarshal(msg.V, req)
if err != nil {
return
}
err = mp.fsmSetAttr(req)
case opFSMCreateDentry:
den := &Dentry{}
if err = den.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmCreateDentry(den, false)
case opFSMDeleteDentry:
den := &Dentry{}
if err = den.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmDeleteDentry(den, false)
case opFSMDeleteDentryBatch:
db, err := DentryBatchUnmarshal(msg.V)
if err != nil {
return nil, err
}
resp = mp.fsmBatchDeleteDentry(db)
case opFSMUpdateDentry:
den := &Dentry{}
if err = den.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmUpdateDentry(den)
case opFSMUpdatePartition:
req := &UpdatePartitionReq{}
if err = json.Unmarshal(msg.V, req); err != nil {
return
}
resp, err = mp.fsmUpdatePartition(req.End)
case opFSMExtentsAdd:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmAppendExtents(ino)
case opFSMExtentsAddWithCheck:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmAppendExtentsWithCheck(ino)
case opFSMStoreTick:
inodeTree := mp.getInodeTree()
dentryTree := mp.getDentryTree()
extendTree := mp.extendTree.GetTree()
multipartTree := mp.multipartTree.GetTree()
msg := &storeMsg{
command: opFSMStoreTick,
applyIndex: index,
inodeTree: inodeTree,
dentryTree: dentryTree,
extendTree: extendTree,
multipartTree: multipartTree,
}
mp.storeChan <- msg
case opFSMInternalDeleteInode:
err = mp.internalDelete(msg.V)
case opFSMInternalDeleteInodeBatch:
err = mp.internalDeleteBatch(msg.V)
case opFSMInternalDelExtentFile:
err = mp.delOldExtentFile(msg.V)
case opFSMInternalDelExtentCursor:
err = mp.setExtentDeleteFileCursor(msg.V)
case opFSMSetXAttr:
var extend *Extend
if extend, err = NewExtendFromBytes(msg.V); err != nil {
return
}
err = mp.fsmSetXAttr(extend)
case opFSMRemoveXAttr:
var extend *Extend
if extend, err = NewExtendFromBytes(msg.V); err != nil {
return
}
err = mp.fsmRemoveXAttr(extend)
case opFSMCreateMultipart:
var multipart *Multipart
multipart = MultipartFromBytes(msg.V)
resp = mp.fsmCreateMultipart(multipart)
case opFSMRemoveMultipart:
var multipart *Multipart
multipart = MultipartFromBytes(msg.V)
resp = mp.fsmRemoveMultipart(multipart)
case opFSMAppendMultipart:
var multipart *Multipart
multipart = MultipartFromBytes(msg.V)
resp = mp.fsmAppendMultipart(multipart)
case opFSMSyncCursor:
var cursor uint64
cursor = binary.BigEndian.Uint64(msg.V)
if cursor > mp.config.Cursor {
mp.config.Cursor = cursor
}
}
return
}
| if err == nil {
mp.uploadApplyID(index)
}
}()
// change memory status
var (
updated bool
)
switch confChange.Type {
case raftproto.ConfAddNode:
req := &proto.AddMetaPartitionRaftMemberRequest{}
if err = json.Unmarshal(confChange.Context, req); err != nil {
return
}
updated, err = mp.confAddNode(req, index)
case raftproto.ConfRemoveNode:
req := &proto.RemoveMetaPartitionRaftMemberRequest{}
if err = json.Unmarshal(confChange.Context, req); err != nil {
return
}
updated, err = mp.confRemoveNode(req, index)
case raftproto.ConfUpdateNode:
//updated, err = mp.confUpdateNode(req, index)
}
if err != nil {
return
}
if updated {
mp.config.sortPeers()
if err = mp.persistMetadata(); err != nil {
log.LogErrorf("action[ApplyMemberChange] err[%v].", err)
return
}
}
return
}
// Snapshot returns the snapshot of the current meta partition.
func (mp *metaPartition) Snapshot() (snap raftproto.Snapshot, err error) {
snap, err = newMetaItemIterator(mp)
return
}
// ApplySnapshot applies the given snapshots.
func (mp *metaPartition) ApplySnapshot(peers []raftproto.Peer, iter raftproto.SnapIterator) (err error) {
var (
data []byte
index int
appIndexID uint64
cursor uint64
inodeTree = NewBtree()
dentryTree = NewBtree()
extendTree = NewBtree()
multipartTree = NewBtree()
)
defer func() {
if err == io.EOF {
mp.applyID = appIndexID
mp.inodeTree = inodeTree
mp.dentryTree = dentryTree
mp.extendTree = extendTree
mp.multipartTree = multipartTree
mp.config.Cursor = cursor
err = nil
// store message
mp.storeChan <- &storeMsg{
command: opFSMStoreTick,
applyIndex: mp.applyID,
inodeTree: mp.inodeTree,
dentryTree: mp.dentryTree,
extendTree: mp.extendTree,
multipartTree: mp.multipartTree,
}
mp.extReset <- struct{}{}
log.LogDebugf("ApplySnapshot: finish with EOF: partitionID(%v) applyID(%v)", mp.config.PartitionId, mp.applyID)
return
}
log.LogErrorf("ApplySnapshot: stop with error: partitionID(%v) err(%v)", mp.config.PartitionId, err)
}()
for {
data, err = iter.Next()
if err != nil {
return
}
if index == 0 {
appIndexID = binary.BigEndian.Uint64(data)
index++
continue
}
snap := NewMetaItem(0, nil, nil)
if err = snap.UnmarshalBinary(data); err != nil {
return
}
index++
switch snap.Op {
case opFSMCreateInode:
ino := NewInode(0, 0)
// TODO Unhandled errors
ino.UnmarshalKey(snap.K)
ino.UnmarshalValue(snap.V)
if cursor < ino.Inode {
cursor = ino.Inode
}
inodeTree.ReplaceOrInsert(ino, true)
log.LogDebugf("ApplySnapshot: create inode: partitonID(%v) inode(%v).", mp.config.PartitionId, ino)
case opFSMCreateDentry:
dentry := &Dentry{}
if err = dentry.UnmarshalKey(snap.K); err != nil {
return
}
if err = dentry.UnmarshalValue(snap.V); err != nil {
return
}
dentryTree.ReplaceOrInsert(dentry, true)
log.LogDebugf("ApplySnapshot: create dentry: partitionID(%v) dentry(%v)", mp.config.PartitionId, dentry)
case opFSMSetXAttr:
var extend *Extend
if extend, err = NewExtendFromBytes(snap.V); err != nil {
return
}
extendTree.ReplaceOrInsert(extend, true)
log.LogDebugf("ApplySnapshot: set extend attributes: partitionID(%v) extend(%v)",
mp.config.PartitionId, extend)
case opFSMCreateMultipart:
var multipart = MultipartFromBytes(snap.V)
multipartTree.ReplaceOrInsert(multipart, true)
log.LogDebugf("ApplySnapshot: create multipart: partitionID(%v) multipart(%v)", mp.config.PartitionId, multipart)
case opExtentFileSnapshot:
fileName := string(snap.K)
fileName = path.Join(mp.config.RootDir, fileName)
if err = ioutil.WriteFile(fileName, snap.V, 0644); err != nil {
log.LogErrorf("ApplySnapshot: write snap extent delete file fail: partitionID(%v) err(%v)",
mp.config.PartitionId, err)
}
log.LogDebugf("ApplySnapshot: write snap extent delete file: partitonID(%v) filename(%v).",
mp.config.PartitionId, fileName)
default:
err = fmt.Errorf("unknown op=%d", snap.Op)
return
}
}
}
// HandleFatalEvent handles the fatal errors.
func (mp *metaPartition) HandleFatalEvent(err *raft.FatalError) {
// Panic while fatal event happen.
exporter.Warning(fmt.Sprintf("action[HandleFatalEvent] err[%v].", err))
log.LogFatalf("action[HandleFatalEvent] err[%v].", err)
panic(err.Err)
}
// HandleLeaderChange handles the leader changes.
func (mp *metaPartition) HandleLeaderChange(leader uint64) {
exporter.Warning(fmt.Sprintf("metaPartition(%v) changeLeader to (%v)", mp.config.PartitionId, leader))
if mp.config.NodeId == leader {
conn, err := net.DialTimeout("tcp", net.JoinHostPort("127.0.0.1", serverPort), time.Second)
if err != nil {
log.LogErrorf(fmt.Sprintf("HandleLeaderChange serverPort not exsit ,error %v", err))
go mp.raftPartition.TryToLeader(mp.config.PartitionId)
return
}
log.LogDebugf("[metaPartition] HandleLeaderChange close conn %v, nodeId: %v, leader: %v", serverPort, mp.config.NodeId, leader)
conn.(*net.TCPConn).SetLinger(0)
conn.Close()
}
if mp.config.NodeId != leader {
log.LogDebugf("[metaPartition] pid: %v HandleLeaderChange become unleader nodeId: %v, leader: %v", mp.config.PartitionId, mp.config.NodeId, leader)
mp.storeChan <- &storeMsg{
command: stopStoreTick,
}
return
}
mp.storeChan <- &storeMsg{
command: startStoreTick,
}
log.LogDebugf("[metaPartition] pid: %v HandleLeaderChange become leader conn %v, nodeId: %v, leader: %v", mp.config.PartitionId, serverPort, mp.config.NodeId, leader)
if mp.config.Start == 0 && mp.config.Cursor == 0 {
id, err := mp.nextInodeID()
if err != nil {
log.LogFatalf("[HandleLeaderChange] init root inode id: %s.", err.Error())
}
ino := NewInode(id, proto.Mode(os.ModePerm|os.ModeDir))
go mp.initInode(ino)
}
}
// Put puts the given key-value pair (operation key and operation request) into the raft store.
func (mp *metaPartition) submit(op uint32, data []byte) (resp interface{}, err error) {
snap := NewMetaItem(0, nil, nil)
snap.Op = op
if data != nil {
snap.V = data
}
cmd, err := snap.MarshalJson()
if err != nil {
return
}
// submit to the raft store
resp, err = mp.raftPartition.Submit(cmd)
return
}
func (mp *metaPartition) uploadApplyID(applyId uint64) {
atomic.StoreUint64(&mp.applyID, applyId)
} | // ApplyMemberChange apply changes to the raft member.
func (mp *metaPartition) ApplyMemberChange(confChange *raftproto.ConfChange, index uint64) (resp interface{}, err error) {
defer func() { | random_line_split |
partition_fsm.go | // Copyright 2018 The Chubao Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
package metanode
import (
"encoding/binary"
"encoding/json"
"fmt"
"io"
"net"
"sync/atomic"
"time"
"io/ioutil"
"os"
"path"
"github.com/chubaofs/chubaofs/proto"
"github.com/chubaofs/chubaofs/util/exporter"
"github.com/chubaofs/chubaofs/util/log"
"github.com/tiglabs/raft"
raftproto "github.com/tiglabs/raft/proto"
)
// Apply applies the given operational commands.
func (mp *metaPartition) Apply(command []byte, index uint64) (resp interface{}, err error) {
msg := &MetaItem{}
defer func() {
if err == nil {
mp.uploadApplyID(index)
}
}()
if err = msg.UnmarshalJson(command); err != nil {
return
}
switch msg.Op {
case opFSMCreateInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
if mp.config.Cursor < ino.Inode {
mp.config.Cursor = ino.Inode
}
resp = mp.fsmCreateInode(ino)
case opFSMUnlinkInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmUnlinkInode(ino)
case opFSMUnlinkInodeBatch:
inodes, err := InodeBatchUnmarshal(msg.V)
if err != nil {
return nil, err
}
resp = mp.fsmUnlinkInodeBatch(inodes)
case opFSMExtentTruncate:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmExtentsTruncate(ino)
case opFSMCreateLinkInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmCreateLinkInode(ino)
case opFSMEvictInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmEvictInode(ino)
case opFSMEvictInodeBatch:
inodes, err := InodeBatchUnmarshal(msg.V)
if err != nil {
return nil, err
}
resp = mp.fsmBatchEvictInode(inodes)
case opFSMSetAttr:
req := &SetattrRequest{}
err = json.Unmarshal(msg.V, req)
if err != nil {
return
}
err = mp.fsmSetAttr(req)
case opFSMCreateDentry:
den := &Dentry{}
if err = den.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmCreateDentry(den, false)
case opFSMDeleteDentry:
den := &Dentry{}
if err = den.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmDeleteDentry(den, false)
case opFSMDeleteDentryBatch:
db, err := DentryBatchUnmarshal(msg.V)
if err != nil {
return nil, err
}
resp = mp.fsmBatchDeleteDentry(db)
case opFSMUpdateDentry:
den := &Dentry{}
if err = den.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmUpdateDentry(den)
case opFSMUpdatePartition:
req := &UpdatePartitionReq{}
if err = json.Unmarshal(msg.V, req); err != nil {
return
}
resp, err = mp.fsmUpdatePartition(req.End)
case opFSMExtentsAdd:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmAppendExtents(ino)
case opFSMExtentsAddWithCheck:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmAppendExtentsWithCheck(ino)
case opFSMStoreTick:
inodeTree := mp.getInodeTree()
dentryTree := mp.getDentryTree()
extendTree := mp.extendTree.GetTree()
multipartTree := mp.multipartTree.GetTree()
msg := &storeMsg{
command: opFSMStoreTick,
applyIndex: index,
inodeTree: inodeTree,
dentryTree: dentryTree,
extendTree: extendTree,
multipartTree: multipartTree,
}
mp.storeChan <- msg
case opFSMInternalDeleteInode:
err = mp.internalDelete(msg.V)
case opFSMInternalDeleteInodeBatch:
err = mp.internalDeleteBatch(msg.V)
case opFSMInternalDelExtentFile:
err = mp.delOldExtentFile(msg.V)
case opFSMInternalDelExtentCursor:
err = mp.setExtentDeleteFileCursor(msg.V)
case opFSMSetXAttr:
var extend *Extend
if extend, err = NewExtendFromBytes(msg.V); err != nil {
return
}
err = mp.fsmSetXAttr(extend)
case opFSMRemoveXAttr:
var extend *Extend
if extend, err = NewExtendFromBytes(msg.V); err != nil {
return
}
err = mp.fsmRemoveXAttr(extend)
case opFSMCreateMultipart:
var multipart *Multipart
multipart = MultipartFromBytes(msg.V)
resp = mp.fsmCreateMultipart(multipart)
case opFSMRemoveMultipart:
var multipart *Multipart
multipart = MultipartFromBytes(msg.V)
resp = mp.fsmRemoveMultipart(multipart)
case opFSMAppendMultipart:
var multipart *Multipart
multipart = MultipartFromBytes(msg.V)
resp = mp.fsmAppendMultipart(multipart)
case opFSMSyncCursor:
var cursor uint64
cursor = binary.BigEndian.Uint64(msg.V)
if cursor > mp.config.Cursor {
mp.config.Cursor = cursor
}
}
return
}
// ApplyMemberChange apply changes to the raft member.
func (mp *metaPartition) ApplyMemberChange(confChange *raftproto.ConfChange, index uint64) (resp interface{}, err error) {
defer func() {
if err == nil {
mp.uploadApplyID(index)
}
}()
// change memory status
var (
updated bool
)
switch confChange.Type {
case raftproto.ConfAddNode:
req := &proto.AddMetaPartitionRaftMemberRequest{}
if err = json.Unmarshal(confChange.Context, req); err != nil {
return
}
updated, err = mp.confAddNode(req, index)
case raftproto.ConfRemoveNode:
req := &proto.RemoveMetaPartitionRaftMemberRequest{}
if err = json.Unmarshal(confChange.Context, req); err != nil {
return
}
updated, err = mp.confRemoveNode(req, index)
case raftproto.ConfUpdateNode:
//updated, err = mp.confUpdateNode(req, index)
}
if err != nil {
return
}
if updated {
mp.config.sortPeers()
if err = mp.persistMetadata(); err != nil {
log.LogErrorf("action[ApplyMemberChange] err[%v].", err)
return
}
}
return
}
// Snapshot returns the snapshot of the current meta partition.
func (mp *metaPartition) Snapshot() (snap raftproto.Snapshot, err error) {
snap, err = newMetaItemIterator(mp)
return
}
// ApplySnapshot applies the given snapshots.
func (mp *metaPartition) ApplySnapshot(peers []raftproto.Peer, iter raftproto.SnapIterator) (err error) {
var (
data []byte
index int
appIndexID uint64
cursor uint64
inodeTree = NewBtree()
dentryTree = NewBtree()
extendTree = NewBtree()
multipartTree = NewBtree()
)
defer func() {
if err == io.EOF {
mp.applyID = appIndexID
mp.inodeTree = inodeTree
mp.dentryTree = dentryTree
mp.extendTree = extendTree
mp.multipartTree = multipartTree
mp.config.Cursor = cursor
err = nil
// store message
mp.storeChan <- &storeMsg{
command: opFSMStoreTick,
applyIndex: mp.applyID,
inodeTree: mp.inodeTree,
dentryTree: mp.dentryTree,
extendTree: mp.extendTree,
multipartTree: mp.multipartTree,
}
mp.extReset <- struct{}{}
log.LogDebugf("ApplySnapshot: finish with EOF: partitionID(%v) applyID(%v)", mp.config.PartitionId, mp.applyID)
return
}
log.LogErrorf("ApplySnapshot: stop with error: partitionID(%v) err(%v)", mp.config.PartitionId, err)
}()
for {
data, err = iter.Next()
if err != nil {
return
}
if index == 0 {
appIndexID = binary.BigEndian.Uint64(data)
index++
continue
}
snap := NewMetaItem(0, nil, nil)
if err = snap.UnmarshalBinary(data); err != nil {
return
}
index++
switch snap.Op {
case opFSMCreateInode:
ino := NewInode(0, 0)
// TODO Unhandled errors
ino.UnmarshalKey(snap.K)
ino.UnmarshalValue(snap.V)
if cursor < ino.Inode {
cursor = ino.Inode
}
inodeTree.ReplaceOrInsert(ino, true)
log.LogDebugf("ApplySnapshot: create inode: partitonID(%v) inode(%v).", mp.config.PartitionId, ino)
case opFSMCreateDentry:
dentry := &Dentry{}
if err = dentry.UnmarshalKey(snap.K); err != nil {
return
}
if err = dentry.UnmarshalValue(snap.V); err != nil {
return
}
dentryTree.ReplaceOrInsert(dentry, true)
log.LogDebugf("ApplySnapshot: create dentry: partitionID(%v) dentry(%v)", mp.config.PartitionId, dentry)
case opFSMSetXAttr:
var extend *Extend
if extend, err = NewExtendFromBytes(snap.V); err != nil {
return
}
extendTree.ReplaceOrInsert(extend, true)
log.LogDebugf("ApplySnapshot: set extend attributes: partitionID(%v) extend(%v)",
mp.config.PartitionId, extend)
case opFSMCreateMultipart:
var multipart = MultipartFromBytes(snap.V)
multipartTree.ReplaceOrInsert(multipart, true)
log.LogDebugf("ApplySnapshot: create multipart: partitionID(%v) multipart(%v)", mp.config.PartitionId, multipart)
case opExtentFileSnapshot:
fileName := string(snap.K)
fileName = path.Join(mp.config.RootDir, fileName)
if err = ioutil.WriteFile(fileName, snap.V, 0644); err != nil {
log.LogErrorf("ApplySnapshot: write snap extent delete file fail: partitionID(%v) err(%v)",
mp.config.PartitionId, err)
}
log.LogDebugf("ApplySnapshot: write snap extent delete file: partitonID(%v) filename(%v).",
mp.config.PartitionId, fileName)
default:
err = fmt.Errorf("unknown op=%d", snap.Op)
return
}
}
}
// HandleFatalEvent handles the fatal errors.
func (mp *metaPartition) HandleFatalEvent(err *raft.FatalError) {
// Panic while fatal event happen.
exporter.Warning(fmt.Sprintf("action[HandleFatalEvent] err[%v].", err))
log.LogFatalf("action[HandleFatalEvent] err[%v].", err)
panic(err.Err)
}
// HandleLeaderChange handles the leader changes.
func (mp *metaPartition) HandleLeaderChange(leader uint64) |
// Put puts the given key-value pair (operation key and operation request) into the raft store.
func (mp *metaPartition) submit(op uint32, data []byte) (resp interface{}, err error) {
snap := NewMetaItem(0, nil, nil)
snap.Op = op
if data != nil {
snap.V = data
}
cmd, err := snap.MarshalJson()
if err != nil {
return
}
// submit to the raft store
resp, err = mp.raftPartition.Submit(cmd)
return
}
func (mp *metaPartition) uploadApplyID(applyId uint64) {
atomic.StoreUint64(&mp.applyID, applyId)
}
| {
exporter.Warning(fmt.Sprintf("metaPartition(%v) changeLeader to (%v)", mp.config.PartitionId, leader))
if mp.config.NodeId == leader {
conn, err := net.DialTimeout("tcp", net.JoinHostPort("127.0.0.1", serverPort), time.Second)
if err != nil {
log.LogErrorf(fmt.Sprintf("HandleLeaderChange serverPort not exsit ,error %v", err))
go mp.raftPartition.TryToLeader(mp.config.PartitionId)
return
}
log.LogDebugf("[metaPartition] HandleLeaderChange close conn %v, nodeId: %v, leader: %v", serverPort, mp.config.NodeId, leader)
conn.(*net.TCPConn).SetLinger(0)
conn.Close()
}
if mp.config.NodeId != leader {
log.LogDebugf("[metaPartition] pid: %v HandleLeaderChange become unleader nodeId: %v, leader: %v", mp.config.PartitionId, mp.config.NodeId, leader)
mp.storeChan <- &storeMsg{
command: stopStoreTick,
}
return
}
mp.storeChan <- &storeMsg{
command: startStoreTick,
}
log.LogDebugf("[metaPartition] pid: %v HandleLeaderChange become leader conn %v, nodeId: %v, leader: %v", mp.config.PartitionId, serverPort, mp.config.NodeId, leader)
if mp.config.Start == 0 && mp.config.Cursor == 0 {
id, err := mp.nextInodeID()
if err != nil {
log.LogFatalf("[HandleLeaderChange] init root inode id: %s.", err.Error())
}
ino := NewInode(id, proto.Mode(os.ModePerm|os.ModeDir))
go mp.initInode(ino)
}
} | identifier_body |
partition_fsm.go | // Copyright 2018 The Chubao Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
package metanode
import (
"encoding/binary"
"encoding/json"
"fmt"
"io"
"net"
"sync/atomic"
"time"
"io/ioutil"
"os"
"path"
"github.com/chubaofs/chubaofs/proto"
"github.com/chubaofs/chubaofs/util/exporter"
"github.com/chubaofs/chubaofs/util/log"
"github.com/tiglabs/raft"
raftproto "github.com/tiglabs/raft/proto"
)
// Apply applies the given operational commands.
func (mp *metaPartition) | (command []byte, index uint64) (resp interface{}, err error) {
msg := &MetaItem{}
defer func() {
if err == nil {
mp.uploadApplyID(index)
}
}()
if err = msg.UnmarshalJson(command); err != nil {
return
}
switch msg.Op {
case opFSMCreateInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
if mp.config.Cursor < ino.Inode {
mp.config.Cursor = ino.Inode
}
resp = mp.fsmCreateInode(ino)
case opFSMUnlinkInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmUnlinkInode(ino)
case opFSMUnlinkInodeBatch:
inodes, err := InodeBatchUnmarshal(msg.V)
if err != nil {
return nil, err
}
resp = mp.fsmUnlinkInodeBatch(inodes)
case opFSMExtentTruncate:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmExtentsTruncate(ino)
case opFSMCreateLinkInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmCreateLinkInode(ino)
case opFSMEvictInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmEvictInode(ino)
case opFSMEvictInodeBatch:
inodes, err := InodeBatchUnmarshal(msg.V)
if err != nil {
return nil, err
}
resp = mp.fsmBatchEvictInode(inodes)
case opFSMSetAttr:
req := &SetattrRequest{}
err = json.Unmarshal(msg.V, req)
if err != nil {
return
}
err = mp.fsmSetAttr(req)
case opFSMCreateDentry:
den := &Dentry{}
if err = den.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmCreateDentry(den, false)
case opFSMDeleteDentry:
den := &Dentry{}
if err = den.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmDeleteDentry(den, false)
case opFSMDeleteDentryBatch:
db, err := DentryBatchUnmarshal(msg.V)
if err != nil {
return nil, err
}
resp = mp.fsmBatchDeleteDentry(db)
case opFSMUpdateDentry:
den := &Dentry{}
if err = den.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmUpdateDentry(den)
case opFSMUpdatePartition:
req := &UpdatePartitionReq{}
if err = json.Unmarshal(msg.V, req); err != nil {
return
}
resp, err = mp.fsmUpdatePartition(req.End)
case opFSMExtentsAdd:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmAppendExtents(ino)
case opFSMExtentsAddWithCheck:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmAppendExtentsWithCheck(ino)
case opFSMStoreTick:
inodeTree := mp.getInodeTree()
dentryTree := mp.getDentryTree()
extendTree := mp.extendTree.GetTree()
multipartTree := mp.multipartTree.GetTree()
msg := &storeMsg{
command: opFSMStoreTick,
applyIndex: index,
inodeTree: inodeTree,
dentryTree: dentryTree,
extendTree: extendTree,
multipartTree: multipartTree,
}
mp.storeChan <- msg
case opFSMInternalDeleteInode:
err = mp.internalDelete(msg.V)
case opFSMInternalDeleteInodeBatch:
err = mp.internalDeleteBatch(msg.V)
case opFSMInternalDelExtentFile:
err = mp.delOldExtentFile(msg.V)
case opFSMInternalDelExtentCursor:
err = mp.setExtentDeleteFileCursor(msg.V)
case opFSMSetXAttr:
var extend *Extend
if extend, err = NewExtendFromBytes(msg.V); err != nil {
return
}
err = mp.fsmSetXAttr(extend)
case opFSMRemoveXAttr:
var extend *Extend
if extend, err = NewExtendFromBytes(msg.V); err != nil {
return
}
err = mp.fsmRemoveXAttr(extend)
case opFSMCreateMultipart:
var multipart *Multipart
multipart = MultipartFromBytes(msg.V)
resp = mp.fsmCreateMultipart(multipart)
case opFSMRemoveMultipart:
var multipart *Multipart
multipart = MultipartFromBytes(msg.V)
resp = mp.fsmRemoveMultipart(multipart)
case opFSMAppendMultipart:
var multipart *Multipart
multipart = MultipartFromBytes(msg.V)
resp = mp.fsmAppendMultipart(multipart)
case opFSMSyncCursor:
var cursor uint64
cursor = binary.BigEndian.Uint64(msg.V)
if cursor > mp.config.Cursor {
mp.config.Cursor = cursor
}
}
return
}
// ApplyMemberChange apply changes to the raft member.
func (mp *metaPartition) ApplyMemberChange(confChange *raftproto.ConfChange, index uint64) (resp interface{}, err error) {
defer func() {
if err == nil {
mp.uploadApplyID(index)
}
}()
// change memory status
var (
updated bool
)
switch confChange.Type {
case raftproto.ConfAddNode:
req := &proto.AddMetaPartitionRaftMemberRequest{}
if err = json.Unmarshal(confChange.Context, req); err != nil {
return
}
updated, err = mp.confAddNode(req, index)
case raftproto.ConfRemoveNode:
req := &proto.RemoveMetaPartitionRaftMemberRequest{}
if err = json.Unmarshal(confChange.Context, req); err != nil {
return
}
updated, err = mp.confRemoveNode(req, index)
case raftproto.ConfUpdateNode:
//updated, err = mp.confUpdateNode(req, index)
}
if err != nil {
return
}
if updated {
mp.config.sortPeers()
if err = mp.persistMetadata(); err != nil {
log.LogErrorf("action[ApplyMemberChange] err[%v].", err)
return
}
}
return
}
// Snapshot returns the snapshot of the current meta partition.
func (mp *metaPartition) Snapshot() (snap raftproto.Snapshot, err error) {
snap, err = newMetaItemIterator(mp)
return
}
// ApplySnapshot applies the given snapshots.
func (mp *metaPartition) ApplySnapshot(peers []raftproto.Peer, iter raftproto.SnapIterator) (err error) {
var (
data []byte
index int
appIndexID uint64
cursor uint64
inodeTree = NewBtree()
dentryTree = NewBtree()
extendTree = NewBtree()
multipartTree = NewBtree()
)
defer func() {
if err == io.EOF {
mp.applyID = appIndexID
mp.inodeTree = inodeTree
mp.dentryTree = dentryTree
mp.extendTree = extendTree
mp.multipartTree = multipartTree
mp.config.Cursor = cursor
err = nil
// store message
mp.storeChan <- &storeMsg{
command: opFSMStoreTick,
applyIndex: mp.applyID,
inodeTree: mp.inodeTree,
dentryTree: mp.dentryTree,
extendTree: mp.extendTree,
multipartTree: mp.multipartTree,
}
mp.extReset <- struct{}{}
log.LogDebugf("ApplySnapshot: finish with EOF: partitionID(%v) applyID(%v)", mp.config.PartitionId, mp.applyID)
return
}
log.LogErrorf("ApplySnapshot: stop with error: partitionID(%v) err(%v)", mp.config.PartitionId, err)
}()
for {
data, err = iter.Next()
if err != nil {
return
}
if index == 0 {
appIndexID = binary.BigEndian.Uint64(data)
index++
continue
}
snap := NewMetaItem(0, nil, nil)
if err = snap.UnmarshalBinary(data); err != nil {
return
}
index++
switch snap.Op {
case opFSMCreateInode:
ino := NewInode(0, 0)
// TODO Unhandled errors
ino.UnmarshalKey(snap.K)
ino.UnmarshalValue(snap.V)
if cursor < ino.Inode {
cursor = ino.Inode
}
inodeTree.ReplaceOrInsert(ino, true)
log.LogDebugf("ApplySnapshot: create inode: partitonID(%v) inode(%v).", mp.config.PartitionId, ino)
case opFSMCreateDentry:
dentry := &Dentry{}
if err = dentry.UnmarshalKey(snap.K); err != nil {
return
}
if err = dentry.UnmarshalValue(snap.V); err != nil {
return
}
dentryTree.ReplaceOrInsert(dentry, true)
log.LogDebugf("ApplySnapshot: create dentry: partitionID(%v) dentry(%v)", mp.config.PartitionId, dentry)
case opFSMSetXAttr:
var extend *Extend
if extend, err = NewExtendFromBytes(snap.V); err != nil {
return
}
extendTree.ReplaceOrInsert(extend, true)
log.LogDebugf("ApplySnapshot: set extend attributes: partitionID(%v) extend(%v)",
mp.config.PartitionId, extend)
case opFSMCreateMultipart:
var multipart = MultipartFromBytes(snap.V)
multipartTree.ReplaceOrInsert(multipart, true)
log.LogDebugf("ApplySnapshot: create multipart: partitionID(%v) multipart(%v)", mp.config.PartitionId, multipart)
case opExtentFileSnapshot:
fileName := string(snap.K)
fileName = path.Join(mp.config.RootDir, fileName)
if err = ioutil.WriteFile(fileName, snap.V, 0644); err != nil {
log.LogErrorf("ApplySnapshot: write snap extent delete file fail: partitionID(%v) err(%v)",
mp.config.PartitionId, err)
}
log.LogDebugf("ApplySnapshot: write snap extent delete file: partitonID(%v) filename(%v).",
mp.config.PartitionId, fileName)
default:
err = fmt.Errorf("unknown op=%d", snap.Op)
return
}
}
}
// HandleFatalEvent handles the fatal errors.
func (mp *metaPartition) HandleFatalEvent(err *raft.FatalError) {
// Panic while fatal event happen.
exporter.Warning(fmt.Sprintf("action[HandleFatalEvent] err[%v].", err))
log.LogFatalf("action[HandleFatalEvent] err[%v].", err)
panic(err.Err)
}
// HandleLeaderChange handles the leader changes.
func (mp *metaPartition) HandleLeaderChange(leader uint64) {
exporter.Warning(fmt.Sprintf("metaPartition(%v) changeLeader to (%v)", mp.config.PartitionId, leader))
if mp.config.NodeId == leader {
conn, err := net.DialTimeout("tcp", net.JoinHostPort("127.0.0.1", serverPort), time.Second)
if err != nil {
log.LogErrorf(fmt.Sprintf("HandleLeaderChange serverPort not exsit ,error %v", err))
go mp.raftPartition.TryToLeader(mp.config.PartitionId)
return
}
log.LogDebugf("[metaPartition] HandleLeaderChange close conn %v, nodeId: %v, leader: %v", serverPort, mp.config.NodeId, leader)
conn.(*net.TCPConn).SetLinger(0)
conn.Close()
}
if mp.config.NodeId != leader {
log.LogDebugf("[metaPartition] pid: %v HandleLeaderChange become unleader nodeId: %v, leader: %v", mp.config.PartitionId, mp.config.NodeId, leader)
mp.storeChan <- &storeMsg{
command: stopStoreTick,
}
return
}
mp.storeChan <- &storeMsg{
command: startStoreTick,
}
log.LogDebugf("[metaPartition] pid: %v HandleLeaderChange become leader conn %v, nodeId: %v, leader: %v", mp.config.PartitionId, serverPort, mp.config.NodeId, leader)
if mp.config.Start == 0 && mp.config.Cursor == 0 {
id, err := mp.nextInodeID()
if err != nil {
log.LogFatalf("[HandleLeaderChange] init root inode id: %s.", err.Error())
}
ino := NewInode(id, proto.Mode(os.ModePerm|os.ModeDir))
go mp.initInode(ino)
}
}
// Put puts the given key-value pair (operation key and operation request) into the raft store.
func (mp *metaPartition) submit(op uint32, data []byte) (resp interface{}, err error) {
snap := NewMetaItem(0, nil, nil)
snap.Op = op
if data != nil {
snap.V = data
}
cmd, err := snap.MarshalJson()
if err != nil {
return
}
// submit to the raft store
resp, err = mp.raftPartition.Submit(cmd)
return
}
func (mp *metaPartition) uploadApplyID(applyId uint64) {
atomic.StoreUint64(&mp.applyID, applyId)
}
| Apply | identifier_name |
partition_fsm.go | // Copyright 2018 The Chubao Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
package metanode
import (
"encoding/binary"
"encoding/json"
"fmt"
"io"
"net"
"sync/atomic"
"time"
"io/ioutil"
"os"
"path"
"github.com/chubaofs/chubaofs/proto"
"github.com/chubaofs/chubaofs/util/exporter"
"github.com/chubaofs/chubaofs/util/log"
"github.com/tiglabs/raft"
raftproto "github.com/tiglabs/raft/proto"
)
// Apply applies the given operational commands.
func (mp *metaPartition) Apply(command []byte, index uint64) (resp interface{}, err error) {
msg := &MetaItem{}
defer func() {
if err == nil {
mp.uploadApplyID(index)
}
}()
if err = msg.UnmarshalJson(command); err != nil {
return
}
switch msg.Op {
case opFSMCreateInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
if mp.config.Cursor < ino.Inode {
mp.config.Cursor = ino.Inode
}
resp = mp.fsmCreateInode(ino)
case opFSMUnlinkInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmUnlinkInode(ino)
case opFSMUnlinkInodeBatch:
inodes, err := InodeBatchUnmarshal(msg.V)
if err != nil {
return nil, err
}
resp = mp.fsmUnlinkInodeBatch(inodes)
case opFSMExtentTruncate:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmExtentsTruncate(ino)
case opFSMCreateLinkInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmCreateLinkInode(ino)
case opFSMEvictInode:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmEvictInode(ino)
case opFSMEvictInodeBatch:
inodes, err := InodeBatchUnmarshal(msg.V)
if err != nil {
return nil, err
}
resp = mp.fsmBatchEvictInode(inodes)
case opFSMSetAttr:
req := &SetattrRequest{}
err = json.Unmarshal(msg.V, req)
if err != nil {
return
}
err = mp.fsmSetAttr(req)
case opFSMCreateDentry:
den := &Dentry{}
if err = den.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmCreateDentry(den, false)
case opFSMDeleteDentry:
den := &Dentry{}
if err = den.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmDeleteDentry(den, false)
case opFSMDeleteDentryBatch:
db, err := DentryBatchUnmarshal(msg.V)
if err != nil {
return nil, err
}
resp = mp.fsmBatchDeleteDentry(db)
case opFSMUpdateDentry:
den := &Dentry{}
if err = den.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmUpdateDentry(den)
case opFSMUpdatePartition:
req := &UpdatePartitionReq{}
if err = json.Unmarshal(msg.V, req); err != nil {
return
}
resp, err = mp.fsmUpdatePartition(req.End)
case opFSMExtentsAdd:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmAppendExtents(ino)
case opFSMExtentsAddWithCheck:
ino := NewInode(0, 0)
if err = ino.Unmarshal(msg.V); err != nil {
return
}
resp = mp.fsmAppendExtentsWithCheck(ino)
case opFSMStoreTick:
inodeTree := mp.getInodeTree()
dentryTree := mp.getDentryTree()
extendTree := mp.extendTree.GetTree()
multipartTree := mp.multipartTree.GetTree()
msg := &storeMsg{
command: opFSMStoreTick,
applyIndex: index,
inodeTree: inodeTree,
dentryTree: dentryTree,
extendTree: extendTree,
multipartTree: multipartTree,
}
mp.storeChan <- msg
case opFSMInternalDeleteInode:
err = mp.internalDelete(msg.V)
case opFSMInternalDeleteInodeBatch:
err = mp.internalDeleteBatch(msg.V)
case opFSMInternalDelExtentFile:
err = mp.delOldExtentFile(msg.V)
case opFSMInternalDelExtentCursor:
err = mp.setExtentDeleteFileCursor(msg.V)
case opFSMSetXAttr:
var extend *Extend
if extend, err = NewExtendFromBytes(msg.V); err != nil |
err = mp.fsmSetXAttr(extend)
case opFSMRemoveXAttr:
var extend *Extend
if extend, err = NewExtendFromBytes(msg.V); err != nil {
return
}
err = mp.fsmRemoveXAttr(extend)
case opFSMCreateMultipart:
var multipart *Multipart
multipart = MultipartFromBytes(msg.V)
resp = mp.fsmCreateMultipart(multipart)
case opFSMRemoveMultipart:
var multipart *Multipart
multipart = MultipartFromBytes(msg.V)
resp = mp.fsmRemoveMultipart(multipart)
case opFSMAppendMultipart:
var multipart *Multipart
multipart = MultipartFromBytes(msg.V)
resp = mp.fsmAppendMultipart(multipart)
case opFSMSyncCursor:
var cursor uint64
cursor = binary.BigEndian.Uint64(msg.V)
if cursor > mp.config.Cursor {
mp.config.Cursor = cursor
}
}
return
}
// ApplyMemberChange apply changes to the raft member.
func (mp *metaPartition) ApplyMemberChange(confChange *raftproto.ConfChange, index uint64) (resp interface{}, err error) {
defer func() {
if err == nil {
mp.uploadApplyID(index)
}
}()
// change memory status
var (
updated bool
)
switch confChange.Type {
case raftproto.ConfAddNode:
req := &proto.AddMetaPartitionRaftMemberRequest{}
if err = json.Unmarshal(confChange.Context, req); err != nil {
return
}
updated, err = mp.confAddNode(req, index)
case raftproto.ConfRemoveNode:
req := &proto.RemoveMetaPartitionRaftMemberRequest{}
if err = json.Unmarshal(confChange.Context, req); err != nil {
return
}
updated, err = mp.confRemoveNode(req, index)
case raftproto.ConfUpdateNode:
//updated, err = mp.confUpdateNode(req, index)
}
if err != nil {
return
}
if updated {
mp.config.sortPeers()
if err = mp.persistMetadata(); err != nil {
log.LogErrorf("action[ApplyMemberChange] err[%v].", err)
return
}
}
return
}
// Snapshot returns the snapshot of the current meta partition.
func (mp *metaPartition) Snapshot() (snap raftproto.Snapshot, err error) {
snap, err = newMetaItemIterator(mp)
return
}
// ApplySnapshot applies the given snapshots.
func (mp *metaPartition) ApplySnapshot(peers []raftproto.Peer, iter raftproto.SnapIterator) (err error) {
var (
data []byte
index int
appIndexID uint64
cursor uint64
inodeTree = NewBtree()
dentryTree = NewBtree()
extendTree = NewBtree()
multipartTree = NewBtree()
)
defer func() {
if err == io.EOF {
mp.applyID = appIndexID
mp.inodeTree = inodeTree
mp.dentryTree = dentryTree
mp.extendTree = extendTree
mp.multipartTree = multipartTree
mp.config.Cursor = cursor
err = nil
// store message
mp.storeChan <- &storeMsg{
command: opFSMStoreTick,
applyIndex: mp.applyID,
inodeTree: mp.inodeTree,
dentryTree: mp.dentryTree,
extendTree: mp.extendTree,
multipartTree: mp.multipartTree,
}
mp.extReset <- struct{}{}
log.LogDebugf("ApplySnapshot: finish with EOF: partitionID(%v) applyID(%v)", mp.config.PartitionId, mp.applyID)
return
}
log.LogErrorf("ApplySnapshot: stop with error: partitionID(%v) err(%v)", mp.config.PartitionId, err)
}()
for {
data, err = iter.Next()
if err != nil {
return
}
if index == 0 {
appIndexID = binary.BigEndian.Uint64(data)
index++
continue
}
snap := NewMetaItem(0, nil, nil)
if err = snap.UnmarshalBinary(data); err != nil {
return
}
index++
switch snap.Op {
case opFSMCreateInode:
ino := NewInode(0, 0)
// TODO Unhandled errors
ino.UnmarshalKey(snap.K)
ino.UnmarshalValue(snap.V)
if cursor < ino.Inode {
cursor = ino.Inode
}
inodeTree.ReplaceOrInsert(ino, true)
log.LogDebugf("ApplySnapshot: create inode: partitonID(%v) inode(%v).", mp.config.PartitionId, ino)
case opFSMCreateDentry:
dentry := &Dentry{}
if err = dentry.UnmarshalKey(snap.K); err != nil {
return
}
if err = dentry.UnmarshalValue(snap.V); err != nil {
return
}
dentryTree.ReplaceOrInsert(dentry, true)
log.LogDebugf("ApplySnapshot: create dentry: partitionID(%v) dentry(%v)", mp.config.PartitionId, dentry)
case opFSMSetXAttr:
var extend *Extend
if extend, err = NewExtendFromBytes(snap.V); err != nil {
return
}
extendTree.ReplaceOrInsert(extend, true)
log.LogDebugf("ApplySnapshot: set extend attributes: partitionID(%v) extend(%v)",
mp.config.PartitionId, extend)
case opFSMCreateMultipart:
var multipart = MultipartFromBytes(snap.V)
multipartTree.ReplaceOrInsert(multipart, true)
log.LogDebugf("ApplySnapshot: create multipart: partitionID(%v) multipart(%v)", mp.config.PartitionId, multipart)
case opExtentFileSnapshot:
fileName := string(snap.K)
fileName = path.Join(mp.config.RootDir, fileName)
if err = ioutil.WriteFile(fileName, snap.V, 0644); err != nil {
log.LogErrorf("ApplySnapshot: write snap extent delete file fail: partitionID(%v) err(%v)",
mp.config.PartitionId, err)
}
log.LogDebugf("ApplySnapshot: write snap extent delete file: partitonID(%v) filename(%v).",
mp.config.PartitionId, fileName)
default:
err = fmt.Errorf("unknown op=%d", snap.Op)
return
}
}
}
// HandleFatalEvent handles the fatal errors.
func (mp *metaPartition) HandleFatalEvent(err *raft.FatalError) {
// Panic while fatal event happen.
exporter.Warning(fmt.Sprintf("action[HandleFatalEvent] err[%v].", err))
log.LogFatalf("action[HandleFatalEvent] err[%v].", err)
panic(err.Err)
}
// HandleLeaderChange handles the leader changes.
func (mp *metaPartition) HandleLeaderChange(leader uint64) {
exporter.Warning(fmt.Sprintf("metaPartition(%v) changeLeader to (%v)", mp.config.PartitionId, leader))
if mp.config.NodeId == leader {
conn, err := net.DialTimeout("tcp", net.JoinHostPort("127.0.0.1", serverPort), time.Second)
if err != nil {
log.LogErrorf(fmt.Sprintf("HandleLeaderChange serverPort not exsit ,error %v", err))
go mp.raftPartition.TryToLeader(mp.config.PartitionId)
return
}
log.LogDebugf("[metaPartition] HandleLeaderChange close conn %v, nodeId: %v, leader: %v", serverPort, mp.config.NodeId, leader)
conn.(*net.TCPConn).SetLinger(0)
conn.Close()
}
if mp.config.NodeId != leader {
log.LogDebugf("[metaPartition] pid: %v HandleLeaderChange become unleader nodeId: %v, leader: %v", mp.config.PartitionId, mp.config.NodeId, leader)
mp.storeChan <- &storeMsg{
command: stopStoreTick,
}
return
}
mp.storeChan <- &storeMsg{
command: startStoreTick,
}
log.LogDebugf("[metaPartition] pid: %v HandleLeaderChange become leader conn %v, nodeId: %v, leader: %v", mp.config.PartitionId, serverPort, mp.config.NodeId, leader)
if mp.config.Start == 0 && mp.config.Cursor == 0 {
id, err := mp.nextInodeID()
if err != nil {
log.LogFatalf("[HandleLeaderChange] init root inode id: %s.", err.Error())
}
ino := NewInode(id, proto.Mode(os.ModePerm|os.ModeDir))
go mp.initInode(ino)
}
}
// Put puts the given key-value pair (operation key and operation request) into the raft store.
func (mp *metaPartition) submit(op uint32, data []byte) (resp interface{}, err error) {
snap := NewMetaItem(0, nil, nil)
snap.Op = op
if data != nil {
snap.V = data
}
cmd, err := snap.MarshalJson()
if err != nil {
return
}
// submit to the raft store
resp, err = mp.raftPartition.Submit(cmd)
return
}
func (mp *metaPartition) uploadApplyID(applyId uint64) {
atomic.StoreUint64(&mp.applyID, applyId)
}
| {
return
} | conditional_block |
options.rs | //! CLI option parsing.
use std::{env, ffi::OsStr, path::Path, path::PathBuf};
use clap::{crate_authors, crate_description, Arg, Command};
use const_format::formatcp;
use crossterm::tty::IsTty;
use itertools::Itertools;
use crate::{
display::style::BackgroundColor,
exit_codes::EXIT_BAD_ARGUMENTS,
parse::guess_language::{language_override_from_name, LanguageOverride},
version::VERSION,
};
pub const DEFAULT_BYTE_LIMIT: usize = 1_000_000;
// Chosen experimentally: this is sufficiently many for all the sample
// files (the highest is slow_before/after.rs at 1.3M nodes), but
// small enough to terminate in ~5 seconds like the test file in #306.
pub const DEFAULT_GRAPH_LIMIT: usize = 3_000_000;
pub const DEFAULT_PARSE_ERROR_LIMIT: usize = 0;
pub const DEFAULT_TAB_WIDTH: usize = 8;
const USAGE: &str = concat!(env!("CARGO_BIN_NAME"), " [OPTIONS] OLD-PATH NEW-PATH");
#[derive(Debug, Clone, Copy)]
pub enum ColorOutput {
Always,
Auto,
Never,
}
#[derive(Debug, Clone)]
pub struct DisplayOptions {
pub background_color: BackgroundColor,
pub use_color: bool,
pub display_mode: DisplayMode,
pub print_unchanged: bool,
pub tab_width: usize,
pub display_width: usize,
pub num_context_lines: u32,
pub in_vcs: bool,
pub syntax_highlight: bool,
}
impl Default for DisplayOptions {
fn default() -> Self {
Self {
background_color: BackgroundColor::Dark,
use_color: false,
display_mode: DisplayMode::SideBySide,
print_unchanged: true,
tab_width: 8,
display_width: 80,
num_context_lines: 3,
in_vcs: false,
syntax_highlight: true,
}
}
}
#[derive(Debug, Clone)]
pub struct DiffOptions {
pub graph_limit: usize,
pub byte_limit: usize,
pub parse_error_limit: usize,
pub check_only: bool,
pub ignore_comments: bool,
}
impl Default for DiffOptions {
fn default() -> Self {
Self {
graph_limit: DEFAULT_GRAPH_LIMIT,
byte_limit: DEFAULT_BYTE_LIMIT,
parse_error_limit: DEFAULT_PARSE_ERROR_LIMIT,
check_only: false,
ignore_comments: false,
}
}
}
/// Build the clap `Command` describing difftastic's full CLI surface.
///
/// Fixes two user-facing help-string defects: "Print the all the
/// languages" -> "Print all the languages", and "graph exceed" ->
/// "graph exceeds".
fn app() -> clap::Command<'static> {
    Command::new("Difftastic")
        .override_usage(USAGE)
        .version(VERSION.as_str())
        .about(crate_description!())
        .author(crate_authors!())
        .after_long_help(concat!(
            "You can compare two files with difftastic by specifying them as arguments.\n\n",
            "$ ",
            env!("CARGO_BIN_NAME"),
            " old.js new.js\n\n",
            "You can also use directories as arguments. Difftastic will walk both directories and compare files with matching names.\n\n",
            "$ ",
            env!("CARGO_BIN_NAME"),
            " old/ new/\n\n",
            "If you have a file with conflict markers, you can pass it as a single argument. Difftastic will diff the two conflicting file states.\n\n",
            "$ ",
            env!("CARGO_BIN_NAME"),
            " file_with_conflicts.js\n\n",
            "Difftastic can also be invoked with 7 arguments in the format that GIT_EXTERNAL_DIFF expects.\n\n",
            "See the full manual at: https://difftastic.wilfred.me.uk/")
        )
        // Debug-only flags: parse a single file and dump internal trees.
        .arg(
            Arg::new("dump-syntax")
                .long("dump-syntax")
                .takes_value(true)
                .value_name("PATH")
                .long_help(
                    "Parse a single file with tree-sitter and display the difftastic syntax tree.",
                ).help_heading("DEBUG OPTIONS"),
        )
        .arg(
            Arg::new("dump-ts")
                .long("dump-ts")
                .takes_value(true)
                .value_name("PATH")
                .long_help(
                    "Parse a single file with tree-sitter and display the tree-sitter parse tree.",
                ).help_heading("DEBUG OPTIONS"),
        )
        // Display-related options.
        .arg(
            Arg::new("context")
                .long("context")
                .takes_value(true)
                .value_name("LINES")
                .long_help("The number of contextual lines to show around changed lines.")
                .default_value("3")
                .env("DFT_CONTEXT")
                .validator(|s| s.parse::<u32>())
                .required(false),
        )
        .arg(
            Arg::new("width")
                .long("width")
                .takes_value(true)
                .value_name("COLUMNS")
                .long_help("Use this many columns when calculating line wrapping. If not specified, difftastic will detect the terminal width.")
                .env("DFT_WIDTH")
                .validator(|s| s.parse::<usize>())
                .required(false),
        )
        .arg(
            Arg::new("tab-width")
                .long("tab-width")
                .takes_value(true)
                .value_name("NUM_SPACES")
                .long_help("Treat a tab as this many spaces.")
                .env("DFT_TAB_WIDTH")
                .default_value(formatcp!("{}", DEFAULT_TAB_WIDTH))
                .validator(|s| s.parse::<usize>())
                .required(false),
        )
        .arg(
            Arg::new("display").long("display")
                .possible_values(["side-by-side", "side-by-side-show-both", "inline", "json"])
                .default_value("side-by-side")
                .value_name("MODE")
                .env("DFT_DISPLAY")
                .help("Display mode for showing results.
side-by-side: Display the before file and the after file in two separate columns, with line numbers aligned according to unchanged content. If a change is exclusively additions or exclusively removals, use a single column.
side-by-side-show-both: The same as side-by-side, but always uses two columns.
inline: A single column display, closer to traditional diff display.
json: Output the results as a machine-readable JSON array with an element per file.")
        )
        .arg(
            Arg::new("color").long("color")
                .possible_values(["always", "auto", "never"])
                .default_value("auto")
                .env("DFT_COLOR")
                .value_name("WHEN")
                .help("When to use color output.")
        )
        .arg(
            Arg::new("background").long("background")
                .value_name("BACKGROUND")
                .env("DFT_BACKGROUND")
                .possible_values(["dark", "light"])
                .default_value("dark")
                .help("Set the background brightness. Difftastic will prefer brighter colours on dark backgrounds.")
        )
        .arg(
            Arg::new("syntax-highlight").long("syntax-highlight")
                .value_name("on/off")
                .env("DFT_SYNTAX_HIGHLIGHT")
                .possible_values(["on", "off"])
                .default_value("on")
                .help("Enable or disable syntax highlighting.")
        )
        // Behaviour flags.
        .arg(
            Arg::new("exit-code").long("exit-code")
                .env("DFT_EXIT_CODE")
                .help("Set the exit code to 1 if there are syntactic changes in any files. For files where there is no detected language (e.g. unsupported language or binary files), sets the exit code if there are any byte changes.")
        )
        .arg(
            Arg::new("check-only").long("check-only")
                .env("DFT_CHECK_ONLY")
                .help("Report whether there are any changes, but don't calculate them. Much faster.")
        )
        .arg(
            Arg::new("ignore-comments").long("ignore-comments")
                .env("DFT_IGNORE_COMMENTS")
                .help("Don't consider comments when diffing.")
        )
        .arg(
            Arg::new("skip-unchanged").long("skip-unchanged")
                .help("Don't display anything if a file is unchanged.")
        )
        .arg(
            Arg::new("missing-as-empty").long("missing-as-empty")
                .help("Treat paths that don't exist as equivalent to an empty file. Only applies when diffing files, not directories.")
        )
        .arg(
            Arg::new("override").long("override")
                .value_name("GLOB:NAME")
                .help(concat!("Associate this glob pattern with this language, overriding normal language detection. For example:
$ ", env!("CARGO_BIN_NAME"), " --override='*.c:C++' old.c new.c
See --list-languages for the list of language names. Language names are matched case insensitively. Overrides may also specify the language \"text\" to treat a file as plain text.
This argument may be given more than once. For example:
$ ", env!("CARGO_BIN_NAME"), " --override='CustomFile:json' --override='*.c:text' old.c new.c
To configure multiple overrides using environment variables, difftastic also accepts DFT_OVERRIDE_1 up to DFT_OVERRIDE_9.
$ export DFT_OVERRIDE='CustomFile:json'
$ export DFT_OVERRIDE_1='*.c:text'
$ export DFT_OVERRIDE_2='*.js:javascript jsx'
When multiple overrides are specified, the first matching override wins."))
                .env("DFT_OVERRIDE")
                .multiple_occurrences(true)
        )
        .arg(
            Arg::new("list-languages").long("list-languages")
                .help("Print all the languages supported by difftastic, along with their extensions.")
        )
        // Resource limits: beyond these, fall back to a plain text diff.
        .arg(
            Arg::new("byte-limit").long("byte-limit")
                .takes_value(true)
                .value_name("LIMIT")
                .help("Use a text diff if either input file exceeds this size.")
                .default_value(formatcp!("{}", DEFAULT_BYTE_LIMIT))
                .env("DFT_BYTE_LIMIT")
                .validator(|s| s.parse::<usize>())
                .required(false),
        )
        .arg(
            Arg::new("graph-limit").long("graph-limit")
                .takes_value(true)
                .value_name("LIMIT")
                .help("Use a text diff if the structural graph exceeds this number of nodes in memory.")
                .default_value(formatcp!("{}", DEFAULT_GRAPH_LIMIT))
                .env("DFT_GRAPH_LIMIT")
                .validator(|s| s.parse::<usize>())
                .required(false),
        )
        .arg(
            Arg::new("parse-error-limit").long("parse-error-limit")
                .takes_value(true)
                .value_name("LIMIT")
                .help("Use a text diff if the number of parse errors exceeds this value.")
                .default_value(formatcp!("{}", DEFAULT_PARSE_ERROR_LIMIT))
                .env("DFT_PARSE_ERROR_LIMIT")
                .validator(|s| s.parse::<usize>())
                .required(false),
        )
        // Positional paths (2, 7 or 9 of them); hidden because the usage
        // string documents the common case.
        .arg(
            Arg::new("paths")
                .value_name("PATHS")
                .multiple_values(true)
                .hide(true)
                .allow_invalid_utf8(true),
        )
        .arg_required_else_help(true)
}
/// How results are laid out (the --display flag).
#[derive(Debug, Copy, Clone)]
pub enum DisplayMode {
    /// Single-column display, closer to traditional diff output.
    Inline,
    /// Two columns, collapsing to one when a change is purely
    /// additions or purely removals.
    SideBySide,
    /// Two columns, always.
    SideBySideShowBoth,
    /// Machine-readable JSON output (unstable; gated on DFT_UNSTABLE).
    Json,
}
/// A file position argument from the command line.
#[derive(Eq, PartialEq, Debug)]
pub enum FileArgument {
    /// A concrete path on disk.
    NamedPath(std::path::PathBuf),
    /// Read content from standard input (`-` on the command line).
    Stdin,
    /// The null device (`/dev/null`), used by git for added/deleted files.
    DevNull,
}
/// Canonicalize `path`, falling back to the path unchanged if
/// canonicalization fails (e.g. the path does not exist).
///
/// (Restores the function body that the dataset's fill-in-the-middle
/// extraction removed; the body matches the duplicate copy later in
/// this file.)
fn try_canonicalize(path: &Path) -> PathBuf {
    path.canonicalize().unwrap_or_else(|_| path.into())
}
/// Express `path` relative to the current working directory when possible.
///
/// Both sides are canonicalized first so symlinks and `..` components do
/// not defeat the prefix match. If there is no current directory or
/// `path` is not underneath it, `path` is returned unchanged.
fn relative_to_current(path: &Path) -> PathBuf {
    if let Ok(current_path) = std::env::current_dir() {
        let path = try_canonicalize(path);
        // Fix: the argument had been mangled to `¤t_path` by an
        // HTML-entity decoding error (`&curren` -> ¤). It must be
        // `&current_path`, which did not compile as written.
        let current_path = try_canonicalize(&current_path);
        if let Ok(rel_path) = path.strip_prefix(current_path) {
            return rel_path.into();
        }
    }
    path.into()
}
impl FileArgument {
    /// Interpret a user-supplied command line argument.
    ///
    /// `-` denotes stdin and `/dev/null` the null device; any other
    /// value is kept as a literal path.
    pub fn from_cli_argument(arg: &OsStr) -> Self {
        if arg == "/dev/null" {
            Self::DevNull
        } else if arg == "-" {
            Self::Stdin
        } else {
            Self::NamedPath(PathBuf::from(arg))
        }
    }

    /// Interpret an argument that is known to name a path that exists,
    /// except for `/dev/null`, which becomes [FileArgument::DevNull].
    /// Git passes `/dev/null` as the reference file for new and deleted
    /// files.
    pub fn from_path_argument(arg: &OsStr) -> Self {
        if arg == "/dev/null" {
            Self::DevNull
        } else {
            Self::NamedPath(PathBuf::from(arg))
        }
    }

    /// Human-readable form of this argument, showing named paths
    /// relative to the current directory when possible.
    pub fn display(&self) -> String {
        match self {
            Self::DevNull => String::from("/dev/null"),
            Self::Stdin => String::from("(stdin)"),
            Self::NamedPath(path) => relative_to_current(path).display().to_string(),
        }
    }
}
/// The operation the binary should perform, as decided by `parse_args`.
pub enum Mode {
    /// Diff two files or directories.
    Diff {
        diff_options: DiffOptions,
        display_options: DisplayOptions,
        set_exit_code: bool,
        language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
        /// The path where we can read the LHS file. This is often a
        /// temporary file generated by source control.
        lhs_path: FileArgument,
        /// The path where we can read the RHS file. This is often a
        /// temporary file generated by source control.
        rhs_path: FileArgument,
        /// The path that we show to the user.
        display_path: String,
        /// If this file has been renamed, the name it had previously.
        old_path: Option<String>,
    },
    /// Diff the two conflicting states of a single file that contains
    /// conflict markers.
    DiffFromConflicts {
        diff_options: DiffOptions,
        display_options: DisplayOptions,
        set_exit_code: bool,
        language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
        path: FileArgument,
        /// The path that we show to the user.
        display_path: String,
    },
    /// Print the supported languages (--list-languages).
    ListLanguages {
        use_color: bool,
        language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
    },
    /// Debug: dump the tree-sitter parse tree for one file (--dump-ts).
    DumpTreeSitter {
        path: String,
        language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
    },
    /// Debug: dump the difftastic syntax tree for one file (--dump-syntax).
    DumpSyntax {
        path: String,
        ignore_comments: bool,
        language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
    },
}
/// The longest run of trailing path components shared by both paths,
/// joined with the platform separator. `None` if even the final
/// components differ.
fn common_path_suffix(lhs_path: &Path, rhs_path: &Path) -> Option<String> {
    let lhs_parts: Vec<_> = lhs_path.components().map(|c| c.as_os_str()).collect();
    let rhs_parts: Vec<_> = rhs_path.components().map(|c| c.as_os_str()).collect();

    // Walk backwards from the end of each path, keeping components
    // while they agree.
    let mut shared: Vec<String> = lhs_parts
        .iter()
        .rev()
        .zip(rhs_parts.iter().rev())
        .take_while(|(lhs, rhs)| lhs == rhs)
        .map(|(component, _)| component.to_string_lossy().into_owned())
        .collect();

    if shared.is_empty() {
        None
    } else {
        shared.reverse();
        Some(shared.join(&std::path::MAIN_SEPARATOR.to_string()))
    }
}
fn build_display_path(lhs_path: &FileArgument, rhs_path: &FileArgument) -> String {
match (lhs_path, rhs_path) {
(FileArgument::NamedPath(lhs), FileArgument::NamedPath(rhs)) => {
match common_path_suffix(lhs, rhs) {
Some(common_suffix) => common_suffix,
None => rhs.display().to_string(),
}
}
(FileArgument::NamedPath(p), _) | (_, FileArgument::NamedPath(p)) => {
p.display().to_string()
}
(FileArgument::DevNull, _) | (_, FileArgument::DevNull) => "/dev/null".into(),
(FileArgument::Stdin, FileArgument::Stdin) => "-".into(),
}
}
/// Parse `GLOB:LANG_NAME` override strings, exiting the process with
/// EXIT_BAD_ARGUMENTS if any entry is malformed.
///
/// Adjacent entries naming the same language are merged into a single
/// `(language, globs)` pair; entry order is otherwise preserved, so the
/// first matching override wins.
fn parse_overrides_or_die(raw_overrides: &[String]) -> Vec<(LanguageOverride, Vec<glob::Pattern>)> {
    let mut parsed: Vec<(LanguageOverride, Vec<glob::Pattern>)> = vec![];
    let mut saw_bad_input = false;

    for raw_override in raw_overrides {
        match raw_override.rsplit_once(':') {
            Some((glob_str, lang_name)) => match glob::Pattern::new(glob_str) {
                Ok(pattern) => match language_override_from_name(lang_name) {
                    Some(language_override) => parsed.push((language_override, vec![pattern])),
                    None => {
                        eprintln!("No such language '{}'", lang_name);
                        eprintln!("See --list-languages for the names of all languages available. Language overrides are case insensitive.");
                        saw_bad_input = true;
                    }
                },
                Err(e) => {
                    eprintln!("Invalid glob syntax '{}'", glob_str);
                    eprintln!("Glob parsing error: {}", e.msg);
                    saw_bad_input = true;
                }
            },
            None => {
                eprintln!("Invalid language override syntax '{}'", raw_override);
                eprintln!("Language overrides are in the format 'GLOB:LANG_NAME', e.g. '*.js:JSON'.");
                saw_bad_input = true;
            }
        }
    }

    if saw_bad_input {
        std::process::exit(EXIT_BAD_ARGUMENTS);
    }

    // Merge runs of adjacent entries that share a language.
    let mut merged: Vec<(LanguageOverride, Vec<glob::Pattern>)> = vec![];
    for (lang, globs) in parsed {
        match merged.last_mut() {
            Some((prev_lang, prev_globs)) if *prev_lang == lang => {
                prev_globs.extend(globs);
            }
            _ => merged.push((lang, globs)),
        }
    }
    merged
}
/// Parse CLI arguments passed to the binary and decide what to do.
///
/// Exits the process (EXIT_BAD_ARGUMENTS) on invalid invocations.
/// Positional arguments may be: two paths (plain diff), seven arguments
/// (the GIT_EXTERNAL_DIFF protocol), nine arguments (git rename), or a
/// single path (a file containing conflict markers).
pub fn parse_args() -> Mode {
    let matches = app().get_matches();

    let color_output = match matches.value_of("color").expect("color has a default") {
        "always" => ColorOutput::Always,
        "never" => ColorOutput::Never,
        "auto" => ColorOutput::Auto,
        _ => {
            unreachable!("clap has already validated color")
        }
    };
    let use_color = should_use_color(color_output);

    let ignore_comments = matches.is_present("ignore-comments");

    // Collect overrides from --override, then from the numbered
    // DFT_OVERRIDE_1 ..= DFT_OVERRIDE_9 environment variables.
    let mut raw_overrides: Vec<String> = vec![];
    if let Some(overrides) = matches.values_of("override") {
        raw_overrides = overrides.map(|s| s.into()).collect();
    }
    for i in 1..=9 {
        if let Ok(value) = env::var(format!("DFT_OVERRIDE_{}", i)) {
            raw_overrides.push(value);
        }
    }
    let language_overrides = parse_overrides_or_die(&raw_overrides);

    if matches.is_present("list-languages") {
        return Mode::ListLanguages {
            use_color,
            language_overrides,
        };
    }

    if let Some(path) = matches.value_of("dump-syntax") {
        return Mode::DumpSyntax {
            path: path.to_string(),
            ignore_comments,
            language_overrides,
        };
    }

    if let Some(path) = matches.value_of("dump-ts") {
        return Mode::DumpTreeSitter {
            path: path.to_string(),
            language_overrides,
        };
    }

    let display_width = if let Some(arg_width) = matches.value_of("width") {
        arg_width
            .parse::<usize>()
            .expect("Already validated by clap")
    } else {
        detect_display_width()
    };

    let display_mode = match matches.value_of("display").expect("display has a default") {
        "side-by-side" => DisplayMode::SideBySide,
        "side-by-side-show-both" => DisplayMode::SideBySideShowBoth,
        "inline" => DisplayMode::Inline,
        "json" => {
            // JSON output is gated behind an environment variable while
            // the format is unstable.
            // (Fix: pass the literal env-var name directly; the previous
            // `format!("DFT_UNSTABLE")` allocated a String for nothing.)
            if env::var("DFT_UNSTABLE").is_err() {
                eprintln!("JSON output is an unstable feature and its format may change in future. To enable JSON output, set the environment variable DFT_UNSTABLE=yes.");
                std::process::exit(EXIT_BAD_ARGUMENTS);
            }
            DisplayMode::Json
        }
        _ => {
            unreachable!("clap has already validated display")
        }
    };

    let background_color = match matches
        .value_of("background")
        .expect("Always present as we've given clap a default")
    {
        "dark" => BackgroundColor::Dark,
        "light" => BackgroundColor::Light,
        _ => unreachable!("clap has already validated the values"),
    };
    let syntax_highlight = matches.value_of("syntax-highlight") == Some("on");

    // Numeric options below were all validated by clap, so parsing
    // cannot fail here.
    let graph_limit = matches
        .value_of("graph-limit")
        .expect("Always present as we've given clap a default")
        .parse::<usize>()
        .expect("Value already validated by clap");
    let byte_limit = matches
        .value_of("byte-limit")
        .expect("Always present as we've given clap a default")
        .parse::<usize>()
        .expect("Value already validated by clap");
    let parse_error_limit = matches
        .value_of("parse-error-limit")
        .expect("Always present as we've given clap a default")
        .parse::<usize>()
        .expect("Value already validated by clap");
    let tab_width = matches
        .value_of("tab-width")
        .expect("Always present as we've given clap a default")
        .parse::<usize>()
        .expect("Value already validated by clap");
    let num_context_lines = matches
        .value_of("context")
        .expect("Always present as we've given clap a default")
        .parse::<u32>()
        .expect("Value already validated by clap");

    let print_unchanged = !matches.is_present("skip-unchanged");
    let set_exit_code = matches.is_present("exit-code");
    let check_only = matches.is_present("check-only");

    let diff_options = DiffOptions {
        graph_limit,
        byte_limit,
        parse_error_limit,
        check_only,
        ignore_comments,
    };

    let args: Vec<_> = matches.values_of_os("paths").unwrap_or_default().collect();
    info!("CLI arguments: {:?}", args);

    // TODO: document these different ways of calling difftastic.
    let (display_path, lhs_path, rhs_path, old_path, in_vcs) = match &args[..] {
        [lhs_path, rhs_path] => {
            // Plain invocation: difft OLD NEW.
            let lhs_arg = FileArgument::from_cli_argument(lhs_path);
            let rhs_arg = FileArgument::from_cli_argument(rhs_path);
            let display_path = build_display_path(&lhs_arg, &rhs_arg);
            (display_path, lhs_arg, rhs_arg, None, false)
        }
        [display_path, lhs_tmp_file, _lhs_hash, _lhs_mode, rhs_tmp_file, _rhs_hash, _rhs_mode] => {
            // https://git-scm.com/docs/git#Documentation/git.txt-codeGITEXTERNALDIFFcode
            (
                display_path.to_string_lossy().to_string(),
                FileArgument::from_path_argument(lhs_tmp_file),
                FileArgument::from_path_argument(rhs_tmp_file),
                None,
                true,
            )
        }
        [old_name, lhs_tmp_file, _lhs_hash, _lhs_mode, rhs_tmp_file, _rhs_hash, _rhs_mode, new_name, _similarity] =>
        {
            // Rename file.
            // TODO: where does git document these 9 arguments?
            let old_name = old_name.to_string_lossy().to_string();
            let new_name = new_name.to_string_lossy().to_string();
            let renamed = format!("Renamed from {} to {}", old_name, new_name);
            (
                new_name,
                FileArgument::from_path_argument(lhs_tmp_file),
                FileArgument::from_path_argument(rhs_tmp_file),
                Some(renamed),
                true,
            )
        }
        [path] => {
            // A single path: diff the two sides of a file containing
            // conflict markers.
            let display_options = DisplayOptions {
                background_color,
                use_color,
                print_unchanged,
                tab_width,
                display_mode,
                display_width,
                num_context_lines,
                syntax_highlight,
                in_vcs: true,
            };
            let display_path = path.to_string_lossy().to_string();
            let path = FileArgument::from_path_argument(path);
            return Mode::DiffFromConflicts {
                display_path,
                path,
                diff_options,
                display_options,
                set_exit_code,
                language_overrides,
            };
        }
        _ => {
            if !args.is_empty() {
                eprintln!(
                    "error: Difftastic does not support being called with {} argument{}.\n",
                    args.len(),
                    if args.len() == 1 { "" } else { "s" }
                );
            }
            eprintln!("USAGE:\n\n {}\n", USAGE);
            eprintln!("For more information try --help");
            std::process::exit(EXIT_BAD_ARGUMENTS);
        }
    };

    let display_options = DisplayOptions {
        background_color,
        use_color,
        print_unchanged,
        tab_width,
        display_mode,
        display_width,
        num_context_lines,
        syntax_highlight,
        in_vcs,
    };

    Mode::Diff {
        diff_options,
        display_options,
        set_exit_code,
        language_overrides,
        lhs_path,
        rhs_path,
        display_path,
        old_path,
    }
}
/// The display width to use: the detected terminal width, or a
/// sensible 80-column default when detection fails.
fn detect_display_width() -> usize {
    match crossterm::terminal::size() {
        Ok((columns, _rows)) => columns.into(),
        Err(_) => 80,
    }
}
/// Decide whether output should be coloured, honouring the --color flag.
pub fn should_use_color(color_output: ColorOutput) -> bool {
    match color_output {
        ColorOutput::Never => false,
        ColorOutput::Always => true,
        // Always enable colour if stdout is a TTY or if the git pager is active.
        // TODO: consider following the env parsing logic in git_config_bool
        // in config.c.
        ColorOutput::Auto => std::io::stdout().is_tty() || env::var("GIT_PAGER_IN_USE").is_ok(),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // clap can self-check the CLI definition for internal consistency
    // (e.g. conflicting flags, bad defaults).
    #[test]
    fn test_app() {
        app().debug_assert();
    }

    #[test]
    fn test_detect_display_width() {
        // Basic smoke test.
        assert!(detect_display_width() > 10);
    }
}
| {
path.canonicalize().unwrap_or_else(|_| path.into())
} | identifier_body |
options.rs | //! CLI option parsing.
use std::{env, ffi::OsStr, path::Path, path::PathBuf};
use clap::{crate_authors, crate_description, Arg, Command};
use const_format::formatcp;
use crossterm::tty::IsTty;
use itertools::Itertools;
use crate::{
display::style::BackgroundColor,
exit_codes::EXIT_BAD_ARGUMENTS,
parse::guess_language::{language_override_from_name, LanguageOverride},
version::VERSION,
};
pub const DEFAULT_BYTE_LIMIT: usize = 1_000_000;
// Chosen experimentally: this is sufficiently many for all the sample
// files (the highest is slow_before/after.rs at 1.3M nodes), but
// small enough to terminate in ~5 seconds like the test file in #306.
pub const DEFAULT_GRAPH_LIMIT: usize = 3_000_000;
pub const DEFAULT_PARSE_ERROR_LIMIT: usize = 0;
pub const DEFAULT_TAB_WIDTH: usize = 8;
const USAGE: &str = concat!(env!("CARGO_BIN_NAME"), " [OPTIONS] OLD-PATH NEW-PATH");
#[derive(Debug, Clone, Copy)]
pub enum ColorOutput {
Always,
Auto,
Never,
}
#[derive(Debug, Clone)]
pub struct DisplayOptions {
pub background_color: BackgroundColor,
pub use_color: bool,
pub display_mode: DisplayMode,
pub print_unchanged: bool,
pub tab_width: usize,
pub display_width: usize,
pub num_context_lines: u32,
pub in_vcs: bool,
pub syntax_highlight: bool,
}
impl Default for DisplayOptions {
fn default() -> Self {
Self {
background_color: BackgroundColor::Dark,
use_color: false,
display_mode: DisplayMode::SideBySide,
print_unchanged: true,
tab_width: 8,
display_width: 80,
num_context_lines: 3,
in_vcs: false,
syntax_highlight: true,
}
}
}
#[derive(Debug, Clone)]
pub struct DiffOptions {
pub graph_limit: usize,
pub byte_limit: usize,
pub parse_error_limit: usize,
pub check_only: bool,
pub ignore_comments: bool,
}
impl Default for DiffOptions {
fn default() -> Self {
Self {
graph_limit: DEFAULT_GRAPH_LIMIT,
byte_limit: DEFAULT_BYTE_LIMIT,
parse_error_limit: DEFAULT_PARSE_ERROR_LIMIT,
check_only: false,
ignore_comments: false,
}
}
}
fn app() -> clap::Command<'static> {
Command::new("Difftastic")
.override_usage(USAGE)
.version(VERSION.as_str())
.about(crate_description!())
.author(crate_authors!())
.after_long_help(concat!(
"You can compare two files with difftastic by specifying them as arguments.\n\n",
"$ ",
env!("CARGO_BIN_NAME"),
" old.js new.js\n\n",
"You can also use directories as arguments. Difftastic will walk both directories and compare files with matching names.\n\n",
"$ ",
env!("CARGO_BIN_NAME"),
" old/ new/\n\n",
"If you have a file with conflict markers, you can pass it as a single argument. Difftastic will diff the two conflicting file states.\n\n",
"$ ",
env!("CARGO_BIN_NAME"),
" file_with_conflicts.js\n\n",
"Difftastic can also be invoked with 7 arguments in the format that GIT_EXTERNAL_DIFF expects.\n\n",
"See the full manual at: https://difftastic.wilfred.me.uk/")
)
.arg(
Arg::new("dump-syntax")
.long("dump-syntax")
.takes_value(true)
.value_name("PATH")
.long_help(
"Parse a single file with tree-sitter and display the difftastic syntax tree.",
).help_heading("DEBUG OPTIONS"),
)
.arg(
Arg::new("dump-ts")
.long("dump-ts")
.takes_value(true)
.value_name("PATH")
.long_help(
"Parse a single file with tree-sitter and display the tree-sitter parse tree.",
).help_heading("DEBUG OPTIONS"),
)
.arg(
Arg::new("context")
.long("context")
.takes_value(true)
.value_name("LINES")
.long_help("The number of contextual lines to show around changed lines.")
.default_value("3")
.env("DFT_CONTEXT")
.validator(|s| s.parse::<u32>())
.required(false),
)
.arg(
Arg::new("width")
.long("width")
.takes_value(true)
.value_name("COLUMNS")
.long_help("Use this many columns when calculating line wrapping. If not specified, difftastic will detect the terminal width.")
.env("DFT_WIDTH")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("tab-width")
.long("tab-width")
.takes_value(true)
.value_name("NUM_SPACES")
.long_help("Treat a tab as this many spaces.")
.env("DFT_TAB_WIDTH")
.default_value(formatcp!("{}", DEFAULT_TAB_WIDTH))
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("display").long("display")
.possible_values(["side-by-side", "side-by-side-show-both", "inline", "json"])
.default_value("side-by-side")
.value_name("MODE")
.env("DFT_DISPLAY")
.help("Display mode for showing results.
side-by-side: Display the before file and the after file in two separate columns, with line numbers aligned according to unchanged content. If a change is exclusively additions or exclusively removals, use a single column.
side-by-side-show-both: The same as side-by-side, but always uses two columns.
inline: A single column display, closer to traditional diff display.
json: Output the results as a machine-readable JSON array with an element per file.")
)
.arg(
Arg::new("color").long("color")
.possible_values(["always", "auto", "never"])
.default_value("auto")
.env("DFT_COLOR")
.value_name("WHEN")
.help("When to use color output.")
)
.arg(
Arg::new("background").long("background")
.value_name("BACKGROUND")
.env("DFT_BACKGROUND")
.possible_values(["dark", "light"])
.default_value("dark")
.help("Set the background brightness. Difftastic will prefer brighter colours on dark backgrounds.")
)
.arg(
Arg::new("syntax-highlight").long("syntax-highlight")
.value_name("on/off")
.env("DFT_SYNTAX_HIGHLIGHT")
.possible_values(["on", "off"])
.default_value("on")
.help("Enable or disable syntax highlighting.")
)
.arg(
Arg::new("exit-code").long("exit-code")
.env("DFT_EXIT_CODE")
.help("Set the exit code to 1 if there are syntactic changes in any files. For files where there is no detected language (e.g. unsupported language or binary files), sets the exit code if there are any byte changes.")
)
.arg(
Arg::new("check-only").long("check-only")
.env("DFT_CHECK_ONLY")
.help("Report whether there are any changes, but don't calculate them. Much faster.")
)
.arg(
Arg::new("ignore-comments").long("ignore-comments")
.env("DFT_IGNORE_COMMENTS")
.help("Don't consider comments when diffing.")
)
.arg(
Arg::new("skip-unchanged").long("skip-unchanged")
.help("Don't display anything if a file is unchanged.")
)
.arg(
Arg::new("missing-as-empty").long("missing-as-empty")
.help("Treat paths that don't exist as equivalent to an empty file. Only applies when diffing files, not directories.")
)
.arg(
Arg::new("override").long("override")
.value_name("GLOB:NAME")
.help(concat!("Associate this glob pattern with this language, overriding normal language detection. For example:
$ ", env!("CARGO_BIN_NAME"), " --override='*.c:C++' old.c new.c
See --list-languages for the list of language names. Language names are matched case insensitively. Overrides may also specify the language \"text\" to treat a file as plain text.
This argument may be given more than once. For example:
$ ", env!("CARGO_BIN_NAME"), " --override='CustomFile:json' --override='*.c:text' old.c new.c
To configure multiple overrides using environment variables, difftastic also accepts DFT_OVERRIDE_1 up to DFT_OVERRIDE_9.
$ export DFT_OVERRIDE='CustomFile:json'
$ export DFT_OVERRIDE_1='*.c:text'
$ export DFT_OVERRIDE_2='*.js:javascript jsx'
When multiple overrides are specified, the first matching override wins."))
.env("DFT_OVERRIDE")
.multiple_occurrences(true)
)
.arg(
Arg::new("list-languages").long("list-languages")
.help("Print the all the languages supported by difftastic, along with their extensions.")
)
.arg(
Arg::new("byte-limit").long("byte-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if either input file exceeds this size.")
.default_value(formatcp!("{}", DEFAULT_BYTE_LIMIT))
.env("DFT_BYTE_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("graph-limit").long("graph-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if the structural graph exceed this number of nodes in memory.")
.default_value(formatcp!("{}", DEFAULT_GRAPH_LIMIT))
.env("DFT_GRAPH_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("parse-error-limit").long("parse-error-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if the number of parse errors exceeds this value.")
.default_value(formatcp!("{}", DEFAULT_PARSE_ERROR_LIMIT))
.env("DFT_PARSE_ERROR_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("paths")
.value_name("PATHS")
.multiple_values(true)
.hide(true)
.allow_invalid_utf8(true),
)
.arg_required_else_help(true)
}
#[derive(Debug, Copy, Clone)]
pub enum DisplayMode {
Inline,
SideBySide,
SideBySideShowBoth,
Json,
}
#[derive(Eq, PartialEq, Debug)]
pub enum FileArgument {
NamedPath(std::path::PathBuf),
Stdin,
DevNull,
}
/// Canonicalize `path`, or hand it back unchanged when canonicalization
/// fails (e.g. the path does not exist).
fn try_canonicalize(path: &Path) -> PathBuf {
    match path.canonicalize() {
        Ok(canonical) => canonical,
        Err(_) => path.to_path_buf(),
    }
}
/// Express `path` relative to the current working directory when possible.
///
/// Both sides are canonicalized first so symlinks and `..` components do
/// not defeat the prefix match. If there is no current directory or
/// `path` is not underneath it, `path` is returned unchanged.
fn relative_to_current(path: &Path) -> PathBuf {
    if let Ok(current_path) = std::env::current_dir() {
        let path = try_canonicalize(path);
        // Fix: the argument had been mangled to `¤t_path` by an
        // HTML-entity decoding error (`&curren` -> ¤). It must be
        // `&current_path`, which did not compile as written.
        let current_path = try_canonicalize(&current_path);
        if let Ok(rel_path) = path.strip_prefix(current_path) {
            return rel_path.into();
        }
    }
    path.into()
}
impl FileArgument {
/// Return a `FileArgument` representing this command line
/// argument.
pub fn from_cli_argument(arg: &OsStr) -> Self {
if arg == "/dev/null" {
FileArgument::DevNull
} else if arg == "-" {
FileArgument::Stdin
} else {
FileArgument::NamedPath(PathBuf::from(arg))
}
}
/// Return a `FileArgument` that always represents a path that
/// exists, with the exception of `/dev/null`, which is turned into [FileArgument::DevNull].
pub fn from_path_argument(arg: &OsStr) -> Self {
// For new and deleted files, Git passes `/dev/null` as the reference file.
if arg == "/dev/null" {
FileArgument::DevNull
} else {
FileArgument::NamedPath(PathBuf::from(arg))
}
}
pub fn display(&self) -> String {
match self {
FileArgument::NamedPath(path) => relative_to_current(path).display().to_string(),
FileArgument::Stdin => "(stdin)".to_string(),
FileArgument::DevNull => "/dev/null".to_string(),
}
}
}
pub enum Mode {
Diff {
diff_options: DiffOptions,
display_options: DisplayOptions,
set_exit_code: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
/// The path where we can read the LHS file. This is often a
/// temporary file generated by source control.
lhs_path: FileArgument,
/// The path where we can read the RHS file. This is often a
/// temporary file generated by source control.
rhs_path: FileArgument,
/// The path that we show to the user.
display_path: String,
/// If this file has been renamed, the name it had previously.
old_path: Option<String>,
},
DiffFromConflicts {
diff_options: DiffOptions,
display_options: DisplayOptions,
set_exit_code: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
path: FileArgument,
/// The path that we show to the user.
display_path: String,
},
ListLanguages {
use_color: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
DumpTreeSitter {
path: String,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
DumpSyntax {
path: String,
ignore_comments: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
}
fn common_path_suffix(lhs_path: &Path, rhs_path: &Path) -> Option<String> {
let lhs_rev_components = lhs_path
.components()
.map(|c| c.as_os_str())
.rev()
.collect::<Vec<_>>();
let rhs_rev_components = rhs_path
.components()
.map(|c| c.as_os_str())
.rev()
.collect::<Vec<_>>();
let mut common_components = vec![];
for (lhs_component, rhs_component) in lhs_rev_components.iter().zip(rhs_rev_components.iter()) {
if lhs_component == rhs_component {
common_components.push(lhs_component.to_string_lossy());
} else {
break;
}
}
if common_components.is_empty() {
None
} else {
common_components.reverse();
Some(common_components.join(&std::path::MAIN_SEPARATOR.to_string()))
}
}
fn build_display_path(lhs_path: &FileArgument, rhs_path: &FileArgument) -> String {
match (lhs_path, rhs_path) {
(FileArgument::NamedPath(lhs), FileArgument::NamedPath(rhs)) => {
match common_path_suffix(lhs, rhs) {
Some(common_suffix) => common_suffix,
None => rhs.display().to_string(),
}
}
(FileArgument::NamedPath(p), _) | (_, FileArgument::NamedPath(p)) => {
p.display().to_string()
}
(FileArgument::DevNull, _) | (_, FileArgument::DevNull) => "/dev/null".into(),
(FileArgument::Stdin, FileArgument::Stdin) => "-".into(),
}
}
fn parse_overrides_or_die(raw_overrides: &[String]) -> Vec<(LanguageOverride, Vec<glob::Pattern>)> {
let mut res: Vec<(LanguageOverride, Vec<glob::Pattern>)> = vec![];
let mut invalid_syntax = false;
for raw_override in raw_overrides {
if let Some((glob_str, lang_name)) = raw_override.rsplit_once(':') {
match glob::Pattern::new(glob_str) {
Ok(pattern) => {
if let Some(language_override) = language_override_from_name(lang_name) {
res.push((language_override, vec![pattern]));
} else {
eprintln!("No such language '{}'", lang_name);
eprintln!("See --list-languages for the names of all languages available. Language overrides are case insensitive.");
invalid_syntax = true;
}
}
Err(e) => {
eprintln!("Invalid glob syntax '{}'", glob_str);
eprintln!("Glob parsing error: {}", e.msg);
invalid_syntax = true;
}
}
} else {
eprintln!("Invalid language override syntax '{}'", raw_override);
eprintln!("Language overrides are in the format 'GLOB:LANG_NAME', e.g. '*.js:JSON'.");
invalid_syntax = true;
}
}
if invalid_syntax {
std::process::exit(EXIT_BAD_ARGUMENTS);
}
res.into_iter()
.coalesce(
|(prev_lang, mut prev_globs), (current_lang, current_globs)| {
if prev_lang == current_lang {
prev_globs.extend(current_globs);
Ok((prev_lang, prev_globs))
} else {
Err(((prev_lang, prev_globs), (current_lang, current_globs)))
}
},
)
.collect()
}
/// Parse CLI arguments passed to the binary.
pub fn parse_args() -> Mode {
let matches = app().get_matches();
let color_output = match matches.value_of("color").expect("color has a default") {
"always" => ColorOutput::Always,
"never" => ColorOutput::Never,
"auto" => ColorOutput::Auto,
_ => {
unreachable!("clap has already validated color")
}
};
let use_color = should_use_color(color_output);
let ignore_comments = matches.is_present("ignore-comments");
let mut raw_overrides: Vec<String> = vec![];
if let Some(overrides) = matches.values_of("override") {
raw_overrides = overrides.map(|s| s.into()).collect();
}
for i in 1..=9 {
if let Ok(value) = env::var(format!("DFT_OVERRIDE_{}", i)) {
raw_overrides.push(value);
}
}
let language_overrides = parse_overrides_or_die(&raw_overrides);
if matches.is_present("list-languages") {
return Mode::ListLanguages {
use_color,
language_overrides,
};
}
if let Some(path) = matches.value_of("dump-syntax") {
return Mode::DumpSyntax {
path: path.to_string(),
ignore_comments,
language_overrides,
};
}
if let Some(path) = matches.value_of("dump-ts") {
return Mode::DumpTreeSitter {
path: path.to_string(),
language_overrides,
};
}
let display_width = if let Some(arg_width) = matches.value_of("width") {
arg_width
.parse::<usize>()
.expect("Already validated by clap")
} else {
detect_display_width()
};
let display_mode = match matches.value_of("display").expect("display has a default") {
"side-by-side" => DisplayMode::SideBySide,
"side-by-side-show-both" => DisplayMode::SideBySideShowBoth,
"inline" => DisplayMode::Inline,
"json" => {
if env::var(format!("DFT_UNSTABLE")).is_err() {
eprintln!("JSON output is an unstable feature and its format may change in future. To enable JSON output, set the environment variable DFT_UNSTABLE=yes.");
std::process::exit(EXIT_BAD_ARGUMENTS);
}
DisplayMode::Json
}
_ => {
unreachable!("clap has already validated display")
}
};
let background_color = match matches
.value_of("background")
.expect("Always present as we've given clap a default")
{
"dark" => BackgroundColor::Dark,
"light" => BackgroundColor::Light,
_ => unreachable!("clap has already validated the values"),
};
let syntax_highlight = matches.value_of("syntax-highlight") == Some("on");
let graph_limit = matches
.value_of("graph-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let byte_limit = matches
.value_of("byte-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let parse_error_limit = matches |
let tab_width = matches
.value_of("tab-width")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let num_context_lines = matches
.value_of("context")
.expect("Always present as we've given clap a default")
.parse::<u32>()
.expect("Value already validated by clap");
let print_unchanged = !matches.is_present("skip-unchanged");
let set_exit_code = matches.is_present("exit-code");
let check_only = matches.is_present("check-only");
let diff_options = DiffOptions {
graph_limit,
byte_limit,
parse_error_limit,
check_only,
ignore_comments,
};
let args: Vec<_> = matches.values_of_os("paths").unwrap_or_default().collect();
info!("CLI arguments: {:?}", args);
// TODO: document these different ways of calling difftastic.
let (display_path, lhs_path, rhs_path, old_path, in_vcs) = match &args[..] {
[lhs_path, rhs_path] => {
let lhs_arg = FileArgument::from_cli_argument(lhs_path);
let rhs_arg = FileArgument::from_cli_argument(rhs_path);
let display_path = build_display_path(&lhs_arg, &rhs_arg);
(display_path, lhs_arg, rhs_arg, None, false)
}
[display_path, lhs_tmp_file, _lhs_hash, _lhs_mode, rhs_tmp_file, _rhs_hash, _rhs_mode] => {
// https://git-scm.com/docs/git#Documentation/git.txt-codeGITEXTERNALDIFFcode
(
display_path.to_string_lossy().to_string(),
FileArgument::from_path_argument(lhs_tmp_file),
FileArgument::from_path_argument(rhs_tmp_file),
None,
true,
)
}
[old_name, lhs_tmp_file, _lhs_hash, _lhs_mode, rhs_tmp_file, _rhs_hash, _rhs_mode, new_name, _similarity] =>
{
// Rename file.
// TODO: where does git document these 9 arguments?
let old_name = old_name.to_string_lossy().to_string();
let new_name = new_name.to_string_lossy().to_string();
let renamed = format!("Renamed from {} to {}", old_name, new_name);
(
new_name,
FileArgument::from_path_argument(lhs_tmp_file),
FileArgument::from_path_argument(rhs_tmp_file),
Some(renamed),
true,
)
}
[path] => {
let display_options = DisplayOptions {
background_color,
use_color,
print_unchanged,
tab_width,
display_mode,
display_width,
num_context_lines,
syntax_highlight,
in_vcs: true,
};
let display_path = path.to_string_lossy().to_string();
let path = FileArgument::from_path_argument(path);
return Mode::DiffFromConflicts {
display_path,
path,
diff_options,
display_options,
set_exit_code,
language_overrides,
};
}
_ => {
if !args.is_empty() {
eprintln!(
"error: Difftastic does not support being called with {} argument{}.\n",
args.len(),
if args.len() == 1 { "" } else { "s" }
);
}
eprintln!("USAGE:\n\n {}\n", USAGE);
eprintln!("For more information try --help");
std::process::exit(EXIT_BAD_ARGUMENTS);
}
};
let display_options = DisplayOptions {
background_color,
use_color,
print_unchanged,
tab_width,
display_mode,
display_width,
num_context_lines,
syntax_highlight,
in_vcs,
};
Mode::Diff {
diff_options,
display_options,
set_exit_code,
language_overrides,
lhs_path,
rhs_path,
display_path,
old_path,
}
}
/// Choose the display width: try to autodetect, or fall back to a
/// sensible default.
fn detect_display_width() -> usize {
if let Ok((columns, _rows)) = crossterm::terminal::size() {
return columns.into();
}
80
}
pub fn should_use_color(color_output: ColorOutput) -> bool {
match color_output {
ColorOutput::Always => true,
ColorOutput::Auto => {
// Always enable colour if stdout is a TTY or if the git pager is active.
// TODO: consider following the env parsing logic in git_config_bool
// in config.c.
std::io::stdout().is_tty() || env::var("GIT_PAGER_IN_USE").is_ok()
}
ColorOutput::Never => false,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_app() {
app().debug_assert();
}
#[test]
fn test_detect_display_width() {
// Basic smoke test.
assert!(detect_display_width() > 10);
}
} | .value_of("parse-error-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap"); | random_line_split |
options.rs | //! CLI option parsing.
use std::{env, ffi::OsStr, path::Path, path::PathBuf};
use clap::{crate_authors, crate_description, Arg, Command};
use const_format::formatcp;
use crossterm::tty::IsTty;
use itertools::Itertools;
use crate::{
display::style::BackgroundColor,
exit_codes::EXIT_BAD_ARGUMENTS,
parse::guess_language::{language_override_from_name, LanguageOverride},
version::VERSION,
};
pub const DEFAULT_BYTE_LIMIT: usize = 1_000_000;
// Chosen experimentally: this is sufficiently many for all the sample
// files (the highest is slow_before/after.rs at 1.3M nodes), but
// small enough to terminate in ~5 seconds like the test file in #306.
pub const DEFAULT_GRAPH_LIMIT: usize = 3_000_000;
pub const DEFAULT_PARSE_ERROR_LIMIT: usize = 0;
pub const DEFAULT_TAB_WIDTH: usize = 8;
const USAGE: &str = concat!(env!("CARGO_BIN_NAME"), " [OPTIONS] OLD-PATH NEW-PATH");
#[derive(Debug, Clone, Copy)]
pub enum ColorOutput {
Always,
Auto,
Never,
}
#[derive(Debug, Clone)]
pub struct DisplayOptions {
pub background_color: BackgroundColor,
pub use_color: bool,
pub display_mode: DisplayMode,
pub print_unchanged: bool,
pub tab_width: usize,
pub display_width: usize,
pub num_context_lines: u32,
pub in_vcs: bool,
pub syntax_highlight: bool,
}
impl Default for DisplayOptions {
fn default() -> Self {
Self {
background_color: BackgroundColor::Dark,
use_color: false,
display_mode: DisplayMode::SideBySide,
print_unchanged: true,
tab_width: 8,
display_width: 80,
num_context_lines: 3,
in_vcs: false,
syntax_highlight: true,
}
}
}
#[derive(Debug, Clone)]
pub struct DiffOptions {
pub graph_limit: usize,
pub byte_limit: usize,
pub parse_error_limit: usize,
pub check_only: bool,
pub ignore_comments: bool,
}
impl Default for DiffOptions {
fn default() -> Self {
Self {
graph_limit: DEFAULT_GRAPH_LIMIT,
byte_limit: DEFAULT_BYTE_LIMIT,
parse_error_limit: DEFAULT_PARSE_ERROR_LIMIT,
check_only: false,
ignore_comments: false,
}
}
}
fn app() -> clap::Command<'static> {
Command::new("Difftastic")
.override_usage(USAGE)
.version(VERSION.as_str())
.about(crate_description!())
.author(crate_authors!())
.after_long_help(concat!(
"You can compare two files with difftastic by specifying them as arguments.\n\n",
"$ ",
env!("CARGO_BIN_NAME"),
" old.js new.js\n\n",
"You can also use directories as arguments. Difftastic will walk both directories and compare files with matching names.\n\n",
"$ ",
env!("CARGO_BIN_NAME"),
" old/ new/\n\n",
"If you have a file with conflict markers, you can pass it as a single argument. Difftastic will diff the two conflicting file states.\n\n",
"$ ",
env!("CARGO_BIN_NAME"),
" file_with_conflicts.js\n\n",
"Difftastic can also be invoked with 7 arguments in the format that GIT_EXTERNAL_DIFF expects.\n\n",
"See the full manual at: https://difftastic.wilfred.me.uk/")
)
.arg(
Arg::new("dump-syntax")
.long("dump-syntax")
.takes_value(true)
.value_name("PATH")
.long_help(
"Parse a single file with tree-sitter and display the difftastic syntax tree.",
).help_heading("DEBUG OPTIONS"),
)
.arg(
Arg::new("dump-ts")
.long("dump-ts")
.takes_value(true)
.value_name("PATH")
.long_help(
"Parse a single file with tree-sitter and display the tree-sitter parse tree.",
).help_heading("DEBUG OPTIONS"),
)
.arg(
Arg::new("context")
.long("context")
.takes_value(true)
.value_name("LINES")
.long_help("The number of contextual lines to show around changed lines.")
.default_value("3")
.env("DFT_CONTEXT")
.validator(|s| s.parse::<u32>())
.required(false),
)
.arg(
Arg::new("width")
.long("width")
.takes_value(true)
.value_name("COLUMNS")
.long_help("Use this many columns when calculating line wrapping. If not specified, difftastic will detect the terminal width.")
.env("DFT_WIDTH")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("tab-width")
.long("tab-width")
.takes_value(true)
.value_name("NUM_SPACES")
.long_help("Treat a tab as this many spaces.")
.env("DFT_TAB_WIDTH")
.default_value(formatcp!("{}", DEFAULT_TAB_WIDTH))
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("display").long("display")
.possible_values(["side-by-side", "side-by-side-show-both", "inline", "json"])
.default_value("side-by-side")
.value_name("MODE")
.env("DFT_DISPLAY")
.help("Display mode for showing results.
side-by-side: Display the before file and the after file in two separate columns, with line numbers aligned according to unchanged content. If a change is exclusively additions or exclusively removals, use a single column.
side-by-side-show-both: The same as side-by-side, but always uses two columns.
inline: A single column display, closer to traditional diff display.
json: Output the results as a machine-readable JSON array with an element per file.")
)
.arg(
Arg::new("color").long("color")
.possible_values(["always", "auto", "never"])
.default_value("auto")
.env("DFT_COLOR")
.value_name("WHEN")
.help("When to use color output.")
)
.arg(
Arg::new("background").long("background")
.value_name("BACKGROUND")
.env("DFT_BACKGROUND")
.possible_values(["dark", "light"])
.default_value("dark")
.help("Set the background brightness. Difftastic will prefer brighter colours on dark backgrounds.")
)
.arg(
Arg::new("syntax-highlight").long("syntax-highlight")
.value_name("on/off")
.env("DFT_SYNTAX_HIGHLIGHT")
.possible_values(["on", "off"])
.default_value("on")
.help("Enable or disable syntax highlighting.")
)
.arg(
Arg::new("exit-code").long("exit-code")
.env("DFT_EXIT_CODE")
.help("Set the exit code to 1 if there are syntactic changes in any files. For files where there is no detected language (e.g. unsupported language or binary files), sets the exit code if there are any byte changes.")
)
.arg(
Arg::new("check-only").long("check-only")
.env("DFT_CHECK_ONLY")
.help("Report whether there are any changes, but don't calculate them. Much faster.")
)
.arg(
Arg::new("ignore-comments").long("ignore-comments")
.env("DFT_IGNORE_COMMENTS")
.help("Don't consider comments when diffing.")
)
.arg(
Arg::new("skip-unchanged").long("skip-unchanged")
.help("Don't display anything if a file is unchanged.")
)
.arg(
Arg::new("missing-as-empty").long("missing-as-empty")
.help("Treat paths that don't exist as equivalent to an empty file. Only applies when diffing files, not directories.")
)
.arg(
Arg::new("override").long("override")
.value_name("GLOB:NAME")
.help(concat!("Associate this glob pattern with this language, overriding normal language detection. For example:
$ ", env!("CARGO_BIN_NAME"), " --override='*.c:C++' old.c new.c
See --list-languages for the list of language names. Language names are matched case insensitively. Overrides may also specify the language \"text\" to treat a file as plain text.
This argument may be given more than once. For example:
$ ", env!("CARGO_BIN_NAME"), " --override='CustomFile:json' --override='*.c:text' old.c new.c
To configure multiple overrides using environment variables, difftastic also accepts DFT_OVERRIDE_1 up to DFT_OVERRIDE_9.
$ export DFT_OVERRIDE='CustomFile:json'
$ export DFT_OVERRIDE_1='*.c:text'
$ export DFT_OVERRIDE_2='*.js:javascript jsx'
When multiple overrides are specified, the first matching override wins."))
.env("DFT_OVERRIDE")
.multiple_occurrences(true)
)
.arg(
Arg::new("list-languages").long("list-languages")
.help("Print the all the languages supported by difftastic, along with their extensions.")
)
.arg(
Arg::new("byte-limit").long("byte-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if either input file exceeds this size.")
.default_value(formatcp!("{}", DEFAULT_BYTE_LIMIT))
.env("DFT_BYTE_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("graph-limit").long("graph-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if the structural graph exceed this number of nodes in memory.")
.default_value(formatcp!("{}", DEFAULT_GRAPH_LIMIT))
.env("DFT_GRAPH_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("parse-error-limit").long("parse-error-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if the number of parse errors exceeds this value.")
.default_value(formatcp!("{}", DEFAULT_PARSE_ERROR_LIMIT))
.env("DFT_PARSE_ERROR_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("paths")
.value_name("PATHS")
.multiple_values(true)
.hide(true)
.allow_invalid_utf8(true),
)
.arg_required_else_help(true)
}
#[derive(Debug, Copy, Clone)]
pub enum DisplayMode {
Inline,
SideBySide,
SideBySideShowBoth,
Json,
}
#[derive(Eq, PartialEq, Debug)]
pub enum FileArgument {
NamedPath(std::path::PathBuf),
Stdin,
DevNull,
}
fn try_canonicalize(path: &Path) -> PathBuf {
path.canonicalize().unwrap_or_else(|_| path.into())
}
fn relative_to_current(path: &Path) -> PathBuf {
if let Ok(current_path) = std::env::current_dir() {
let path = try_canonicalize(path);
let current_path = try_canonicalize(¤t_path);
if let Ok(rel_path) = path.strip_prefix(current_path) {
return rel_path.into();
}
}
path.into()
}
impl FileArgument {
/// Return a `FileArgument` representing this command line
/// argument.
pub fn from_cli_argument(arg: &OsStr) -> Self {
if arg == "/dev/null" {
FileArgument::DevNull
} else if arg == "-" {
FileArgument::Stdin
} else {
FileArgument::NamedPath(PathBuf::from(arg))
}
}
/// Return a `FileArgument` that always represents a path that
/// exists, with the exception of `/dev/null`, which is turned into [FileArgument::DevNull].
pub fn from_path_argument(arg: &OsStr) -> Self {
// For new and deleted files, Git passes `/dev/null` as the reference file.
if arg == "/dev/null" {
FileArgument::DevNull
} else {
FileArgument::NamedPath(PathBuf::from(arg))
}
}
pub fn display(&self) -> String {
match self {
FileArgument::NamedPath(path) => relative_to_current(path).display().to_string(),
FileArgument::Stdin => "(stdin)".to_string(),
FileArgument::DevNull => "/dev/null".to_string(),
}
}
}
pub enum Mode {
Diff {
diff_options: DiffOptions,
display_options: DisplayOptions,
set_exit_code: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
/// The path where we can read the LHS file. This is often a
/// temporary file generated by source control.
lhs_path: FileArgument,
/// The path where we can read the RHS file. This is often a
/// temporary file generated by source control.
rhs_path: FileArgument,
/// The path that we show to the user.
display_path: String,
/// If this file has been renamed, the name it had previously.
old_path: Option<String>,
},
DiffFromConflicts {
diff_options: DiffOptions,
display_options: DisplayOptions,
set_exit_code: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
path: FileArgument,
/// The path that we show to the user.
display_path: String,
},
ListLanguages {
use_color: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
DumpTreeSitter {
path: String,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
DumpSyntax {
path: String,
ignore_comments: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
}
fn common_path_suffix(lhs_path: &Path, rhs_path: &Path) -> Option<String> {
let lhs_rev_components = lhs_path
.components()
.map(|c| c.as_os_str())
.rev()
.collect::<Vec<_>>();
let rhs_rev_components = rhs_path
.components()
.map(|c| c.as_os_str())
.rev()
.collect::<Vec<_>>();
let mut common_components = vec![];
for (lhs_component, rhs_component) in lhs_rev_components.iter().zip(rhs_rev_components.iter()) {
if lhs_component == rhs_component {
common_components.push(lhs_component.to_string_lossy());
} else {
break;
}
}
if common_components.is_empty() {
None
} else {
common_components.reverse();
Some(common_components.join(&std::path::MAIN_SEPARATOR.to_string()))
}
}
fn build_display_path(lhs_path: &FileArgument, rhs_path: &FileArgument) -> String {
match (lhs_path, rhs_path) {
(FileArgument::NamedPath(lhs), FileArgument::NamedPath(rhs)) => {
match common_path_suffix(lhs, rhs) {
Some(common_suffix) => common_suffix,
None => rhs.display().to_string(),
}
}
(FileArgument::NamedPath(p), _) | (_, FileArgument::NamedPath(p)) => {
p.display().to_string()
}
(FileArgument::DevNull, _) | (_, FileArgument::DevNull) => "/dev/null".into(),
(FileArgument::Stdin, FileArgument::Stdin) => "-".into(),
}
}
fn parse_overrides_or_die(raw_overrides: &[String]) -> Vec<(LanguageOverride, Vec<glob::Pattern>)> {
let mut res: Vec<(LanguageOverride, Vec<glob::Pattern>)> = vec![];
let mut invalid_syntax = false;
for raw_override in raw_overrides {
if let Some((glob_str, lang_name)) = raw_override.rsplit_once(':') {
match glob::Pattern::new(glob_str) {
Ok(pattern) => {
if let Some(language_override) = language_override_from_name(lang_name) {
res.push((language_override, vec![pattern]));
} else {
eprintln!("No such language '{}'", lang_name);
eprintln!("See --list-languages for the names of all languages available. Language overrides are case insensitive.");
invalid_syntax = true;
}
}
Err(e) => {
eprintln!("Invalid glob syntax '{}'", glob_str);
eprintln!("Glob parsing error: {}", e.msg);
invalid_syntax = true;
}
}
} else {
eprintln!("Invalid language override syntax '{}'", raw_override);
eprintln!("Language overrides are in the format 'GLOB:LANG_NAME', e.g. '*.js:JSON'.");
invalid_syntax = true;
}
}
if invalid_syntax {
std::process::exit(EXIT_BAD_ARGUMENTS);
}
res.into_iter()
.coalesce(
|(prev_lang, mut prev_globs), (current_lang, current_globs)| {
if prev_lang == current_lang {
prev_globs.extend(current_globs);
Ok((prev_lang, prev_globs))
} else {
Err(((prev_lang, prev_globs), (current_lang, current_globs)))
}
},
)
.collect()
}
/// Parse CLI arguments passed to the binary.
pub fn parse_args() -> Mode {
let matches = app().get_matches();
let color_output = match matches.value_of("color").expect("color has a default") {
"always" => ColorOutput::Always,
"never" => ColorOutput::Never,
"auto" => ColorOutput::Auto,
_ => {
unreachable!("clap has already validated color")
}
};
let use_color = should_use_color(color_output);
let ignore_comments = matches.is_present("ignore-comments");
let mut raw_overrides: Vec<String> = vec![];
if let Some(overrides) = matches.values_of("override") {
raw_overrides = overrides.map(|s| s.into()).collect();
}
for i in 1..=9 {
if let Ok(value) = env::var(format!("DFT_OVERRIDE_{}", i)) {
raw_overrides.push(value);
}
}
let language_overrides = parse_overrides_or_die(&raw_overrides);
if matches.is_present("list-languages") {
return Mode::ListLanguages {
use_color,
language_overrides,
};
}
if let Some(path) = matches.value_of("dump-syntax") {
return Mode::DumpSyntax {
path: path.to_string(),
ignore_comments,
language_overrides,
};
}
if let Some(path) = matches.value_of("dump-ts") {
return Mode::DumpTreeSitter {
path: path.to_string(),
language_overrides,
};
}
let display_width = if let Some(arg_width) = matches.value_of("width") {
arg_width
.parse::<usize>()
.expect("Already validated by clap")
} else {
detect_display_width()
};
let display_mode = match matches.value_of("display").expect("display has a default") {
"side-by-side" => DisplayMode::SideBySide,
"side-by-side-show-both" => DisplayMode::SideBySideShowBoth,
"inline" => DisplayMode::Inline,
"json" => {
if env::var(format!("DFT_UNSTABLE")).is_err() {
eprintln!("JSON output is an unstable feature and its format may change in future. To enable JSON output, set the environment variable DFT_UNSTABLE=yes.");
std::process::exit(EXIT_BAD_ARGUMENTS);
}
DisplayMode::Json
}
_ => {
unreachable!("clap has already validated display")
}
};
let background_color = match matches
.value_of("background")
.expect("Always present as we've given clap a default")
{
"dark" => BackgroundColor::Dark,
"light" => BackgroundColor::Light,
_ => unreachable!("clap has already validated the values"),
};
let syntax_highlight = matches.value_of("syntax-highlight") == Some("on");
let graph_limit = matches
.value_of("graph-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let byte_limit = matches
.value_of("byte-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let parse_error_limit = matches
.value_of("parse-error-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let tab_width = matches
.value_of("tab-width")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let num_context_lines = matches
.value_of("context")
.expect("Always present as we've given clap a default")
.parse::<u32>()
.expect("Value already validated by clap");
let print_unchanged = !matches.is_present("skip-unchanged");
let set_exit_code = matches.is_present("exit-code");
let check_only = matches.is_present("check-only");
let diff_options = DiffOptions {
graph_limit,
byte_limit,
parse_error_limit,
check_only,
ignore_comments,
};
let args: Vec<_> = matches.values_of_os("paths").unwrap_or_default().collect();
info!("CLI arguments: {:?}", args);
// TODO: document these different ways of calling difftastic.
let (display_path, lhs_path, rhs_path, old_path, in_vcs) = match &args[..] {
[lhs_path, rhs_path] => {
let lhs_arg = FileArgument::from_cli_argument(lhs_path);
let rhs_arg = FileArgument::from_cli_argument(rhs_path);
let display_path = build_display_path(&lhs_arg, &rhs_arg);
(display_path, lhs_arg, rhs_arg, None, false)
}
[display_path, lhs_tmp_file, _lhs_hash, _lhs_mode, rhs_tmp_file, _rhs_hash, _rhs_mode] => {
// https://git-scm.com/docs/git#Documentation/git.txt-codeGITEXTERNALDIFFcode
(
display_path.to_string_lossy().to_string(),
FileArgument::from_path_argument(lhs_tmp_file),
FileArgument::from_path_argument(rhs_tmp_file),
None,
true,
)
}
[old_name, lhs_tmp_file, _lhs_hash, _lhs_mode, rhs_tmp_file, _rhs_hash, _rhs_mode, new_name, _similarity] =>
{
// Rename file.
// TODO: where does git document these 9 arguments?
let old_name = old_name.to_string_lossy().to_string();
let new_name = new_name.to_string_lossy().to_string();
let renamed = format!("Renamed from {} to {}", old_name, new_name);
(
new_name,
FileArgument::from_path_argument(lhs_tmp_file),
FileArgument::from_path_argument(rhs_tmp_file),
Some(renamed),
true,
)
}
[path] => {
let display_options = DisplayOptions {
background_color,
use_color,
print_unchanged,
tab_width,
display_mode,
display_width,
num_context_lines,
syntax_highlight,
in_vcs: true,
};
let display_path = path.to_string_lossy().to_string();
let path = FileArgument::from_path_argument(path);
return Mode::DiffFromConflicts {
display_path,
path,
diff_options,
display_options,
set_exit_code,
language_overrides,
};
}
_ => {
if !args.is_empty() {
eprintln!(
"error: Difftastic does not support being called with {} argument{}.\n",
args.len(),
if args.len() == 1 { "" } else { "s" }
);
}
eprintln!("USAGE:\n\n {}\n", USAGE);
eprintln!("For more information try --help");
std::process::exit(EXIT_BAD_ARGUMENTS);
}
};
let display_options = DisplayOptions {
background_color,
use_color,
print_unchanged,
tab_width,
display_mode,
display_width,
num_context_lines,
syntax_highlight,
in_vcs,
};
Mode::Diff {
diff_options,
display_options,
set_exit_code,
language_overrides,
lhs_path,
rhs_path,
display_path,
old_path,
}
}
/// Choose the display width: try to autodetect, or fall back to a
/// sensible default.
fn detect_display_width() -> usize {
if let Ok((columns, _rows)) = crossterm::terminal::size() {
return columns.into();
}
80
}
pub fn should_use_color(color_output: ColorOutput) -> bool {
match color_output {
ColorOutput::Always => true,
ColorOutput::Auto => {
// Always enable colour if stdout is a TTY or if the git pager is active.
// TODO: consider following the env parsing logic in git_config_bool
// in config.c.
std::io::stdout().is_tty() || env::var("GIT_PAGER_IN_USE").is_ok()
}
ColorOutput::Never => false,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_app() {
app().debug_assert();
}
#[test]
fn | () {
// Basic smoke test.
assert!(detect_display_width() > 10);
}
}
| test_detect_display_width | identifier_name |
CLIEngine.js | /* eslint max-lines-per-function: 'off', no-param-reassign: 'off', arrow-body-style: 'off' */
const fs = require('fs');
const path = require('path');
const glob = require('glob');
const ignore = require('ignore');
const NpmPackageJsonLint = require('./NpmPackageJsonLint');
const Config = require('./Config');
const ConfigValidator = require('./config/ConfigValidator');
const Parser = require('./Parser');
const pkg = require('../package.json');
const DEFAULT_IGNORE_FILENAME = '.npmpackagejsonlintignore';
const FILE_NOT_FOUND_ERROR_CODE = 'ENOENT';
const noIssues = 0;
/**
* CLIEngine configuration object
*
* @typedef {Object} CLIEngineOptions
* @property {string} configFile The configuration file to use.
* @property {string} cwd The value to use for the current working directory.
* @property {boolean} useConfigFiles False disables use of .npmpackagejsonlintrc.json files, npmpackagejsonlint.config.js files, and npmPackageJsonLintConfig object in package.json file.
* @property {Object<string,*>} rules An object of rules to use.
*/
/**
* A lint issue. It could be an error or a warning.
* @typedef {Object} LintIssue
* @param {String} lintId Unique, lowercase, hyphen-separate name for the lint
* @param {String} severity 'error' or 'warning'
* @param {String} node Name of the node in the JSON the lint audits
* @param {String} lintMessage Human-friendly message to users
*/
/**
* A linting result.
* @typedef {Object} LintResult
*
* @property {String} filePath The path to the file that was linted.
* @property {LintIssue[]} issues An array of LintIssues from the run.
* @property {Number} errorCount Number of errors for the result.
* @property {Number} warningCount Number of warnings for the result.
*/
/**
* A result count object.
* @typedef {Object} ResultCounts
*
* @property {Number} errorCount Number of errors for the result.
* @property {Number} warningCount Number of warnings for the result.
*/
/**
* Aggregates the count of errors and warning for a package.json file.
*
* @param {LintIssue[]} issues - Array of LintIssue object from a package.json file.
* @returns {ResultCounts} Counts object
* @private
*/
const aggregateCountsPerFile = issues => {
const incrementOne = 1;
return issues.reduce(
(counts, issue) => {
if (issue.severity === 'error') {
counts.errorCount += incrementOne;
} else {
counts.warningCount += incrementOne;
}
return counts;
},
{
errorCount: 0,
warningCount: 0
}
);
};
/**
* Aggregates the count of errors and warnings for all package.json files.
*
* @param {LintResult[]} results Array of LintIssue objects from all package.json files.
* @returns {ResultCounts} Counts object
* @private
*/
const aggregateOverallCounts = results => {
return results.reduce(
(counts, result) => {
counts.errorCount += result.errorCount;
counts.warningCount += result.warningCount;
return counts;
},
{
errorCount: 0,
warningCount: 0
}
);
};
/**
* Processes package.json object
*
* @param {Object} packageJsonObj An object representation of a package.json file.
* @param {Object} configHelper The configuration context.
* @param {String} fileName An optional string representing the package.json file.
* @param {NpmPackageJsonLint} linter NpmPackageJsonLint linter context
* @returns {LintResult} The results for linting on this text.
* @private
*/
const processPackageJsonObject = (packageJsonObj, configHelper, fileName, linter) => {
let filePath;
if (fileName) {
filePath = path.resolve(fileName);
}
const effectiveFileName = fileName || '{}';
const config = configHelper.get(filePath);
const linterResult = linter.lint(packageJsonObj, config.rules);
const counts = aggregateCountsPerFile(linterResult.issues);
const result = {
filePath: `./${path.relative(configHelper.options.cwd, effectiveFileName)}`,
issues: linterResult.issues,
errorCount: counts.errorCount,
warningCount: counts.warningCount
};
return result;
};
/**
* Processes a package.json file.
*
* @param {String} fileName The filename of the file being linted.
* @param {Object} configHelper The configuration context.
* @param {NpmPackageJsonLint} linter Linter context
* @returns {LintResult} The linter results
* @private
*/
const processPackageJsonFile = (fileName, configHelper, linter) => {
const packageJsonObj = Parser.parseJsonFile(path.resolve(fileName));
return processPackageJsonObject(packageJsonObj, configHelper, fileName, linter);
};
/**
* Checks if the given issue is an error issue.
*
* @param {LintIssue} issue npm-package-json-lint issue
* @returns {boolean} True if error, false if warning.
* @private
*/
const isIssueAnError = issue => {
return issue.severity === 'error';
};
/**
* Generates ignorer based on ignore file content.
*
* @param {String} cwd Current work directory.
* @param {CLIEngineOptions} options CLIEngineOptions object.
* @returns {Object} Ignorer
*/
const getIgnorer = (cwd, options) => {
const ignoreFilePath = options.ignorePath || DEFAULT_IGNORE_FILENAME;
const absoluteIgnoreFilePath = path.isAbsolute(ignoreFilePath) ? ignoreFilePath : path.resolve(cwd, ignoreFilePath);
let ignoreText = '';
try {
ignoreText = fs.readFileSync(absoluteIgnoreFilePath, 'utf8');
} catch (readError) {
if (readError.code !== FILE_NOT_FOUND_ERROR_CODE) {
throw readError;
}
}
return ignore().add(ignoreText);
};
/**
* Generates a list of files to lint based on a list of provided patterns
*
* @param {Array<String>} patterns An array of patterns
* @param {CLIEngineOptions} options CLIEngineOptions object.
* @returns {Array} Files list
*/
const getFileList = (patterns, options) => {
const cwd = (options && options.cwd) || process.cwd();
// step 1 - filter out empty entries
const filteredPatterns = patterns.filter(pattern => pattern.length);
// step 2 - convert directories to globs
const globPatterns = filteredPatterns.map(pattern => {
const suffix = '/**/package.json';
let newPath = pattern;
const resolvedPath = path.resolve(cwd, pattern);
if (fs.existsSync(resolvedPath)) {
const fileStats = fs.statSync(resolvedPath);
if (fileStats.isFile()) {
if (resolvedPath.endsWith(`${path.sep}package.json`)) {
newPath = resolvedPath;
} else {
throw new Error(`Pattern, ${pattern}, is a file, but isn't a package.json file.`);
}
} else if (fileStats.isDirectory()) {
// strip trailing slash(es)
newPath = newPath.replace(/[/\\]$/, '') + suffix;
}
} else {
// string trailing /* (Any number of *s)
newPath = newPath.replace(/[/][*]+$/, '') + suffix;
}
return newPath;
});
const files = [];
const addedFiles = new Set();
const ignorer = getIgnorer(cwd, options);
globPatterns.forEach(pattern => {
const file = path.resolve(cwd, pattern);
if (fs.existsSync(file) && fs.statSync(file).isFile()) {
if (addedFiles.has(file) || ignorer.ignores(path.relative(cwd, file))) {
return;
}
addedFiles.add(file);
files.push(file);
} else {
const globOptions = {
nodir: true,
dot: false,
cwd,
ignore: 'node_modules'
};
let globFiles = glob.sync(pattern, globOptions);
// remove node_module package.json files. Manually doing this instead of using glob ignore
// because of https://github.com/isaacs/node-glob/issues/309
globFiles = globFiles.filter(globFile => !globFile.includes('node_modules'));
globFiles.forEach(globFile => {
const filePath = path.resolve(cwd, globFile);
if (addedFiles.has(filePath) || ignorer.ignores(path.relative(cwd, filePath))) {
return;
}
addedFiles.add(filePath);
files.push(filePath);
});
}
});
return files;
};
/**
* Public CLIEngine class
* @class
*/
class CLIEngine {
/**
* constructor
* @param {CLIEngineOptions} passedOptions The options for the CLIEngine.
* @constructor
*/
constructor(passedOptions) {
const options = Object.assign(Object.create(null), {cwd: process.cwd()}, passedOptions);
this.options = options;
this.version = pkg.version;
this.linter = new NpmPackageJsonLint();
if (this.options.rules && Object.keys(this.options.rules).length) {
ConfigValidator.validateRules(this.options.rules, 'cli', this.linter);
}
this.config = new Config(this.options, this.linter);
}
/**
* Gets rules from linter
*
* @returns {Object} Rules object containing the ruleId and path to rule module file.
*/
getRules() {
return this.linter.getRules();
}
/**
* Filters results to only include errors.
*
* @param {LintResult[]} results The results to filter.
* @returns {LintResult[]} The filtered results.
*/
static | (results) {
const filtered = [];
results.forEach(result => {
const filteredIssues = result.issues.filter(isIssueAnError);
if (filteredIssues.length > noIssues) {
const filteredResult = {
issues: filteredIssues,
errorCount: filteredIssues.length,
warningCount: 0
};
filtered.push(Object.assign(result, filteredResult));
}
});
return filtered;
}
/**
* Executes the current configuration on an array of file and directory names.
* @param {string[]} patterns An array of file and directory names.
* @returns {Object} The results for all files that were linted.
*/
executeOnPackageJsonFiles(patterns) {
const fileList = getFileList(patterns, this.options);
const results = fileList.map(filePath => processPackageJsonFile(filePath, this.config, this.linter));
const stats = aggregateOverallCounts(results);
return {
results,
errorCount: stats.errorCount,
warningCount: stats.warningCount
};
}
/* eslint-disable id-length */
/**
* Executes linter on package.json object
*
* @param {Object} packageJsonObj An object representation of a package.json file.
* @param {string} filename An optional string representing the texts filename.
* @returns {Object} The results for the linting.
*/
executeOnPackageJsonObject(packageJsonObj, filename) {
const results = [];
const resolvedFilename = filename && !path.isAbsolute(filename) ? path.resolve(this.options.cwd, filename) : filename;
results.push(processPackageJsonObject(packageJsonObj, this.config, resolvedFilename, this.linter));
const count = aggregateOverallCounts(results);
return {
results,
errorCount: count.errorCount,
warningCount: count.warningCount
};
}
/**
* Returns a configuration object for the given file using
* npm-package-json-lint's configuration rules.
*
* @param {String} filePath The path of the file to get configuration for.
* @returns {Object} A configuration object for the file.
*/
getConfigForFile(filePath) {
return this.config.get(filePath);
}
}
module.exports = CLIEngine;
| getErrorResults | identifier_name |
CLIEngine.js | /* eslint max-lines-per-function: 'off', no-param-reassign: 'off', arrow-body-style: 'off' */
const fs = require('fs');
const path = require('path');
const glob = require('glob');
const ignore = require('ignore');
const NpmPackageJsonLint = require('./NpmPackageJsonLint');
const Config = require('./Config');
const ConfigValidator = require('./config/ConfigValidator');
const Parser = require('./Parser');
const pkg = require('../package.json');
const DEFAULT_IGNORE_FILENAME = '.npmpackagejsonlintignore';
const FILE_NOT_FOUND_ERROR_CODE = 'ENOENT';
const noIssues = 0;
/**
* CLIEngine configuration object
*
* @typedef {Object} CLIEngineOptions
* @property {string} configFile The configuration file to use.
* @property {string} cwd The value to use for the current working directory.
* @property {boolean} useConfigFiles False disables use of .npmpackagejsonlintrc.json files, npmpackagejsonlint.config.js files, and npmPackageJsonLintConfig object in package.json file.
* @property {Object<string,*>} rules An object of rules to use.
*/
/**
* A lint issue. It could be an error or a warning.
* @typedef {Object} LintIssue
* @param {String} lintId Unique, lowercase, hyphen-separate name for the lint
* @param {String} severity 'error' or 'warning'
* @param {String} node Name of the node in the JSON the lint audits
* @param {String} lintMessage Human-friendly message to users
*/
/**
* A linting result.
* @typedef {Object} LintResult
*
* @property {String} filePath The path to the file that was linted.
* @property {LintIssue[]} issues An array of LintIssues from the run.
* @property {Number} errorCount Number of errors for the result.
* @property {Number} warningCount Number of warnings for the result.
*/
/**
* A result count object.
* @typedef {Object} ResultCounts
*
* @property {Number} errorCount Number of errors for the result.
* @property {Number} warningCount Number of warnings for the result.
*/
/**
* Aggregates the count of errors and warning for a package.json file.
*
* @param {LintIssue[]} issues - Array of LintIssue object from a package.json file.
* @returns {ResultCounts} Counts object
* @private
*/
const aggregateCountsPerFile = issues => {
const incrementOne = 1;
return issues.reduce(
(counts, issue) => {
if (issue.severity === 'error') {
counts.errorCount += incrementOne;
} else {
counts.warningCount += incrementOne;
}
return counts;
},
{
errorCount: 0,
warningCount: 0
}
);
};
/**
* Aggregates the count of errors and warnings for all package.json files.
*
* @param {LintResult[]} results Array of LintIssue objects from all package.json files.
* @returns {ResultCounts} Counts object
* @private
*/
const aggregateOverallCounts = results => {
return results.reduce(
(counts, result) => {
counts.errorCount += result.errorCount;
counts.warningCount += result.warningCount;
return counts;
},
{
errorCount: 0,
warningCount: 0
}
);
};
/**
* Processes package.json object
*
* @param {Object} packageJsonObj An object representation of a package.json file.
* @param {Object} configHelper The configuration context.
* @param {String} fileName An optional string representing the package.json file.
* @param {NpmPackageJsonLint} linter NpmPackageJsonLint linter context
* @returns {LintResult} The results for linting on this text.
* @private
*/
const processPackageJsonObject = (packageJsonObj, configHelper, fileName, linter) => {
let filePath;
if (fileName) {
filePath = path.resolve(fileName);
}
const effectiveFileName = fileName || '{}';
const config = configHelper.get(filePath);
const linterResult = linter.lint(packageJsonObj, config.rules);
const counts = aggregateCountsPerFile(linterResult.issues);
const result = {
filePath: `./${path.relative(configHelper.options.cwd, effectiveFileName)}`,
issues: linterResult.issues,
errorCount: counts.errorCount,
warningCount: counts.warningCount
};
return result;
};
/**
* Processes a package.json file.
*
* @param {String} fileName The filename of the file being linted.
* @param {Object} configHelper The configuration context.
* @param {NpmPackageJsonLint} linter Linter context
* @returns {LintResult} The linter results
* @private
*/
const processPackageJsonFile = (fileName, configHelper, linter) => {
const packageJsonObj = Parser.parseJsonFile(path.resolve(fileName));
return processPackageJsonObject(packageJsonObj, configHelper, fileName, linter);
};
/**
* Checks if the given issue is an error issue.
*
* @param {LintIssue} issue npm-package-json-lint issue
* @returns {boolean} True if error, false if warning.
* @private
*/
const isIssueAnError = issue => {
return issue.severity === 'error';
};
/**
* Generates ignorer based on ignore file content.
*
* @param {String} cwd Current work directory.
* @param {CLIEngineOptions} options CLIEngineOptions object.
* @returns {Object} Ignorer
*/
const getIgnorer = (cwd, options) => {
const ignoreFilePath = options.ignorePath || DEFAULT_IGNORE_FILENAME;
const absoluteIgnoreFilePath = path.isAbsolute(ignoreFilePath) ? ignoreFilePath : path.resolve(cwd, ignoreFilePath);
let ignoreText = '';
try {
ignoreText = fs.readFileSync(absoluteIgnoreFilePath, 'utf8');
} catch (readError) {
if (readError.code !== FILE_NOT_FOUND_ERROR_CODE) {
throw readError;
}
}
return ignore().add(ignoreText);
};
/**
* Generates a list of files to lint based on a list of provided patterns
*
* @param {Array<String>} patterns An array of patterns
* @param {CLIEngineOptions} options CLIEngineOptions object.
* @returns {Array} Files list
*/
const getFileList = (patterns, options) => {
const cwd = (options && options.cwd) || process.cwd();
// step 1 - filter out empty entries
const filteredPatterns = patterns.filter(pattern => pattern.length);
// step 2 - convert directories to globs
const globPatterns = filteredPatterns.map(pattern => {
const suffix = '/**/package.json';
let newPath = pattern;
const resolvedPath = path.resolve(cwd, pattern);
if (fs.existsSync(resolvedPath)) | else {
// string trailing /* (Any number of *s)
newPath = newPath.replace(/[/][*]+$/, '') + suffix;
}
return newPath;
});
const files = [];
const addedFiles = new Set();
const ignorer = getIgnorer(cwd, options);
globPatterns.forEach(pattern => {
const file = path.resolve(cwd, pattern);
if (fs.existsSync(file) && fs.statSync(file).isFile()) {
if (addedFiles.has(file) || ignorer.ignores(path.relative(cwd, file))) {
return;
}
addedFiles.add(file);
files.push(file);
} else {
const globOptions = {
nodir: true,
dot: false,
cwd,
ignore: 'node_modules'
};
let globFiles = glob.sync(pattern, globOptions);
// remove node_module package.json files. Manually doing this instead of using glob ignore
// because of https://github.com/isaacs/node-glob/issues/309
globFiles = globFiles.filter(globFile => !globFile.includes('node_modules'));
globFiles.forEach(globFile => {
const filePath = path.resolve(cwd, globFile);
if (addedFiles.has(filePath) || ignorer.ignores(path.relative(cwd, filePath))) {
return;
}
addedFiles.add(filePath);
files.push(filePath);
});
}
});
return files;
};
/**
* Public CLIEngine class
* @class
*/
class CLIEngine {
/**
* constructor
* @param {CLIEngineOptions} passedOptions The options for the CLIEngine.
* @constructor
*/
constructor(passedOptions) {
const options = Object.assign(Object.create(null), {cwd: process.cwd()}, passedOptions);
this.options = options;
this.version = pkg.version;
this.linter = new NpmPackageJsonLint();
if (this.options.rules && Object.keys(this.options.rules).length) {
ConfigValidator.validateRules(this.options.rules, 'cli', this.linter);
}
this.config = new Config(this.options, this.linter);
}
/**
* Gets rules from linter
*
* @returns {Object} Rules object containing the ruleId and path to rule module file.
*/
getRules() {
return this.linter.getRules();
}
/**
* Filters results to only include errors.
*
* @param {LintResult[]} results The results to filter.
* @returns {LintResult[]} The filtered results.
*/
static getErrorResults(results) {
const filtered = [];
results.forEach(result => {
const filteredIssues = result.issues.filter(isIssueAnError);
if (filteredIssues.length > noIssues) {
const filteredResult = {
issues: filteredIssues,
errorCount: filteredIssues.length,
warningCount: 0
};
filtered.push(Object.assign(result, filteredResult));
}
});
return filtered;
}
/**
* Executes the current configuration on an array of file and directory names.
* @param {string[]} patterns An array of file and directory names.
* @returns {Object} The results for all files that were linted.
*/
executeOnPackageJsonFiles(patterns) {
const fileList = getFileList(patterns, this.options);
const results = fileList.map(filePath => processPackageJsonFile(filePath, this.config, this.linter));
const stats = aggregateOverallCounts(results);
return {
results,
errorCount: stats.errorCount,
warningCount: stats.warningCount
};
}
/* eslint-disable id-length */
/**
* Executes linter on package.json object
*
* @param {Object} packageJsonObj An object representation of a package.json file.
* @param {string} filename An optional string representing the texts filename.
* @returns {Object} The results for the linting.
*/
executeOnPackageJsonObject(packageJsonObj, filename) {
const results = [];
const resolvedFilename = filename && !path.isAbsolute(filename) ? path.resolve(this.options.cwd, filename) : filename;
results.push(processPackageJsonObject(packageJsonObj, this.config, resolvedFilename, this.linter));
const count = aggregateOverallCounts(results);
return {
results,
errorCount: count.errorCount,
warningCount: count.warningCount
};
}
/**
* Returns a configuration object for the given file using
* npm-package-json-lint's configuration rules.
*
* @param {String} filePath The path of the file to get configuration for.
* @returns {Object} A configuration object for the file.
*/
getConfigForFile(filePath) {
return this.config.get(filePath);
}
}
module.exports = CLIEngine;
| {
const fileStats = fs.statSync(resolvedPath);
if (fileStats.isFile()) {
if (resolvedPath.endsWith(`${path.sep}package.json`)) {
newPath = resolvedPath;
} else {
throw new Error(`Pattern, ${pattern}, is a file, but isn't a package.json file.`);
}
} else if (fileStats.isDirectory()) {
// strip trailing slash(es)
newPath = newPath.replace(/[/\\]$/, '') + suffix;
}
} | conditional_block |
CLIEngine.js | /* eslint max-lines-per-function: 'off', no-param-reassign: 'off', arrow-body-style: 'off' */
const fs = require('fs');
const path = require('path');
const glob = require('glob');
const ignore = require('ignore');
const NpmPackageJsonLint = require('./NpmPackageJsonLint');
const Config = require('./Config');
const ConfigValidator = require('./config/ConfigValidator');
const Parser = require('./Parser');
const pkg = require('../package.json');
const DEFAULT_IGNORE_FILENAME = '.npmpackagejsonlintignore';
const FILE_NOT_FOUND_ERROR_CODE = 'ENOENT';
const noIssues = 0;
/**
* CLIEngine configuration object
*
* @typedef {Object} CLIEngineOptions
* @property {string} configFile The configuration file to use.
* @property {string} cwd The value to use for the current working directory.
* @property {boolean} useConfigFiles False disables use of .npmpackagejsonlintrc.json files, npmpackagejsonlint.config.js files, and npmPackageJsonLintConfig object in package.json file.
* @property {Object<string,*>} rules An object of rules to use.
*/
/**
* A lint issue. It could be an error or a warning.
* @typedef {Object} LintIssue
* @param {String} lintId Unique, lowercase, hyphen-separate name for the lint
* @param {String} severity 'error' or 'warning'
* @param {String} node Name of the node in the JSON the lint audits
* @param {String} lintMessage Human-friendly message to users
*/
/**
* A linting result.
* @typedef {Object} LintResult
*
* @property {String} filePath The path to the file that was linted.
* @property {LintIssue[]} issues An array of LintIssues from the run.
* @property {Number} errorCount Number of errors for the result.
* @property {Number} warningCount Number of warnings for the result.
*/
/**
* A result count object.
* @typedef {Object} ResultCounts
*
* @property {Number} errorCount Number of errors for the result.
* @property {Number} warningCount Number of warnings for the result.
*/
/**
* Aggregates the count of errors and warning for a package.json file.
*
* @param {LintIssue[]} issues - Array of LintIssue object from a package.json file.
* @returns {ResultCounts} Counts object
* @private
*/
const aggregateCountsPerFile = issues => {
const incrementOne = 1;
return issues.reduce(
(counts, issue) => {
if (issue.severity === 'error') {
counts.errorCount += incrementOne;
} else {
counts.warningCount += incrementOne;
}
return counts;
},
{
errorCount: 0,
warningCount: 0
}
);
};
/**
* Aggregates the count of errors and warnings for all package.json files.
*
* @param {LintResult[]} results Array of LintIssue objects from all package.json files.
* @returns {ResultCounts} Counts object
* @private
*/
const aggregateOverallCounts = results => {
return results.reduce(
(counts, result) => {
counts.errorCount += result.errorCount;
counts.warningCount += result.warningCount;
return counts;
},
{
errorCount: 0,
warningCount: 0
}
);
};
/**
* Processes package.json object
*
* @param {Object} packageJsonObj An object representation of a package.json file.
* @param {Object} configHelper The configuration context.
* @param {String} fileName An optional string representing the package.json file.
* @param {NpmPackageJsonLint} linter NpmPackageJsonLint linter context
* @returns {LintResult} The results for linting on this text.
* @private
*/
const processPackageJsonObject = (packageJsonObj, configHelper, fileName, linter) => {
let filePath;
if (fileName) {
filePath = path.resolve(fileName);
}
const effectiveFileName = fileName || '{}';
const config = configHelper.get(filePath);
const linterResult = linter.lint(packageJsonObj, config.rules);
const counts = aggregateCountsPerFile(linterResult.issues);
const result = {
filePath: `./${path.relative(configHelper.options.cwd, effectiveFileName)}`,
issues: linterResult.issues,
errorCount: counts.errorCount,
warningCount: counts.warningCount
};
return result;
};
/**
* Processes a package.json file.
*
* @param {String} fileName The filename of the file being linted.
* @param {Object} configHelper The configuration context.
* @param {NpmPackageJsonLint} linter Linter context
* @returns {LintResult} The linter results
* @private
*/
const processPackageJsonFile = (fileName, configHelper, linter) => {
const packageJsonObj = Parser.parseJsonFile(path.resolve(fileName));
return processPackageJsonObject(packageJsonObj, configHelper, fileName, linter);
};
/** | * @private
*/
const isIssueAnError = issue => {
return issue.severity === 'error';
};
/**
* Generates ignorer based on ignore file content.
*
* @param {String} cwd Current work directory.
* @param {CLIEngineOptions} options CLIEngineOptions object.
* @returns {Object} Ignorer
*/
const getIgnorer = (cwd, options) => {
const ignoreFilePath = options.ignorePath || DEFAULT_IGNORE_FILENAME;
const absoluteIgnoreFilePath = path.isAbsolute(ignoreFilePath) ? ignoreFilePath : path.resolve(cwd, ignoreFilePath);
let ignoreText = '';
try {
ignoreText = fs.readFileSync(absoluteIgnoreFilePath, 'utf8');
} catch (readError) {
if (readError.code !== FILE_NOT_FOUND_ERROR_CODE) {
throw readError;
}
}
return ignore().add(ignoreText);
};
/**
* Generates a list of files to lint based on a list of provided patterns
*
* @param {Array<String>} patterns An array of patterns
* @param {CLIEngineOptions} options CLIEngineOptions object.
* @returns {Array} Files list
*/
const getFileList = (patterns, options) => {
const cwd = (options && options.cwd) || process.cwd();
// step 1 - filter out empty entries
const filteredPatterns = patterns.filter(pattern => pattern.length);
// step 2 - convert directories to globs
const globPatterns = filteredPatterns.map(pattern => {
const suffix = '/**/package.json';
let newPath = pattern;
const resolvedPath = path.resolve(cwd, pattern);
if (fs.existsSync(resolvedPath)) {
const fileStats = fs.statSync(resolvedPath);
if (fileStats.isFile()) {
if (resolvedPath.endsWith(`${path.sep}package.json`)) {
newPath = resolvedPath;
} else {
throw new Error(`Pattern, ${pattern}, is a file, but isn't a package.json file.`);
}
} else if (fileStats.isDirectory()) {
// strip trailing slash(es)
newPath = newPath.replace(/[/\\]$/, '') + suffix;
}
} else {
// string trailing /* (Any number of *s)
newPath = newPath.replace(/[/][*]+$/, '') + suffix;
}
return newPath;
});
const files = [];
const addedFiles = new Set();
const ignorer = getIgnorer(cwd, options);
globPatterns.forEach(pattern => {
const file = path.resolve(cwd, pattern);
if (fs.existsSync(file) && fs.statSync(file).isFile()) {
if (addedFiles.has(file) || ignorer.ignores(path.relative(cwd, file))) {
return;
}
addedFiles.add(file);
files.push(file);
} else {
const globOptions = {
nodir: true,
dot: false,
cwd,
ignore: 'node_modules'
};
let globFiles = glob.sync(pattern, globOptions);
// remove node_module package.json files. Manually doing this instead of using glob ignore
// because of https://github.com/isaacs/node-glob/issues/309
globFiles = globFiles.filter(globFile => !globFile.includes('node_modules'));
globFiles.forEach(globFile => {
const filePath = path.resolve(cwd, globFile);
if (addedFiles.has(filePath) || ignorer.ignores(path.relative(cwd, filePath))) {
return;
}
addedFiles.add(filePath);
files.push(filePath);
});
}
});
return files;
};
/**
* Public CLIEngine class
* @class
*/
class CLIEngine {
/**
* constructor
* @param {CLIEngineOptions} passedOptions The options for the CLIEngine.
* @constructor
*/
constructor(passedOptions) {
const options = Object.assign(Object.create(null), {cwd: process.cwd()}, passedOptions);
this.options = options;
this.version = pkg.version;
this.linter = new NpmPackageJsonLint();
if (this.options.rules && Object.keys(this.options.rules).length) {
ConfigValidator.validateRules(this.options.rules, 'cli', this.linter);
}
this.config = new Config(this.options, this.linter);
}
/**
* Gets rules from linter
*
* @returns {Object} Rules object containing the ruleId and path to rule module file.
*/
getRules() {
return this.linter.getRules();
}
/**
* Filters results to only include errors.
*
* @param {LintResult[]} results The results to filter.
* @returns {LintResult[]} The filtered results.
*/
static getErrorResults(results) {
const filtered = [];
results.forEach(result => {
const filteredIssues = result.issues.filter(isIssueAnError);
if (filteredIssues.length > noIssues) {
const filteredResult = {
issues: filteredIssues,
errorCount: filteredIssues.length,
warningCount: 0
};
filtered.push(Object.assign(result, filteredResult));
}
});
return filtered;
}
/**
* Executes the current configuration on an array of file and directory names.
* @param {string[]} patterns An array of file and directory names.
* @returns {Object} The results for all files that were linted.
*/
executeOnPackageJsonFiles(patterns) {
const fileList = getFileList(patterns, this.options);
const results = fileList.map(filePath => processPackageJsonFile(filePath, this.config, this.linter));
const stats = aggregateOverallCounts(results);
return {
results,
errorCount: stats.errorCount,
warningCount: stats.warningCount
};
}
/* eslint-disable id-length */
/**
* Executes linter on package.json object
*
* @param {Object} packageJsonObj An object representation of a package.json file.
* @param {string} filename An optional string representing the texts filename.
* @returns {Object} The results for the linting.
*/
executeOnPackageJsonObject(packageJsonObj, filename) {
const results = [];
const resolvedFilename = filename && !path.isAbsolute(filename) ? path.resolve(this.options.cwd, filename) : filename;
results.push(processPackageJsonObject(packageJsonObj, this.config, resolvedFilename, this.linter));
const count = aggregateOverallCounts(results);
return {
results,
errorCount: count.errorCount,
warningCount: count.warningCount
};
}
/**
* Returns a configuration object for the given file using
* npm-package-json-lint's configuration rules.
*
* @param {String} filePath The path of the file to get configuration for.
* @returns {Object} A configuration object for the file.
*/
getConfigForFile(filePath) {
return this.config.get(filePath);
}
}
module.exports = CLIEngine; | * Checks if the given issue is an error issue.
*
* @param {LintIssue} issue npm-package-json-lint issue
* @returns {boolean} True if error, false if warning. | random_line_split |
init.go | package main
import (
"fmt"
"io"
"os"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/lxc/incus/client"
config "github.com/lxc/incus/internal/cliconfig"
"github.com/lxc/incus/shared/api"
cli "github.com/lxc/incus/shared/cmd"
"github.com/lxc/incus/shared/i18n"
"github.com/lxc/incus/shared/termios"
)
type cmdInit struct {
global *cmdGlobal
flagConfig []string
flagDevice []string
flagEphemeral bool
flagNetwork string
flagProfile []string
flagStorage string
flagTarget string
flagType string
flagNoProfiles bool
flagEmpty bool
flagVM bool
}
func (c *cmdInit) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("init", i18n.G("[<remote>:]<image> [<remote>:][<name>]"))
cmd.Short = i18n.G("Create instances from images")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Create instances from images`))
cmd.Example = cli.FormatSection("", i18n.G(`lxc init images:ubuntu/22.04 u1
lxc init images:ubuntu/22.04 u1 < config.yaml
Create the instance with configuration from config.yaml`))
cmd.Hidden = true
cmd.RunE = c.Run
cmd.Flags().StringArrayVarP(&c.flagConfig, "config", "c", nil, i18n.G("Config key/value to apply to the new instance")+"``")
cmd.Flags().StringArrayVarP(&c.flagProfile, "profile", "p", nil, i18n.G("Profile to apply to the new instance")+"``")
cmd.Flags().StringArrayVarP(&c.flagDevice, "device", "d", nil, i18n.G("New key/value to apply to a specific device")+"``")
cmd.Flags().BoolVarP(&c.flagEphemeral, "ephemeral", "e", false, i18n.G("Ephemeral instance"))
cmd.Flags().StringVarP(&c.flagNetwork, "network", "n", "", i18n.G("Network name")+"``")
cmd.Flags().StringVarP(&c.flagStorage, "storage", "s", "", i18n.G("Storage pool name")+"``")
cmd.Flags().StringVarP(&c.flagType, "type", "t", "", i18n.G("Instance type")+"``")
cmd.Flags().StringVar(&c.flagTarget, "target", "", i18n.G("Cluster member name")+"``")
cmd.Flags().BoolVar(&c.flagNoProfiles, "no-profiles", false, i18n.G("Create the instance with no profiles applied"))
cmd.Flags().BoolVar(&c.flagEmpty, "empty", false, i18n.G("Create an empty instance"))
cmd.Flags().BoolVar(&c.flagVM, "vm", false, i18n.G("Create a virtual machine"))
return cmd
}
func (c *cmdInit) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 0, 2)
if exit {
return err
}
if len(args) == 0 && !c.flagEmpty {
_ = cmd.Usage()
return nil
}
_, _, err = c.create(c.global.conf, args)
return err
}
func (c *cmdInit) create(conf *config.Config, args []string) (incus.InstanceServer, string, error) {
var name string
var image string
var remote string
var iremote string
var err error
var stdinData api.InstancePut
var devicesMap map[string]map[string]string
var configMap map[string]string
var profiles []string
// If stdin isn't a terminal, read text from it
if !termios.IsTerminal(getStdinFd()) {
contents, err := io.ReadAll(os.Stdin)
if err != nil {
return nil, "", err
}
err = yaml.Unmarshal(contents, &stdinData)
if err != nil {
return nil, "", err
}
}
if len(args) > 0 {
iremote, image, err = conf.ParseRemote(args[0])
if err != nil {
return nil, "", err
}
if len(args) == 1 {
remote, name, err = conf.ParseRemote("")
if err != nil {
return nil, "", err
}
} else if len(args) == 2 {
remote, name, err = conf.ParseRemote(args[1])
if err != nil {
return nil, "", err
}
}
}
if c.flagEmpty {
if len(args) > 1 {
return nil, "", fmt.Errorf(i18n.G("--empty cannot be combined with an image name"))
}
if len(args) == 0 {
remote, name, err = conf.ParseRemote("")
if err != nil {
return nil, "", err
}
} else if len(args) == 1 {
// Switch image / instance names
name = image
remote = iremote
image = ""
iremote = ""
}
}
d, err := conf.GetInstanceServer(remote)
if err != nil {
return nil, "", err
}
if c.flagTarget != "" {
d = d.UseTarget(c.flagTarget)
}
// Overwrite profiles.
if c.flagProfile != nil {
profiles = c.flagProfile
} else if c.flagNoProfiles {
profiles = []string{}
}
if !c.global.flagQuiet {
if name == "" {
fmt.Printf(i18n.G("Creating the instance") + "\n")
} else {
fmt.Printf(i18n.G("Creating %s")+"\n", name)
}
}
if len(stdinData.Devices) > 0 {
devicesMap = stdinData.Devices
} else {
devicesMap = map[string]map[string]string{}
}
if c.flagNetwork != "" {
network, _, err := d.GetNetwork(c.flagNetwork)
if err != nil {
return nil, "", fmt.Errorf("Failed loading network %q: %w", c.flagNetwork, err)
}
// Prepare the instance's NIC device entry.
var device map[string]string
if network.Managed && d.HasExtension("instance_nic_network") {
// If network is managed, use the network property rather than nictype, so that the
// network's inherited properties are loaded into the NIC when started.
device = map[string]string{
"name": "eth0",
"type": "nic",
"network": network.Name,
}
} else {
// If network is unmanaged default to using a macvlan connected to the specified interface.
device = map[string]string{
"name": "eth0",
"type": "nic",
"nictype": "macvlan",
"parent": c.flagNetwork,
}
if network.Type == "bridge" {
// If the network type is an unmanaged bridge, use bridged NIC type.
device["nictype"] = "bridged"
}
}
devicesMap["eth0"] = device
}
if len(stdinData.Config) > 0 {
configMap = stdinData.Config
} else {
configMap = map[string]string{}
}
for _, entry := range c.flagConfig {
if !strings.Contains(entry, "=") {
return nil, "", fmt.Errorf(i18n.G("Bad key=value pair: %s"), entry)
}
fields := strings.SplitN(entry, "=", 2)
configMap[fields[0]] = fields[1]
}
// Check if the specified storage pool exists.
if c.flagStorage != "" {
_, _, err := d.GetStoragePool(c.flagStorage)
if err != nil {
return nil, "", fmt.Errorf("Failed loading storage pool %q: %w", c.flagStorage, err)
}
devicesMap["root"] = map[string]string{
"type": "disk",
"path": "/",
"pool": c.flagStorage,
}
}
// Decide whether we are creating a container or a virtual machine.
instanceDBType := api.InstanceTypeContainer
if c.flagVM {
instanceDBType = api.InstanceTypeVM
}
// Setup instance creation request
req := api.InstancesPost{
Name: name,
InstanceType: c.flagType,
Type: instanceDBType,
}
req.Config = configMap
req.Ephemeral = c.flagEphemeral
req.Description = stdinData.Description
if !c.flagNoProfiles && len(profiles) == 0 {
if len(stdinData.Profiles) > 0 {
req.Profiles = stdinData.Profiles
} else {
req.Profiles = nil
}
} else {
req.Profiles = profiles
}
// Handle device overrides.
deviceOverrides, err := parseDeviceOverrides(c.flagDevice)
if err != nil {
return nil, "", err
}
// Check to see if any of the overridden devices are for devices that are not yet defined in the
// local devices (and thus maybe expected to be coming from profiles).
profileDevices := make(map[string]map[string]string)
needProfileExpansion := false
for deviceName := range deviceOverrides {
_, isLocalDevice := devicesMap[deviceName]
if !isLocalDevice {
needProfileExpansion = true
break
}
}
// If there are device overrides that are expected to be applied to profile devices then load the profiles
// that would be applied server-side.
if needProfileExpansion {
// If the list of profiles is empty then LXD would apply the default profile on the server side.
serverSideProfiles := req.Profiles
if len(serverSideProfiles) == 0 {
serverSideProfiles = []string{"default"}
}
// Get the effective expanded devices by overlaying each profile's devices in order.
for _, profileName := range serverSideProfiles {
profile, _, err := d.GetProfile(profileName)
if err != nil {
return nil, "", fmt.Errorf(i18n.G("Failed loading profile %q for device override: %w"), profileName, err)
}
for k, v := range profile.Devices {
profileDevices[k] = v
}
}
}
// Apply device overrides.
for deviceName := range deviceOverrides {
_, isLocalDevice := devicesMap[deviceName]
if isLocalDevice {
// Apply overrides to local device.
for k, v := range deviceOverrides[deviceName] {
devicesMap[deviceName][k] = v
}
} else {
// Check device exists in expanded profile devices.
profileDeviceConfig, found := profileDevices[deviceName]
if !found {
return nil, "", fmt.Errorf(i18n.G("Cannot override config for device %q: Device not found in profile devices"), deviceName)
}
for k, v := range deviceOverrides[deviceName] {
profileDeviceConfig[k] = v
}
// Add device to local devices.
devicesMap[deviceName] = profileDeviceConfig
}
}
req.Devices = devicesMap
var opInfo api.Operation
if !c.flagEmpty {
// Get the image server and image info
iremote, image = guessImage(conf, d, remote, iremote, image)
// Deal with the default image
if image == "" {
image = "default"
}
imgRemote, imgInfo, err := getImgInfo(d, conf, iremote, remote, image, &req.Source)
if err != nil {
return nil, "", err
}
if conf.Remotes[iremote].Protocol != "simplestreams" {
if imgInfo.Type != "virtual-machine" && c.flagVM {
return nil, "", fmt.Errorf(i18n.G("Asked for a VM but image is of type container"))
}
req.Type = api.InstanceType(imgInfo.Type)
}
// Create the instance
op, err := d.CreateInstanceFromImage(imgRemote, *imgInfo, req)
if err != nil {
return nil, "", err
}
// Watch the background operation
progress := cli.ProgressRenderer{
Format: i18n.G("Retrieving image: %s"),
Quiet: c.global.flagQuiet,
}
_, err = op.AddHandler(progress.UpdateOp)
if err != nil {
progress.Done("")
return nil, "", err
}
err = cli.CancelableWait(op, &progress)
if err != nil {
progress.Done("")
return nil, "", err
}
progress.Done("")
// Extract the instance name
info, err := op.GetTarget()
if err != nil {
return nil, "", err
}
opInfo = *info
} else {
req.Source.Type = "none"
op, err := d.CreateInstance(req)
if err != nil {
return nil, "", err
}
err = op.Wait()
if err != nil {
return nil, "", err
}
opInfo = op.Get()
}
instances, ok := opInfo.Resources["instances"]
if !ok || len(instances) == 0 {
// Try using the older "containers" field
instances, ok = opInfo.Resources["containers"]
if !ok || len(instances) == 0 {
return nil, "", fmt.Errorf(i18n.G("Didn't get any affected image, instance or snapshot from server"))
}
}
if len(instances) == 1 && name == "" {
fields := strings.Split(instances[0], "/")
name = fields[len(fields)-1]
fmt.Printf(i18n.G("Instance name is: %s")+"\n", name)
}
// Validate the network setup
c.checkNetwork(d, name)
return d, name, nil
}
func (c *cmdInit) | (d incus.InstanceServer, name string) {
ct, _, err := d.GetInstance(name)
if err != nil {
return
}
for _, d := range ct.ExpandedDevices {
if d["type"] == "nic" {
return
}
}
fmt.Fprintf(os.Stderr, "\n"+i18n.G("The instance you are starting doesn't have any network attached to it.")+"\n")
fmt.Fprintf(os.Stderr, " "+i18n.G("To create a new network, use: lxc network create")+"\n")
fmt.Fprintf(os.Stderr, " "+i18n.G("To attach a network to an instance, use: lxc network attach")+"\n\n")
}
| checkNetwork | identifier_name |
init.go | package main
import (
"fmt"
"io"
"os"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/lxc/incus/client"
config "github.com/lxc/incus/internal/cliconfig"
"github.com/lxc/incus/shared/api"
cli "github.com/lxc/incus/shared/cmd"
"github.com/lxc/incus/shared/i18n"
"github.com/lxc/incus/shared/termios"
)
type cmdInit struct {
global *cmdGlobal
flagConfig []string
flagDevice []string
flagEphemeral bool
flagNetwork string
flagProfile []string
flagStorage string
flagTarget string
flagType string
flagNoProfiles bool
flagEmpty bool
flagVM bool
}
func (c *cmdInit) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("init", i18n.G("[<remote>:]<image> [<remote>:][<name>]"))
cmd.Short = i18n.G("Create instances from images")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Create instances from images`))
cmd.Example = cli.FormatSection("", i18n.G(`lxc init images:ubuntu/22.04 u1
lxc init images:ubuntu/22.04 u1 < config.yaml
Create the instance with configuration from config.yaml`))
cmd.Hidden = true
cmd.RunE = c.Run
cmd.Flags().StringArrayVarP(&c.flagConfig, "config", "c", nil, i18n.G("Config key/value to apply to the new instance")+"``")
cmd.Flags().StringArrayVarP(&c.flagProfile, "profile", "p", nil, i18n.G("Profile to apply to the new instance")+"``")
cmd.Flags().StringArrayVarP(&c.flagDevice, "device", "d", nil, i18n.G("New key/value to apply to a specific device")+"``")
cmd.Flags().BoolVarP(&c.flagEphemeral, "ephemeral", "e", false, i18n.G("Ephemeral instance"))
cmd.Flags().StringVarP(&c.flagNetwork, "network", "n", "", i18n.G("Network name")+"``")
cmd.Flags().StringVarP(&c.flagStorage, "storage", "s", "", i18n.G("Storage pool name")+"``")
cmd.Flags().StringVarP(&c.flagType, "type", "t", "", i18n.G("Instance type")+"``")
cmd.Flags().StringVar(&c.flagTarget, "target", "", i18n.G("Cluster member name")+"``")
cmd.Flags().BoolVar(&c.flagNoProfiles, "no-profiles", false, i18n.G("Create the instance with no profiles applied"))
cmd.Flags().BoolVar(&c.flagEmpty, "empty", false, i18n.G("Create an empty instance"))
cmd.Flags().BoolVar(&c.flagVM, "vm", false, i18n.G("Create a virtual machine"))
return cmd
}
func (c *cmdInit) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 0, 2)
if exit {
return err
}
if len(args) == 0 && !c.flagEmpty {
_ = cmd.Usage()
return nil
}
_, _, err = c.create(c.global.conf, args)
return err
}
func (c *cmdInit) create(conf *config.Config, args []string) (incus.InstanceServer, string, error) {
var name string
var image string
var remote string
var iremote string
var err error
var stdinData api.InstancePut
var devicesMap map[string]map[string]string
var configMap map[string]string
var profiles []string
// If stdin isn't a terminal, read text from it
if !termios.IsTerminal(getStdinFd()) {
contents, err := io.ReadAll(os.Stdin)
if err != nil {
return nil, "", err
}
err = yaml.Unmarshal(contents, &stdinData)
if err != nil {
return nil, "", err
}
}
if len(args) > 0 {
iremote, image, err = conf.ParseRemote(args[0])
if err != nil {
return nil, "", err
}
if len(args) == 1 {
remote, name, err = conf.ParseRemote("")
if err != nil {
return nil, "", err
}
} else if len(args) == 2 {
remote, name, err = conf.ParseRemote(args[1])
if err != nil {
return nil, "", err
}
}
}
if c.flagEmpty {
if len(args) > 1 {
return nil, "", fmt.Errorf(i18n.G("--empty cannot be combined with an image name"))
}
if len(args) == 0 {
remote, name, err = conf.ParseRemote("")
if err != nil {
return nil, "", err
}
} else if len(args) == 1 {
// Switch image / instance names
name = image
remote = iremote
image = ""
iremote = ""
}
}
d, err := conf.GetInstanceServer(remote)
if err != nil {
return nil, "", err
}
if c.flagTarget != "" {
d = d.UseTarget(c.flagTarget)
}
// Overwrite profiles.
if c.flagProfile != nil {
profiles = c.flagProfile
} else if c.flagNoProfiles {
profiles = []string{}
}
if !c.global.flagQuiet {
if name == "" {
fmt.Printf(i18n.G("Creating the instance") + "\n")
} else {
fmt.Printf(i18n.G("Creating %s")+"\n", name)
}
}
if len(stdinData.Devices) > 0 {
devicesMap = stdinData.Devices
} else {
devicesMap = map[string]map[string]string{}
}
if c.flagNetwork != "" {
network, _, err := d.GetNetwork(c.flagNetwork)
if err != nil {
return nil, "", fmt.Errorf("Failed loading network %q: %w", c.flagNetwork, err)
}
// Prepare the instance's NIC device entry.
var device map[string]string
if network.Managed && d.HasExtension("instance_nic_network") {
// If network is managed, use the network property rather than nictype, so that the
// network's inherited properties are loaded into the NIC when started.
device = map[string]string{
"name": "eth0",
"type": "nic",
"network": network.Name,
}
} else {
// If network is unmanaged default to using a macvlan connected to the specified interface.
device = map[string]string{
"name": "eth0",
"type": "nic",
"nictype": "macvlan",
"parent": c.flagNetwork,
}
if network.Type == "bridge" {
// If the network type is an unmanaged bridge, use bridged NIC type.
device["nictype"] = "bridged"
}
}
devicesMap["eth0"] = device
}
if len(stdinData.Config) > 0 {
configMap = stdinData.Config
} else {
configMap = map[string]string{}
}
for _, entry := range c.flagConfig {
if !strings.Contains(entry, "=") {
return nil, "", fmt.Errorf(i18n.G("Bad key=value pair: %s"), entry)
}
fields := strings.SplitN(entry, "=", 2)
configMap[fields[0]] = fields[1]
}
// Check if the specified storage pool exists.
if c.flagStorage != "" {
_, _, err := d.GetStoragePool(c.flagStorage)
if err != nil {
return nil, "", fmt.Errorf("Failed loading storage pool %q: %w", c.flagStorage, err)
}
devicesMap["root"] = map[string]string{
"type": "disk",
"path": "/",
"pool": c.flagStorage,
}
}
// Decide whether we are creating a container or a virtual machine.
instanceDBType := api.InstanceTypeContainer
if c.flagVM {
instanceDBType = api.InstanceTypeVM
}
// Setup instance creation request
req := api.InstancesPost{
Name: name,
InstanceType: c.flagType,
Type: instanceDBType,
}
req.Config = configMap
req.Ephemeral = c.flagEphemeral
req.Description = stdinData.Description
if !c.flagNoProfiles && len(profiles) == 0 {
if len(stdinData.Profiles) > 0 {
req.Profiles = stdinData.Profiles
} else {
req.Profiles = nil
}
} else {
req.Profiles = profiles
}
// Handle device overrides.
deviceOverrides, err := parseDeviceOverrides(c.flagDevice)
if err != nil |
// Check to see if any of the overridden devices are for devices that are not yet defined in the
// local devices (and thus maybe expected to be coming from profiles).
profileDevices := make(map[string]map[string]string)
needProfileExpansion := false
for deviceName := range deviceOverrides {
_, isLocalDevice := devicesMap[deviceName]
if !isLocalDevice {
needProfileExpansion = true
break
}
}
// If there are device overrides that are expected to be applied to profile devices then load the profiles
// that would be applied server-side.
if needProfileExpansion {
// If the list of profiles is empty then LXD would apply the default profile on the server side.
serverSideProfiles := req.Profiles
if len(serverSideProfiles) == 0 {
serverSideProfiles = []string{"default"}
}
// Get the effective expanded devices by overlaying each profile's devices in order.
for _, profileName := range serverSideProfiles {
profile, _, err := d.GetProfile(profileName)
if err != nil {
return nil, "", fmt.Errorf(i18n.G("Failed loading profile %q for device override: %w"), profileName, err)
}
for k, v := range profile.Devices {
profileDevices[k] = v
}
}
}
// Apply device overrides.
for deviceName := range deviceOverrides {
_, isLocalDevice := devicesMap[deviceName]
if isLocalDevice {
// Apply overrides to local device.
for k, v := range deviceOverrides[deviceName] {
devicesMap[deviceName][k] = v
}
} else {
// Check device exists in expanded profile devices.
profileDeviceConfig, found := profileDevices[deviceName]
if !found {
return nil, "", fmt.Errorf(i18n.G("Cannot override config for device %q: Device not found in profile devices"), deviceName)
}
for k, v := range deviceOverrides[deviceName] {
profileDeviceConfig[k] = v
}
// Add device to local devices.
devicesMap[deviceName] = profileDeviceConfig
}
}
req.Devices = devicesMap
var opInfo api.Operation
if !c.flagEmpty {
// Get the image server and image info
iremote, image = guessImage(conf, d, remote, iremote, image)
// Deal with the default image
if image == "" {
image = "default"
}
imgRemote, imgInfo, err := getImgInfo(d, conf, iremote, remote, image, &req.Source)
if err != nil {
return nil, "", err
}
if conf.Remotes[iremote].Protocol != "simplestreams" {
if imgInfo.Type != "virtual-machine" && c.flagVM {
return nil, "", fmt.Errorf(i18n.G("Asked for a VM but image is of type container"))
}
req.Type = api.InstanceType(imgInfo.Type)
}
// Create the instance
op, err := d.CreateInstanceFromImage(imgRemote, *imgInfo, req)
if err != nil {
return nil, "", err
}
// Watch the background operation
progress := cli.ProgressRenderer{
Format: i18n.G("Retrieving image: %s"),
Quiet: c.global.flagQuiet,
}
_, err = op.AddHandler(progress.UpdateOp)
if err != nil {
progress.Done("")
return nil, "", err
}
err = cli.CancelableWait(op, &progress)
if err != nil {
progress.Done("")
return nil, "", err
}
progress.Done("")
// Extract the instance name
info, err := op.GetTarget()
if err != nil {
return nil, "", err
}
opInfo = *info
} else {
req.Source.Type = "none"
op, err := d.CreateInstance(req)
if err != nil {
return nil, "", err
}
err = op.Wait()
if err != nil {
return nil, "", err
}
opInfo = op.Get()
}
instances, ok := opInfo.Resources["instances"]
if !ok || len(instances) == 0 {
// Try using the older "containers" field
instances, ok = opInfo.Resources["containers"]
if !ok || len(instances) == 0 {
return nil, "", fmt.Errorf(i18n.G("Didn't get any affected image, instance or snapshot from server"))
}
}
if len(instances) == 1 && name == "" {
fields := strings.Split(instances[0], "/")
name = fields[len(fields)-1]
fmt.Printf(i18n.G("Instance name is: %s")+"\n", name)
}
// Validate the network setup
c.checkNetwork(d, name)
return d, name, nil
}
func (c *cmdInit) checkNetwork(d incus.InstanceServer, name string) {
ct, _, err := d.GetInstance(name)
if err != nil {
return
}
for _, d := range ct.ExpandedDevices {
if d["type"] == "nic" {
return
}
}
fmt.Fprintf(os.Stderr, "\n"+i18n.G("The instance you are starting doesn't have any network attached to it.")+"\n")
fmt.Fprintf(os.Stderr, " "+i18n.G("To create a new network, use: lxc network create")+"\n")
fmt.Fprintf(os.Stderr, " "+i18n.G("To attach a network to an instance, use: lxc network attach")+"\n\n")
}
| {
return nil, "", err
} | conditional_block |
init.go | package main
import (
"fmt"
"io"
"os"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/lxc/incus/client"
config "github.com/lxc/incus/internal/cliconfig"
"github.com/lxc/incus/shared/api"
cli "github.com/lxc/incus/shared/cmd"
"github.com/lxc/incus/shared/i18n"
"github.com/lxc/incus/shared/termios"
)
type cmdInit struct {
global *cmdGlobal
flagConfig []string
flagDevice []string
flagEphemeral bool
flagNetwork string
flagProfile []string
flagStorage string
flagTarget string
flagType string
flagNoProfiles bool
flagEmpty bool
flagVM bool
}
func (c *cmdInit) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("init", i18n.G("[<remote>:]<image> [<remote>:][<name>]"))
cmd.Short = i18n.G("Create instances from images")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Create instances from images`))
cmd.Example = cli.FormatSection("", i18n.G(`lxc init images:ubuntu/22.04 u1
lxc init images:ubuntu/22.04 u1 < config.yaml
Create the instance with configuration from config.yaml`))
cmd.Hidden = true
cmd.RunE = c.Run
cmd.Flags().StringArrayVarP(&c.flagConfig, "config", "c", nil, i18n.G("Config key/value to apply to the new instance")+"``")
cmd.Flags().StringArrayVarP(&c.flagProfile, "profile", "p", nil, i18n.G("Profile to apply to the new instance")+"``")
cmd.Flags().StringArrayVarP(&c.flagDevice, "device", "d", nil, i18n.G("New key/value to apply to a specific device")+"``")
cmd.Flags().BoolVarP(&c.flagEphemeral, "ephemeral", "e", false, i18n.G("Ephemeral instance"))
cmd.Flags().StringVarP(&c.flagNetwork, "network", "n", "", i18n.G("Network name")+"``")
cmd.Flags().StringVarP(&c.flagStorage, "storage", "s", "", i18n.G("Storage pool name")+"``")
cmd.Flags().StringVarP(&c.flagType, "type", "t", "", i18n.G("Instance type")+"``")
cmd.Flags().StringVar(&c.flagTarget, "target", "", i18n.G("Cluster member name")+"``")
cmd.Flags().BoolVar(&c.flagNoProfiles, "no-profiles", false, i18n.G("Create the instance with no profiles applied"))
cmd.Flags().BoolVar(&c.flagEmpty, "empty", false, i18n.G("Create an empty instance"))
cmd.Flags().BoolVar(&c.flagVM, "vm", false, i18n.G("Create a virtual machine"))
return cmd
}
func (c *cmdInit) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 0, 2)
if exit {
return err
}
if len(args) == 0 && !c.flagEmpty {
_ = cmd.Usage()
return nil
}
_, _, err = c.create(c.global.conf, args)
return err
}
func (c *cmdInit) create(conf *config.Config, args []string) (incus.InstanceServer, string, error) {
var name string
var image string
var remote string
var iremote string
var err error
var stdinData api.InstancePut
var devicesMap map[string]map[string]string
var configMap map[string]string
var profiles []string
// If stdin isn't a terminal, read text from it
if !termios.IsTerminal(getStdinFd()) {
contents, err := io.ReadAll(os.Stdin)
if err != nil {
return nil, "", err
}
err = yaml.Unmarshal(contents, &stdinData)
if err != nil {
return nil, "", err
}
}
if len(args) > 0 {
iremote, image, err = conf.ParseRemote(args[0])
if err != nil {
return nil, "", err
}
if len(args) == 1 {
remote, name, err = conf.ParseRemote("")
if err != nil {
return nil, "", err
}
} else if len(args) == 2 {
remote, name, err = conf.ParseRemote(args[1])
if err != nil {
return nil, "", err
}
}
}
if c.flagEmpty {
if len(args) > 1 {
return nil, "", fmt.Errorf(i18n.G("--empty cannot be combined with an image name"))
}
if len(args) == 0 {
remote, name, err = conf.ParseRemote("")
if err != nil {
return nil, "", err
}
} else if len(args) == 1 {
// Switch image / instance names
name = image
remote = iremote
image = ""
iremote = ""
}
}
d, err := conf.GetInstanceServer(remote)
if err != nil {
return nil, "", err
}
if c.flagTarget != "" {
d = d.UseTarget(c.flagTarget)
}
// Overwrite profiles. | profiles = []string{}
}
if !c.global.flagQuiet {
if name == "" {
fmt.Printf(i18n.G("Creating the instance") + "\n")
} else {
fmt.Printf(i18n.G("Creating %s")+"\n", name)
}
}
if len(stdinData.Devices) > 0 {
devicesMap = stdinData.Devices
} else {
devicesMap = map[string]map[string]string{}
}
if c.flagNetwork != "" {
network, _, err := d.GetNetwork(c.flagNetwork)
if err != nil {
return nil, "", fmt.Errorf("Failed loading network %q: %w", c.flagNetwork, err)
}
// Prepare the instance's NIC device entry.
var device map[string]string
if network.Managed && d.HasExtension("instance_nic_network") {
// If network is managed, use the network property rather than nictype, so that the
// network's inherited properties are loaded into the NIC when started.
device = map[string]string{
"name": "eth0",
"type": "nic",
"network": network.Name,
}
} else {
// If network is unmanaged default to using a macvlan connected to the specified interface.
device = map[string]string{
"name": "eth0",
"type": "nic",
"nictype": "macvlan",
"parent": c.flagNetwork,
}
if network.Type == "bridge" {
// If the network type is an unmanaged bridge, use bridged NIC type.
device["nictype"] = "bridged"
}
}
devicesMap["eth0"] = device
}
if len(stdinData.Config) > 0 {
configMap = stdinData.Config
} else {
configMap = map[string]string{}
}
for _, entry := range c.flagConfig {
if !strings.Contains(entry, "=") {
return nil, "", fmt.Errorf(i18n.G("Bad key=value pair: %s"), entry)
}
fields := strings.SplitN(entry, "=", 2)
configMap[fields[0]] = fields[1]
}
// Check if the specified storage pool exists.
if c.flagStorage != "" {
_, _, err := d.GetStoragePool(c.flagStorage)
if err != nil {
return nil, "", fmt.Errorf("Failed loading storage pool %q: %w", c.flagStorage, err)
}
devicesMap["root"] = map[string]string{
"type": "disk",
"path": "/",
"pool": c.flagStorage,
}
}
// Decide whether we are creating a container or a virtual machine.
instanceDBType := api.InstanceTypeContainer
if c.flagVM {
instanceDBType = api.InstanceTypeVM
}
// Setup instance creation request
req := api.InstancesPost{
Name: name,
InstanceType: c.flagType,
Type: instanceDBType,
}
req.Config = configMap
req.Ephemeral = c.flagEphemeral
req.Description = stdinData.Description
if !c.flagNoProfiles && len(profiles) == 0 {
if len(stdinData.Profiles) > 0 {
req.Profiles = stdinData.Profiles
} else {
req.Profiles = nil
}
} else {
req.Profiles = profiles
}
// Handle device overrides.
deviceOverrides, err := parseDeviceOverrides(c.flagDevice)
if err != nil {
return nil, "", err
}
// Check to see if any of the overridden devices are for devices that are not yet defined in the
// local devices (and thus maybe expected to be coming from profiles).
profileDevices := make(map[string]map[string]string)
needProfileExpansion := false
for deviceName := range deviceOverrides {
_, isLocalDevice := devicesMap[deviceName]
if !isLocalDevice {
needProfileExpansion = true
break
}
}
// If there are device overrides that are expected to be applied to profile devices then load the profiles
// that would be applied server-side.
if needProfileExpansion {
// If the list of profiles is empty then LXD would apply the default profile on the server side.
serverSideProfiles := req.Profiles
if len(serverSideProfiles) == 0 {
serverSideProfiles = []string{"default"}
}
// Get the effective expanded devices by overlaying each profile's devices in order.
for _, profileName := range serverSideProfiles {
profile, _, err := d.GetProfile(profileName)
if err != nil {
return nil, "", fmt.Errorf(i18n.G("Failed loading profile %q for device override: %w"), profileName, err)
}
for k, v := range profile.Devices {
profileDevices[k] = v
}
}
}
// Apply device overrides.
for deviceName := range deviceOverrides {
_, isLocalDevice := devicesMap[deviceName]
if isLocalDevice {
// Apply overrides to local device.
for k, v := range deviceOverrides[deviceName] {
devicesMap[deviceName][k] = v
}
} else {
// Check device exists in expanded profile devices.
profileDeviceConfig, found := profileDevices[deviceName]
if !found {
return nil, "", fmt.Errorf(i18n.G("Cannot override config for device %q: Device not found in profile devices"), deviceName)
}
for k, v := range deviceOverrides[deviceName] {
profileDeviceConfig[k] = v
}
// Add device to local devices.
devicesMap[deviceName] = profileDeviceConfig
}
}
req.Devices = devicesMap
var opInfo api.Operation
if !c.flagEmpty {
// Get the image server and image info
iremote, image = guessImage(conf, d, remote, iremote, image)
// Deal with the default image
if image == "" {
image = "default"
}
imgRemote, imgInfo, err := getImgInfo(d, conf, iremote, remote, image, &req.Source)
if err != nil {
return nil, "", err
}
if conf.Remotes[iremote].Protocol != "simplestreams" {
if imgInfo.Type != "virtual-machine" && c.flagVM {
return nil, "", fmt.Errorf(i18n.G("Asked for a VM but image is of type container"))
}
req.Type = api.InstanceType(imgInfo.Type)
}
// Create the instance
op, err := d.CreateInstanceFromImage(imgRemote, *imgInfo, req)
if err != nil {
return nil, "", err
}
// Watch the background operation
progress := cli.ProgressRenderer{
Format: i18n.G("Retrieving image: %s"),
Quiet: c.global.flagQuiet,
}
_, err = op.AddHandler(progress.UpdateOp)
if err != nil {
progress.Done("")
return nil, "", err
}
err = cli.CancelableWait(op, &progress)
if err != nil {
progress.Done("")
return nil, "", err
}
progress.Done("")
// Extract the instance name
info, err := op.GetTarget()
if err != nil {
return nil, "", err
}
opInfo = *info
} else {
req.Source.Type = "none"
op, err := d.CreateInstance(req)
if err != nil {
return nil, "", err
}
err = op.Wait()
if err != nil {
return nil, "", err
}
opInfo = op.Get()
}
instances, ok := opInfo.Resources["instances"]
if !ok || len(instances) == 0 {
// Try using the older "containers" field
instances, ok = opInfo.Resources["containers"]
if !ok || len(instances) == 0 {
return nil, "", fmt.Errorf(i18n.G("Didn't get any affected image, instance or snapshot from server"))
}
}
if len(instances) == 1 && name == "" {
fields := strings.Split(instances[0], "/")
name = fields[len(fields)-1]
fmt.Printf(i18n.G("Instance name is: %s")+"\n", name)
}
// Validate the network setup
c.checkNetwork(d, name)
return d, name, nil
}
func (c *cmdInit) checkNetwork(d incus.InstanceServer, name string) {
ct, _, err := d.GetInstance(name)
if err != nil {
return
}
for _, d := range ct.ExpandedDevices {
if d["type"] == "nic" {
return
}
}
fmt.Fprintf(os.Stderr, "\n"+i18n.G("The instance you are starting doesn't have any network attached to it.")+"\n")
fmt.Fprintf(os.Stderr, " "+i18n.G("To create a new network, use: lxc network create")+"\n")
fmt.Fprintf(os.Stderr, " "+i18n.G("To attach a network to an instance, use: lxc network attach")+"\n\n")
} | if c.flagProfile != nil {
profiles = c.flagProfile
} else if c.flagNoProfiles { | random_line_split |
init.go | package main
import (
"fmt"
"io"
"os"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/lxc/incus/client"
config "github.com/lxc/incus/internal/cliconfig"
"github.com/lxc/incus/shared/api"
cli "github.com/lxc/incus/shared/cmd"
"github.com/lxc/incus/shared/i18n"
"github.com/lxc/incus/shared/termios"
)
type cmdInit struct {
global *cmdGlobal
flagConfig []string
flagDevice []string
flagEphemeral bool
flagNetwork string
flagProfile []string
flagStorage string
flagTarget string
flagType string
flagNoProfiles bool
flagEmpty bool
flagVM bool
}
func (c *cmdInit) Command() *cobra.Command |
func (c *cmdInit) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 0, 2)
if exit {
return err
}
if len(args) == 0 && !c.flagEmpty {
_ = cmd.Usage()
return nil
}
_, _, err = c.create(c.global.conf, args)
return err
}
func (c *cmdInit) create(conf *config.Config, args []string) (incus.InstanceServer, string, error) {
var name string
var image string
var remote string
var iremote string
var err error
var stdinData api.InstancePut
var devicesMap map[string]map[string]string
var configMap map[string]string
var profiles []string
// If stdin isn't a terminal, read text from it
if !termios.IsTerminal(getStdinFd()) {
contents, err := io.ReadAll(os.Stdin)
if err != nil {
return nil, "", err
}
err = yaml.Unmarshal(contents, &stdinData)
if err != nil {
return nil, "", err
}
}
if len(args) > 0 {
iremote, image, err = conf.ParseRemote(args[0])
if err != nil {
return nil, "", err
}
if len(args) == 1 {
remote, name, err = conf.ParseRemote("")
if err != nil {
return nil, "", err
}
} else if len(args) == 2 {
remote, name, err = conf.ParseRemote(args[1])
if err != nil {
return nil, "", err
}
}
}
if c.flagEmpty {
if len(args) > 1 {
return nil, "", fmt.Errorf(i18n.G("--empty cannot be combined with an image name"))
}
if len(args) == 0 {
remote, name, err = conf.ParseRemote("")
if err != nil {
return nil, "", err
}
} else if len(args) == 1 {
// Switch image / instance names
name = image
remote = iremote
image = ""
iremote = ""
}
}
d, err := conf.GetInstanceServer(remote)
if err != nil {
return nil, "", err
}
if c.flagTarget != "" {
d = d.UseTarget(c.flagTarget)
}
// Overwrite profiles.
if c.flagProfile != nil {
profiles = c.flagProfile
} else if c.flagNoProfiles {
profiles = []string{}
}
if !c.global.flagQuiet {
if name == "" {
fmt.Printf(i18n.G("Creating the instance") + "\n")
} else {
fmt.Printf(i18n.G("Creating %s")+"\n", name)
}
}
if len(stdinData.Devices) > 0 {
devicesMap = stdinData.Devices
} else {
devicesMap = map[string]map[string]string{}
}
if c.flagNetwork != "" {
network, _, err := d.GetNetwork(c.flagNetwork)
if err != nil {
return nil, "", fmt.Errorf("Failed loading network %q: %w", c.flagNetwork, err)
}
// Prepare the instance's NIC device entry.
var device map[string]string
if network.Managed && d.HasExtension("instance_nic_network") {
// If network is managed, use the network property rather than nictype, so that the
// network's inherited properties are loaded into the NIC when started.
device = map[string]string{
"name": "eth0",
"type": "nic",
"network": network.Name,
}
} else {
// If network is unmanaged default to using a macvlan connected to the specified interface.
device = map[string]string{
"name": "eth0",
"type": "nic",
"nictype": "macvlan",
"parent": c.flagNetwork,
}
if network.Type == "bridge" {
// If the network type is an unmanaged bridge, use bridged NIC type.
device["nictype"] = "bridged"
}
}
devicesMap["eth0"] = device
}
if len(stdinData.Config) > 0 {
configMap = stdinData.Config
} else {
configMap = map[string]string{}
}
for _, entry := range c.flagConfig {
if !strings.Contains(entry, "=") {
return nil, "", fmt.Errorf(i18n.G("Bad key=value pair: %s"), entry)
}
fields := strings.SplitN(entry, "=", 2)
configMap[fields[0]] = fields[1]
}
// Check if the specified storage pool exists.
if c.flagStorage != "" {
_, _, err := d.GetStoragePool(c.flagStorage)
if err != nil {
return nil, "", fmt.Errorf("Failed loading storage pool %q: %w", c.flagStorage, err)
}
devicesMap["root"] = map[string]string{
"type": "disk",
"path": "/",
"pool": c.flagStorage,
}
}
// Decide whether we are creating a container or a virtual machine.
instanceDBType := api.InstanceTypeContainer
if c.flagVM {
instanceDBType = api.InstanceTypeVM
}
// Setup instance creation request
req := api.InstancesPost{
Name: name,
InstanceType: c.flagType,
Type: instanceDBType,
}
req.Config = configMap
req.Ephemeral = c.flagEphemeral
req.Description = stdinData.Description
if !c.flagNoProfiles && len(profiles) == 0 {
if len(stdinData.Profiles) > 0 {
req.Profiles = stdinData.Profiles
} else {
req.Profiles = nil
}
} else {
req.Profiles = profiles
}
// Handle device overrides.
deviceOverrides, err := parseDeviceOverrides(c.flagDevice)
if err != nil {
return nil, "", err
}
// Check to see if any of the overridden devices are for devices that are not yet defined in the
// local devices (and thus maybe expected to be coming from profiles).
profileDevices := make(map[string]map[string]string)
needProfileExpansion := false
for deviceName := range deviceOverrides {
_, isLocalDevice := devicesMap[deviceName]
if !isLocalDevice {
needProfileExpansion = true
break
}
}
// If there are device overrides that are expected to be applied to profile devices then load the profiles
// that would be applied server-side.
if needProfileExpansion {
// If the list of profiles is empty then LXD would apply the default profile on the server side.
serverSideProfiles := req.Profiles
if len(serverSideProfiles) == 0 {
serverSideProfiles = []string{"default"}
}
// Get the effective expanded devices by overlaying each profile's devices in order.
for _, profileName := range serverSideProfiles {
profile, _, err := d.GetProfile(profileName)
if err != nil {
return nil, "", fmt.Errorf(i18n.G("Failed loading profile %q for device override: %w"), profileName, err)
}
for k, v := range profile.Devices {
profileDevices[k] = v
}
}
}
// Apply device overrides.
for deviceName := range deviceOverrides {
_, isLocalDevice := devicesMap[deviceName]
if isLocalDevice {
// Apply overrides to local device.
for k, v := range deviceOverrides[deviceName] {
devicesMap[deviceName][k] = v
}
} else {
// Check device exists in expanded profile devices.
profileDeviceConfig, found := profileDevices[deviceName]
if !found {
return nil, "", fmt.Errorf(i18n.G("Cannot override config for device %q: Device not found in profile devices"), deviceName)
}
for k, v := range deviceOverrides[deviceName] {
profileDeviceConfig[k] = v
}
// Add device to local devices.
devicesMap[deviceName] = profileDeviceConfig
}
}
req.Devices = devicesMap
var opInfo api.Operation
if !c.flagEmpty {
// Get the image server and image info
iremote, image = guessImage(conf, d, remote, iremote, image)
// Deal with the default image
if image == "" {
image = "default"
}
imgRemote, imgInfo, err := getImgInfo(d, conf, iremote, remote, image, &req.Source)
if err != nil {
return nil, "", err
}
if conf.Remotes[iremote].Protocol != "simplestreams" {
if imgInfo.Type != "virtual-machine" && c.flagVM {
return nil, "", fmt.Errorf(i18n.G("Asked for a VM but image is of type container"))
}
req.Type = api.InstanceType(imgInfo.Type)
}
// Create the instance
op, err := d.CreateInstanceFromImage(imgRemote, *imgInfo, req)
if err != nil {
return nil, "", err
}
// Watch the background operation
progress := cli.ProgressRenderer{
Format: i18n.G("Retrieving image: %s"),
Quiet: c.global.flagQuiet,
}
_, err = op.AddHandler(progress.UpdateOp)
if err != nil {
progress.Done("")
return nil, "", err
}
err = cli.CancelableWait(op, &progress)
if err != nil {
progress.Done("")
return nil, "", err
}
progress.Done("")
// Extract the instance name
info, err := op.GetTarget()
if err != nil {
return nil, "", err
}
opInfo = *info
} else {
req.Source.Type = "none"
op, err := d.CreateInstance(req)
if err != nil {
return nil, "", err
}
err = op.Wait()
if err != nil {
return nil, "", err
}
opInfo = op.Get()
}
instances, ok := opInfo.Resources["instances"]
if !ok || len(instances) == 0 {
// Try using the older "containers" field
instances, ok = opInfo.Resources["containers"]
if !ok || len(instances) == 0 {
return nil, "", fmt.Errorf(i18n.G("Didn't get any affected image, instance or snapshot from server"))
}
}
if len(instances) == 1 && name == "" {
fields := strings.Split(instances[0], "/")
name = fields[len(fields)-1]
fmt.Printf(i18n.G("Instance name is: %s")+"\n", name)
}
// Validate the network setup
c.checkNetwork(d, name)
return d, name, nil
}
func (c *cmdInit) checkNetwork(d incus.InstanceServer, name string) {
ct, _, err := d.GetInstance(name)
if err != nil {
return
}
for _, d := range ct.ExpandedDevices {
if d["type"] == "nic" {
return
}
}
fmt.Fprintf(os.Stderr, "\n"+i18n.G("The instance you are starting doesn't have any network attached to it.")+"\n")
fmt.Fprintf(os.Stderr, " "+i18n.G("To create a new network, use: lxc network create")+"\n")
fmt.Fprintf(os.Stderr, " "+i18n.G("To attach a network to an instance, use: lxc network attach")+"\n\n")
}
| {
cmd := &cobra.Command{}
cmd.Use = usage("init", i18n.G("[<remote>:]<image> [<remote>:][<name>]"))
cmd.Short = i18n.G("Create instances from images")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Create instances from images`))
cmd.Example = cli.FormatSection("", i18n.G(`lxc init images:ubuntu/22.04 u1
lxc init images:ubuntu/22.04 u1 < config.yaml
Create the instance with configuration from config.yaml`))
cmd.Hidden = true
cmd.RunE = c.Run
cmd.Flags().StringArrayVarP(&c.flagConfig, "config", "c", nil, i18n.G("Config key/value to apply to the new instance")+"``")
cmd.Flags().StringArrayVarP(&c.flagProfile, "profile", "p", nil, i18n.G("Profile to apply to the new instance")+"``")
cmd.Flags().StringArrayVarP(&c.flagDevice, "device", "d", nil, i18n.G("New key/value to apply to a specific device")+"``")
cmd.Flags().BoolVarP(&c.flagEphemeral, "ephemeral", "e", false, i18n.G("Ephemeral instance"))
cmd.Flags().StringVarP(&c.flagNetwork, "network", "n", "", i18n.G("Network name")+"``")
cmd.Flags().StringVarP(&c.flagStorage, "storage", "s", "", i18n.G("Storage pool name")+"``")
cmd.Flags().StringVarP(&c.flagType, "type", "t", "", i18n.G("Instance type")+"``")
cmd.Flags().StringVar(&c.flagTarget, "target", "", i18n.G("Cluster member name")+"``")
cmd.Flags().BoolVar(&c.flagNoProfiles, "no-profiles", false, i18n.G("Create the instance with no profiles applied"))
cmd.Flags().BoolVar(&c.flagEmpty, "empty", false, i18n.G("Create an empty instance"))
cmd.Flags().BoolVar(&c.flagVM, "vm", false, i18n.G("Create a virtual machine"))
return cmd
} | identifier_body |
generate_conv_gan.py | #!/usr/bin/env python3
"""Generate a sequence of poses with convolutional generator and
discriminator."""
from keras.models import Model, Sequential
from keras.layers import Dense, Activation, TimeDistributed, LSTM, \
RepeatVector, Input, Dropout, LeakyReLU, Convolution1D, Flatten, \
BatchNormalization
from keras.optimizers import Adam, RMSprop
from keras.utils.generic_utils import Progbar
import numpy as np
import re
from glob import glob
from os import path, mkdir, makedirs
from scipy.io import savemat
from multiprocessing.pool import Pool
from common import GOOD_MOCAP_INDS, insert_junk_entries
np.random.seed(2372143511)
WEIGHTS_PATH = './best-conv-gan-weights.h5'
SEQ_LENGTH = 32
SEQ_NOISE_PAD = 7
NOISE_DIM = 30
BATCH_SIZE = 16
K = 2
def make_generator(pose_size):
x = in_layer = Input(shape=(SEQ_LENGTH + 2 * SEQ_NOISE_PAD, NOISE_DIM))
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = out_layer = Convolution1D(pose_size, 3, border_mode='valid')(x)
model = Model(input=[in_layer], output=[out_layer])
return model
def | (pose_size):
in_shape = (SEQ_LENGTH, pose_size)
x = in_layer = Input(shape=in_shape)
x = Convolution1D(128, 7, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(128, 7, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(128, 7, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(128, 8, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(1, 7, border_mode='valid')(x)
x = Flatten()(x)
out_layer = Activation('sigmoid')(x)
model = Model(input=[in_layer], output=[out_layer])
return model
class GANTrainer:
noise_dim = NOISE_DIM
seq_length = SEQ_LENGTH
seq_pad = SEQ_NOISE_PAD
def __init__(self, pose_size, d_lr=0.0002, g_lr=0.00002):
self.discriminator = make_discriminator(pose_size)
# Copy is read-only; it doesn't get compiled
self.discriminator_copy = make_discriminator(pose_size)
self.discriminator_copy.trainable = False
disc_opt = Adam(lr=d_lr, beta_1=0.5)
self.discriminator.compile(disc_opt, 'binary_crossentropy',
metrics=['binary_accuracy'])
self.generator = make_generator(pose_size)
self.generator.compile('sgd', 'mae')
nested = Sequential()
nested.add(self.generator)
nested.add(self.discriminator_copy)
gen_opt = Adam(lr=g_lr, beta_1=0.5)
nested.compile(gen_opt, 'binary_crossentropy',
metrics=['binary_accuracy'])
self.nested_generator = nested
self.num_disc_steps = 0
self.num_gen_steps = 0
def update_disc_copy(self):
"""Copy weights from real discriminator over to nested one. This skirts
a lot of Keras issues with nested, shared models."""
source = self.discriminator
dest = self.discriminator_copy
assert len(source.layers) == len(dest.layers)
for dest_layer, source_layer in zip(dest.layers, source.layers):
dest_layer.set_weights(source_layer.get_weights())
def make_noise(self, num):
"""Input noise for generator"""
return np.random.randn(num, self.seq_length + 2 * self.seq_pad,
self.noise_dim)
def gen_train_step(self, batch_size):
"""Train the generator to fool the discriminator."""
self.discriminator.trainable = False
labels = [1] * batch_size
noise = self.make_noise(batch_size)
self.update_disc_copy()
self.num_gen_steps += 1
pre_weights = self.discriminator_copy.get_weights()
rv = self.nested_generator.train_on_batch(noise, labels)
self.update_disc_copy()
post_weights = self.discriminator_copy.get_weights()
# The next assertion fails with batch norm when I don't copy. I don't
# know how to stop those layers from updating :(
assert all(np.all(a == b) for a, b in zip(pre_weights, post_weights))
return rv
def disc_train_step(self, true_batch):
"""Get some true poses and train discriminator to distinguish them from
generated poses."""
self.discriminator.trainable = True
poses = self.generate_poses(len(true_batch))
labels = np.array([1] * len(true_batch) + [0] * len(poses))
data = np.concatenate([true_batch, poses])
self.num_disc_steps += 1
# Get back loss
return self.discriminator.train_on_batch(data, labels)
def disc_val(self, val_data, batch_size):
"""Validate discriminator by checking whether it can spot fakes."""
fakes = self.generate_poses(len(val_data))
labels = np.array([1] * len(val_data) + [0] * len(fakes))
data = np.concatenate([val_data, fakes])
return self.discriminator.evaluate(data, labels,
batch_size=batch_size)
def gen_val(self, num_poses, batch_size):
"""Validate generator by figuring out how good it is at fooling
discriminator (closely related to discriminator step; just helps us
break down accuracy a bit)."""
noise = self.make_noise(num_poses)
labels = [1] * num_poses
self.update_disc_copy()
rv = self.nested_generator.evaluate(noise, labels,
batch_size=batch_size)
self.update_disc_copy()
return rv
def generate_poses(self, num, batch_size=BATCH_SIZE):
"""Generate some fixed number of poses. Useful for both generator and
discriminator training."""
return self.generator.predict(self.make_noise(num),
batch_size=batch_size)
def save(self, dest_dir):
"""Save generator and discriminator to some path"""
try:
makedirs(dest_dir)
except FileExistsError:
pass
suffix = '-%d-%d.h5' % (self.num_gen_steps, self.num_disc_steps)
gen_path = path.join(dest_dir, 'gen' + suffix)
disc_path = path.join(dest_dir, 'disc' + suffix)
self.discriminator.save(disc_path)
self.generator.save(gen_path)
def train_model(train_X, val_X, mu, sigma):
assert train_X.ndim == 3, train_X.shape
total_X, time_steps, out_shape = train_X.shape
trainer = GANTrainer(out_shape)
epochs = 0
# GAN predictions will be put in here
try:
mkdir('gan-conv-out')
except FileExistsError:
pass
print('Training generator')
while True:
copy_X = train_X.copy()
np.random.shuffle(copy_X)
total_X, _, _ = copy_X.shape
to_fetch = BATCH_SIZE // 2
epochs += 1
print('Epoch %d' % epochs)
bar = Progbar(total_X)
bar.update(0)
epoch_fetched = 0
while epoch_fetched < total_X:
# Fetch some ground truth to train the discriminator
for i in range(K):
if epoch_fetched >= total_X:
break
fetched = copy_X[epoch_fetched:epoch_fetched+to_fetch]
dloss, dacc = trainer.disc_train_step(fetched)
epoch_fetched += len(fetched)
bar.update(epoch_fetched, values=[
('d_loss', dloss), ('d_acc', dacc)
])
# Train the generator (don't worry about loss)
trainer.gen_train_step(BATCH_SIZE)
# End of an epoch, so let's validate models (doesn't work so great,
# TBH)
print('\nValidating')
disc_loss, disc_acc = trainer.disc_val(val_X, BATCH_SIZE)
gen_loss, gen_acc = trainer.gen_val(100, BATCH_SIZE)
print('\nDisc loss/acc: %g/%g' % (disc_loss, disc_acc))
print('Gen loss/acc: %g/%g' % (gen_loss, gen_acc))
# Also save some predictions so that we can monitor training
print('Saving predictions')
poses = trainer.generate_poses(16) * sigma + mean
poses = insert_junk_entries(poses)
savemat('gan-conv-out/gan-conv-preds-epoch-%d.mat' % epochs, {'poses': poses})
# Sometimes we save a model
if not (epochs - 1) % 5:
dest_dir = 'saved-conv-gans/'
print('Saving model to %s' % dest_dir)
trainer.save(dest_dir)
def prepare_file(filename):
poses = np.loadtxt(filename, delimiter=',')
assert poses.ndim == 2 and poses.shape[1] == 99, poses.shape
zero_inds, = np.nonzero((poses != 0).any(axis=0))
assert (zero_inds == GOOD_MOCAP_INDS).all(), zero_inds
poses = poses[:, GOOD_MOCAP_INDS]
seqs = []
end = len(poses) - SEQ_LENGTH + 1
# TODO: May make sense to have a bigger overlap here
step = max(1, min(SEQ_LENGTH // 2, 50))
for start in range(0, end, step):
seqs.append(poses[start:start+SEQ_LENGTH])
return np.stack(seqs)
def is_valid(data):
return np.isfinite(data).all()
_fnre = re.compile(r'^expmap_S(?P<subject>\d+)_(?P<action>.+).txt.gz$')
def mapper(filename):
base = path.basename(filename)
meta = _fnre.match(base).groupdict()
subj_id = int(meta['subject'])
X = prepare_file(filename)
return (subj_id, X)
def load_data():
filenames = glob('h36m-3d-poses/expmap_*.txt.gz')
train_X_blocks = []
test_X_blocks = []
with Pool() as pool:
for subj_id, X in pool.map(mapper, filenames):
if subj_id == 5:
# subject 5 is for testing
test_X_blocks.append(X)
else:
train_X_blocks.append(X)
train_X = np.concatenate(train_X_blocks, axis=0)
test_X = np.concatenate(test_X_blocks, axis=0)
N, T, D = train_X.shape
mean = train_X.reshape((N*T, D)).mean(axis=0).reshape((1, 1, -1))
std = train_X.reshape((N*T, D)).std(axis=0).reshape((1, 1, -1))
train_X = (train_X - mean) / std
test_X = (test_X - mean) / std
assert is_valid(train_X)
assert is_valid(test_X)
return train_X, test_X, mean, std
if __name__ == '__main__':
print('Loading data')
train_X, val_X, mean, std = load_data()
print('Data loaded')
model = train_model(train_X, val_X, mean, std)
| make_discriminator | identifier_name |
generate_conv_gan.py | #!/usr/bin/env python3
"""Generate a sequence of poses with convolutional generator and
discriminator."""
from keras.models import Model, Sequential
from keras.layers import Dense, Activation, TimeDistributed, LSTM, \
RepeatVector, Input, Dropout, LeakyReLU, Convolution1D, Flatten, \
BatchNormalization
from keras.optimizers import Adam, RMSprop
from keras.utils.generic_utils import Progbar
import numpy as np
import re
from glob import glob
from os import path, mkdir, makedirs
from scipy.io import savemat
from multiprocessing.pool import Pool
from common import GOOD_MOCAP_INDS, insert_junk_entries
np.random.seed(2372143511)
WEIGHTS_PATH = './best-conv-gan-weights.h5'
SEQ_LENGTH = 32
SEQ_NOISE_PAD = 7
NOISE_DIM = 30
BATCH_SIZE = 16
K = 2
def make_generator(pose_size):
x = in_layer = Input(shape=(SEQ_LENGTH + 2 * SEQ_NOISE_PAD, NOISE_DIM))
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = out_layer = Convolution1D(pose_size, 3, border_mode='valid')(x)
model = Model(input=[in_layer], output=[out_layer])
return model
def make_discriminator(pose_size):
in_shape = (SEQ_LENGTH, pose_size)
x = in_layer = Input(shape=in_shape)
x = Convolution1D(128, 7, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(128, 7, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(128, 7, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(128, 8, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(1, 7, border_mode='valid')(x)
x = Flatten()(x)
out_layer = Activation('sigmoid')(x)
model = Model(input=[in_layer], output=[out_layer])
return model
class GANTrainer:
noise_dim = NOISE_DIM
seq_length = SEQ_LENGTH
seq_pad = SEQ_NOISE_PAD
def __init__(self, pose_size, d_lr=0.0002, g_lr=0.00002):
self.discriminator = make_discriminator(pose_size)
# Copy is read-only; it doesn't get compiled
self.discriminator_copy = make_discriminator(pose_size)
self.discriminator_copy.trainable = False
disc_opt = Adam(lr=d_lr, beta_1=0.5)
self.discriminator.compile(disc_opt, 'binary_crossentropy',
metrics=['binary_accuracy'])
self.generator = make_generator(pose_size)
self.generator.compile('sgd', 'mae')
nested = Sequential()
nested.add(self.generator)
nested.add(self.discriminator_copy)
gen_opt = Adam(lr=g_lr, beta_1=0.5)
nested.compile(gen_opt, 'binary_crossentropy',
metrics=['binary_accuracy'])
self.nested_generator = nested
self.num_disc_steps = 0
self.num_gen_steps = 0
def update_disc_copy(self):
"""Copy weights from real discriminator over to nested one. This skirts
a lot of Keras issues with nested, shared models."""
source = self.discriminator
dest = self.discriminator_copy
assert len(source.layers) == len(dest.layers)
for dest_layer, source_layer in zip(dest.layers, source.layers):
dest_layer.set_weights(source_layer.get_weights())
def make_noise(self, num):
"""Input noise for generator"""
return np.random.randn(num, self.seq_length + 2 * self.seq_pad,
self.noise_dim)
def gen_train_step(self, batch_size):
"""Train the generator to fool the discriminator."""
self.discriminator.trainable = False
labels = [1] * batch_size
noise = self.make_noise(batch_size)
self.update_disc_copy()
self.num_gen_steps += 1
pre_weights = self.discriminator_copy.get_weights()
rv = self.nested_generator.train_on_batch(noise, labels)
self.update_disc_copy()
post_weights = self.discriminator_copy.get_weights()
# The next assertion fails with batch norm when I don't copy. I don't
# know how to stop those layers from updating :(
assert all(np.all(a == b) for a, b in zip(pre_weights, post_weights))
return rv
def disc_train_step(self, true_batch):
"""Get some true poses and train discriminator to distinguish them from
generated poses."""
self.discriminator.trainable = True
poses = self.generate_poses(len(true_batch))
labels = np.array([1] * len(true_batch) + [0] * len(poses))
data = np.concatenate([true_batch, poses])
self.num_disc_steps += 1
# Get back loss
return self.discriminator.train_on_batch(data, labels)
def disc_val(self, val_data, batch_size):
"""Validate discriminator by checking whether it can spot fakes."""
fakes = self.generate_poses(len(val_data))
labels = np.array([1] * len(val_data) + [0] * len(fakes))
data = np.concatenate([val_data, fakes])
return self.discriminator.evaluate(data, labels,
batch_size=batch_size)
def gen_val(self, num_poses, batch_size):
"""Validate generator by figuring out how good it is at fooling
discriminator (closely related to discriminator step; just helps us
break down accuracy a bit)."""
noise = self.make_noise(num_poses)
labels = [1] * num_poses
self.update_disc_copy()
rv = self.nested_generator.evaluate(noise, labels,
batch_size=batch_size)
self.update_disc_copy()
return rv
def generate_poses(self, num, batch_size=BATCH_SIZE):
"""Generate some fixed number of poses. Useful for both generator and
discriminator training."""
return self.generator.predict(self.make_noise(num),
batch_size=batch_size)
def save(self, dest_dir):
"""Save generator and discriminator to some path"""
try:
makedirs(dest_dir)
except FileExistsError:
pass
suffix = '-%d-%d.h5' % (self.num_gen_steps, self.num_disc_steps)
gen_path = path.join(dest_dir, 'gen' + suffix)
disc_path = path.join(dest_dir, 'disc' + suffix)
self.discriminator.save(disc_path)
self.generator.save(gen_path)
def train_model(train_X, val_X, mu, sigma):
assert train_X.ndim == 3, train_X.shape
total_X, time_steps, out_shape = train_X.shape
trainer = GANTrainer(out_shape)
epochs = 0
# GAN predictions will be put in here
try:
mkdir('gan-conv-out')
except FileExistsError:
pass
print('Training generator')
while True:
copy_X = train_X.copy()
np.random.shuffle(copy_X)
total_X, _, _ = copy_X.shape
to_fetch = BATCH_SIZE // 2
epochs += 1
print('Epoch %d' % epochs)
bar = Progbar(total_X)
bar.update(0)
epoch_fetched = 0
while epoch_fetched < total_X:
# Fetch some ground truth to train the discriminator
for i in range(K):
if epoch_fetched >= total_X:
break
fetched = copy_X[epoch_fetched:epoch_fetched+to_fetch]
dloss, dacc = trainer.disc_train_step(fetched)
epoch_fetched += len(fetched)
bar.update(epoch_fetched, values=[
('d_loss', dloss), ('d_acc', dacc)
])
# Train the generator (don't worry about loss)
trainer.gen_train_step(BATCH_SIZE)
# End of an epoch, so let's validate models (doesn't work so great,
# TBH)
print('\nValidating')
disc_loss, disc_acc = trainer.disc_val(val_X, BATCH_SIZE)
gen_loss, gen_acc = trainer.gen_val(100, BATCH_SIZE)
print('\nDisc loss/acc: %g/%g' % (disc_loss, disc_acc))
print('Gen loss/acc: %g/%g' % (gen_loss, gen_acc))
# Also save some predictions so that we can monitor training
print('Saving predictions')
poses = trainer.generate_poses(16) * sigma + mean
poses = insert_junk_entries(poses)
savemat('gan-conv-out/gan-conv-preds-epoch-%d.mat' % epochs, {'poses': poses})
# Sometimes we save a model
if not (epochs - 1) % 5:
dest_dir = 'saved-conv-gans/'
print('Saving model to %s' % dest_dir)
trainer.save(dest_dir)
def prepare_file(filename):
poses = np.loadtxt(filename, delimiter=',')
assert poses.ndim == 2 and poses.shape[1] == 99, poses.shape
zero_inds, = np.nonzero((poses != 0).any(axis=0))
assert (zero_inds == GOOD_MOCAP_INDS).all(), zero_inds
poses = poses[:, GOOD_MOCAP_INDS]
seqs = []
end = len(poses) - SEQ_LENGTH + 1
# TODO: May make sense to have a bigger overlap here
step = max(1, min(SEQ_LENGTH // 2, 50))
for start in range(0, end, step):
seqs.append(poses[start:start+SEQ_LENGTH])
return np.stack(seqs)
def is_valid(data):
return np.isfinite(data).all()
_fnre = re.compile(r'^expmap_S(?P<subject>\d+)_(?P<action>.+).txt.gz$')
def mapper(filename):
base = path.basename(filename)
meta = _fnre.match(base).groupdict()
subj_id = int(meta['subject'])
X = prepare_file(filename)
return (subj_id, X)
def load_data():
|
if __name__ == '__main__':
print('Loading data')
train_X, val_X, mean, std = load_data()
print('Data loaded')
model = train_model(train_X, val_X, mean, std)
| filenames = glob('h36m-3d-poses/expmap_*.txt.gz')
train_X_blocks = []
test_X_blocks = []
with Pool() as pool:
for subj_id, X in pool.map(mapper, filenames):
if subj_id == 5:
# subject 5 is for testing
test_X_blocks.append(X)
else:
train_X_blocks.append(X)
train_X = np.concatenate(train_X_blocks, axis=0)
test_X = np.concatenate(test_X_blocks, axis=0)
N, T, D = train_X.shape
mean = train_X.reshape((N*T, D)).mean(axis=0).reshape((1, 1, -1))
std = train_X.reshape((N*T, D)).std(axis=0).reshape((1, 1, -1))
train_X = (train_X - mean) / std
test_X = (test_X - mean) / std
assert is_valid(train_X)
assert is_valid(test_X)
return train_X, test_X, mean, std | identifier_body |
generate_conv_gan.py | #!/usr/bin/env python3
"""Generate a sequence of poses with convolutional generator and
discriminator."""
from keras.models import Model, Sequential
from keras.layers import Dense, Activation, TimeDistributed, LSTM, \
RepeatVector, Input, Dropout, LeakyReLU, Convolution1D, Flatten, \
BatchNormalization
from keras.optimizers import Adam, RMSprop
from keras.utils.generic_utils import Progbar
import numpy as np
import re
from glob import glob
from os import path, mkdir, makedirs
from scipy.io import savemat
from multiprocessing.pool import Pool
from common import GOOD_MOCAP_INDS, insert_junk_entries
np.random.seed(2372143511)
WEIGHTS_PATH = './best-conv-gan-weights.h5'
SEQ_LENGTH = 32
SEQ_NOISE_PAD = 7
NOISE_DIM = 30
BATCH_SIZE = 16
K = 2
def make_generator(pose_size):
x = in_layer = Input(shape=(SEQ_LENGTH + 2 * SEQ_NOISE_PAD, NOISE_DIM))
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = out_layer = Convolution1D(pose_size, 3, border_mode='valid')(x)
model = Model(input=[in_layer], output=[out_layer])
return model
def make_discriminator(pose_size):
in_shape = (SEQ_LENGTH, pose_size)
x = in_layer = Input(shape=in_shape)
x = Convolution1D(128, 7, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(128, 7, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(128, 7, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(128, 8, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(1, 7, border_mode='valid')(x)
x = Flatten()(x)
out_layer = Activation('sigmoid')(x)
model = Model(input=[in_layer], output=[out_layer])
return model
class GANTrainer:
noise_dim = NOISE_DIM
seq_length = SEQ_LENGTH
seq_pad = SEQ_NOISE_PAD
def __init__(self, pose_size, d_lr=0.0002, g_lr=0.00002):
self.discriminator = make_discriminator(pose_size)
# Copy is read-only; it doesn't get compiled
self.discriminator_copy = make_discriminator(pose_size)
self.discriminator_copy.trainable = False
disc_opt = Adam(lr=d_lr, beta_1=0.5)
self.discriminator.compile(disc_opt, 'binary_crossentropy',
metrics=['binary_accuracy'])
self.generator = make_generator(pose_size)
self.generator.compile('sgd', 'mae')
nested = Sequential()
nested.add(self.generator)
nested.add(self.discriminator_copy)
gen_opt = Adam(lr=g_lr, beta_1=0.5)
nested.compile(gen_opt, 'binary_crossentropy',
metrics=['binary_accuracy'])
self.nested_generator = nested
self.num_disc_steps = 0
self.num_gen_steps = 0
def update_disc_copy(self):
"""Copy weights from real discriminator over to nested one. This skirts
a lot of Keras issues with nested, shared models."""
source = self.discriminator
dest = self.discriminator_copy
assert len(source.layers) == len(dest.layers)
for dest_layer, source_layer in zip(dest.layers, source.layers):
dest_layer.set_weights(source_layer.get_weights())
def make_noise(self, num):
"""Input noise for generator"""
return np.random.randn(num, self.seq_length + 2 * self.seq_pad,
self.noise_dim)
def gen_train_step(self, batch_size):
"""Train the generator to fool the discriminator."""
self.discriminator.trainable = False
labels = [1] * batch_size
noise = self.make_noise(batch_size)
self.update_disc_copy()
self.num_gen_steps += 1
pre_weights = self.discriminator_copy.get_weights()
rv = self.nested_generator.train_on_batch(noise, labels)
self.update_disc_copy()
post_weights = self.discriminator_copy.get_weights()
# The next assertion fails with batch norm when I don't copy. I don't
# know how to stop those layers from updating :(
assert all(np.all(a == b) for a, b in zip(pre_weights, post_weights))
return rv
def disc_train_step(self, true_batch):
"""Get some true poses and train discriminator to distinguish them from
generated poses."""
self.discriminator.trainable = True
poses = self.generate_poses(len(true_batch))
labels = np.array([1] * len(true_batch) + [0] * len(poses))
data = np.concatenate([true_batch, poses])
self.num_disc_steps += 1
# Get back loss
return self.discriminator.train_on_batch(data, labels)
def disc_val(self, val_data, batch_size):
"""Validate discriminator by checking whether it can spot fakes."""
fakes = self.generate_poses(len(val_data))
labels = np.array([1] * len(val_data) + [0] * len(fakes))
data = np.concatenate([val_data, fakes])
return self.discriminator.evaluate(data, labels,
batch_size=batch_size)
def gen_val(self, num_poses, batch_size):
"""Validate generator by figuring out how good it is at fooling
discriminator (closely related to discriminator step; just helps us
break down accuracy a bit)."""
noise = self.make_noise(num_poses)
labels = [1] * num_poses
self.update_disc_copy()
rv = self.nested_generator.evaluate(noise, labels,
batch_size=batch_size)
self.update_disc_copy()
return rv
def generate_poses(self, num, batch_size=BATCH_SIZE):
"""Generate some fixed number of poses. Useful for both generator and
discriminator training."""
return self.generator.predict(self.make_noise(num),
batch_size=batch_size)
def save(self, dest_dir):
"""Save generator and discriminator to some path"""
try:
makedirs(dest_dir)
except FileExistsError:
pass
suffix = '-%d-%d.h5' % (self.num_gen_steps, self.num_disc_steps)
gen_path = path.join(dest_dir, 'gen' + suffix)
disc_path = path.join(dest_dir, 'disc' + suffix)
self.discriminator.save(disc_path)
self.generator.save(gen_path)
def train_model(train_X, val_X, mu, sigma):
assert train_X.ndim == 3, train_X.shape
total_X, time_steps, out_shape = train_X.shape
trainer = GANTrainer(out_shape)
epochs = 0
# GAN predictions will be put in here
try:
mkdir('gan-conv-out')
except FileExistsError:
pass
print('Training generator')
while True:
copy_X = train_X.copy()
np.random.shuffle(copy_X)
total_X, _, _ = copy_X.shape
to_fetch = BATCH_SIZE // 2
epochs += 1
print('Epoch %d' % epochs)
bar = Progbar(total_X)
bar.update(0)
epoch_fetched = 0
while epoch_fetched < total_X:
# Fetch some ground truth to train the discriminator
for i in range(K):
if epoch_fetched >= total_X:
break
fetched = copy_X[epoch_fetched:epoch_fetched+to_fetch]
dloss, dacc = trainer.disc_train_step(fetched)
epoch_fetched += len(fetched)
bar.update(epoch_fetched, values=[
('d_loss', dloss), ('d_acc', dacc)
])
# Train the generator (don't worry about loss)
trainer.gen_train_step(BATCH_SIZE)
# End of an epoch, so let's validate models (doesn't work so great,
# TBH)
print('\nValidating')
disc_loss, disc_acc = trainer.disc_val(val_X, BATCH_SIZE)
gen_loss, gen_acc = trainer.gen_val(100, BATCH_SIZE)
print('\nDisc loss/acc: %g/%g' % (disc_loss, disc_acc))
print('Gen loss/acc: %g/%g' % (gen_loss, gen_acc))
# Also save some predictions so that we can monitor training
print('Saving predictions')
poses = trainer.generate_poses(16) * sigma + mean
poses = insert_junk_entries(poses)
savemat('gan-conv-out/gan-conv-preds-epoch-%d.mat' % epochs, {'poses': poses})
# Sometimes we save a model
if not (epochs - 1) % 5:
dest_dir = 'saved-conv-gans/'
print('Saving model to %s' % dest_dir)
trainer.save(dest_dir)
def prepare_file(filename):
poses = np.loadtxt(filename, delimiter=',')
assert poses.ndim == 2 and poses.shape[1] == 99, poses.shape
zero_inds, = np.nonzero((poses != 0).any(axis=0))
assert (zero_inds == GOOD_MOCAP_INDS).all(), zero_inds
poses = poses[:, GOOD_MOCAP_INDS]
seqs = []
end = len(poses) - SEQ_LENGTH + 1
# TODO: May make sense to have a bigger overlap here
step = max(1, min(SEQ_LENGTH // 2, 50))
for start in range(0, end, step):
seqs.append(poses[start:start+SEQ_LENGTH])
return np.stack(seqs)
def is_valid(data):
return np.isfinite(data).all()
_fnre = re.compile(r'^expmap_S(?P<subject>\d+)_(?P<action>.+).txt.gz$')
def mapper(filename):
base = path.basename(filename)
meta = _fnre.match(base).groupdict()
subj_id = int(meta['subject'])
X = prepare_file(filename)
return (subj_id, X)
def load_data():
filenames = glob('h36m-3d-poses/expmap_*.txt.gz')
train_X_blocks = []
test_X_blocks = []
with Pool() as pool:
for subj_id, X in pool.map(mapper, filenames):
if subj_id == 5:
# subject 5 is for testing
test_X_blocks.append(X)
else:
train_X_blocks.append(X)
train_X = np.concatenate(train_X_blocks, axis=0)
test_X = np.concatenate(test_X_blocks, axis=0)
N, T, D = train_X.shape
mean = train_X.reshape((N*T, D)).mean(axis=0).reshape((1, 1, -1))
std = train_X.reshape((N*T, D)).std(axis=0).reshape((1, 1, -1))
train_X = (train_X - mean) / std
test_X = (test_X - mean) / std
assert is_valid(train_X)
assert is_valid(test_X)
return train_X, test_X, mean, std
if __name__ == '__main__':
| print('Loading data')
train_X, val_X, mean, std = load_data()
print('Data loaded')
model = train_model(train_X, val_X, mean, std) | conditional_block | |
generate_conv_gan.py | #!/usr/bin/env python3
"""Generate a sequence of poses with convolutional generator and
discriminator."""
from keras.models import Model, Sequential
from keras.layers import Dense, Activation, TimeDistributed, LSTM, \
RepeatVector, Input, Dropout, LeakyReLU, Convolution1D, Flatten, \
BatchNormalization
from keras.optimizers import Adam, RMSprop
from keras.utils.generic_utils import Progbar
import numpy as np
import re
from glob import glob
from os import path, mkdir, makedirs
from scipy.io import savemat
from multiprocessing.pool import Pool
from common import GOOD_MOCAP_INDS, insert_junk_entries
np.random.seed(2372143511)
WEIGHTS_PATH = './best-conv-gan-weights.h5'
SEQ_LENGTH = 32
SEQ_NOISE_PAD = 7
NOISE_DIM = 30
BATCH_SIZE = 16
K = 2
def make_generator(pose_size):
x = in_layer = Input(shape=(SEQ_LENGTH + 2 * SEQ_NOISE_PAD, NOISE_DIM))
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(500, 3, border_mode='valid')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = out_layer = Convolution1D(pose_size, 3, border_mode='valid')(x)
model = Model(input=[in_layer], output=[out_layer])
return model
def make_discriminator(pose_size):
in_shape = (SEQ_LENGTH, pose_size)
x = in_layer = Input(shape=in_shape)
x = Convolution1D(128, 7, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(128, 7, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(128, 7, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(128, 8, border_mode='valid')(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization()(x)
x = Convolution1D(1, 7, border_mode='valid')(x)
x = Flatten()(x)
out_layer = Activation('sigmoid')(x)
model = Model(input=[in_layer], output=[out_layer])
return model
class GANTrainer:
noise_dim = NOISE_DIM
seq_length = SEQ_LENGTH
seq_pad = SEQ_NOISE_PAD
def __init__(self, pose_size, d_lr=0.0002, g_lr=0.00002):
self.discriminator = make_discriminator(pose_size)
# Copy is read-only; it doesn't get compiled
self.discriminator_copy = make_discriminator(pose_size)
self.discriminator_copy.trainable = False
disc_opt = Adam(lr=d_lr, beta_1=0.5)
self.discriminator.compile(disc_opt, 'binary_crossentropy',
metrics=['binary_accuracy'])
self.generator = make_generator(pose_size)
self.generator.compile('sgd', 'mae')
nested = Sequential()
nested.add(self.generator)
nested.add(self.discriminator_copy)
gen_opt = Adam(lr=g_lr, beta_1=0.5)
nested.compile(gen_opt, 'binary_crossentropy',
metrics=['binary_accuracy'])
self.nested_generator = nested
self.num_disc_steps = 0
self.num_gen_steps = 0
def update_disc_copy(self):
"""Copy weights from real discriminator over to nested one. This skirts
a lot of Keras issues with nested, shared models."""
source = self.discriminator
dest = self.discriminator_copy
assert len(source.layers) == len(dest.layers)
for dest_layer, source_layer in zip(dest.layers, source.layers):
dest_layer.set_weights(source_layer.get_weights()) | """Input noise for generator"""
return np.random.randn(num, self.seq_length + 2 * self.seq_pad,
self.noise_dim)
def gen_train_step(self, batch_size):
"""Train the generator to fool the discriminator."""
self.discriminator.trainable = False
labels = [1] * batch_size
noise = self.make_noise(batch_size)
self.update_disc_copy()
self.num_gen_steps += 1
pre_weights = self.discriminator_copy.get_weights()
rv = self.nested_generator.train_on_batch(noise, labels)
self.update_disc_copy()
post_weights = self.discriminator_copy.get_weights()
# The next assertion fails with batch norm when I don't copy. I don't
# know how to stop those layers from updating :(
assert all(np.all(a == b) for a, b in zip(pre_weights, post_weights))
return rv
def disc_train_step(self, true_batch):
"""Get some true poses and train discriminator to distinguish them from
generated poses."""
self.discriminator.trainable = True
poses = self.generate_poses(len(true_batch))
labels = np.array([1] * len(true_batch) + [0] * len(poses))
data = np.concatenate([true_batch, poses])
self.num_disc_steps += 1
# Get back loss
return self.discriminator.train_on_batch(data, labels)
def disc_val(self, val_data, batch_size):
"""Validate discriminator by checking whether it can spot fakes."""
fakes = self.generate_poses(len(val_data))
labels = np.array([1] * len(val_data) + [0] * len(fakes))
data = np.concatenate([val_data, fakes])
return self.discriminator.evaluate(data, labels,
batch_size=batch_size)
def gen_val(self, num_poses, batch_size):
"""Validate generator by figuring out how good it is at fooling
discriminator (closely related to discriminator step; just helps us
break down accuracy a bit)."""
noise = self.make_noise(num_poses)
labels = [1] * num_poses
self.update_disc_copy()
rv = self.nested_generator.evaluate(noise, labels,
batch_size=batch_size)
self.update_disc_copy()
return rv
def generate_poses(self, num, batch_size=BATCH_SIZE):
"""Generate some fixed number of poses. Useful for both generator and
discriminator training."""
return self.generator.predict(self.make_noise(num),
batch_size=batch_size)
def save(self, dest_dir):
"""Save generator and discriminator to some path"""
try:
makedirs(dest_dir)
except FileExistsError:
pass
suffix = '-%d-%d.h5' % (self.num_gen_steps, self.num_disc_steps)
gen_path = path.join(dest_dir, 'gen' + suffix)
disc_path = path.join(dest_dir, 'disc' + suffix)
self.discriminator.save(disc_path)
self.generator.save(gen_path)
def train_model(train_X, val_X, mu, sigma):
assert train_X.ndim == 3, train_X.shape
total_X, time_steps, out_shape = train_X.shape
trainer = GANTrainer(out_shape)
epochs = 0
# GAN predictions will be put in here
try:
mkdir('gan-conv-out')
except FileExistsError:
pass
print('Training generator')
while True:
copy_X = train_X.copy()
np.random.shuffle(copy_X)
total_X, _, _ = copy_X.shape
to_fetch = BATCH_SIZE // 2
epochs += 1
print('Epoch %d' % epochs)
bar = Progbar(total_X)
bar.update(0)
epoch_fetched = 0
while epoch_fetched < total_X:
# Fetch some ground truth to train the discriminator
for i in range(K):
if epoch_fetched >= total_X:
break
fetched = copy_X[epoch_fetched:epoch_fetched+to_fetch]
dloss, dacc = trainer.disc_train_step(fetched)
epoch_fetched += len(fetched)
bar.update(epoch_fetched, values=[
('d_loss', dloss), ('d_acc', dacc)
])
# Train the generator (don't worry about loss)
trainer.gen_train_step(BATCH_SIZE)
# End of an epoch, so let's validate models (doesn't work so great,
# TBH)
print('\nValidating')
disc_loss, disc_acc = trainer.disc_val(val_X, BATCH_SIZE)
gen_loss, gen_acc = trainer.gen_val(100, BATCH_SIZE)
print('\nDisc loss/acc: %g/%g' % (disc_loss, disc_acc))
print('Gen loss/acc: %g/%g' % (gen_loss, gen_acc))
# Also save some predictions so that we can monitor training
print('Saving predictions')
poses = trainer.generate_poses(16) * sigma + mean
poses = insert_junk_entries(poses)
savemat('gan-conv-out/gan-conv-preds-epoch-%d.mat' % epochs, {'poses': poses})
# Sometimes we save a model
if not (epochs - 1) % 5:
dest_dir = 'saved-conv-gans/'
print('Saving model to %s' % dest_dir)
trainer.save(dest_dir)
def prepare_file(filename):
poses = np.loadtxt(filename, delimiter=',')
assert poses.ndim == 2 and poses.shape[1] == 99, poses.shape
zero_inds, = np.nonzero((poses != 0).any(axis=0))
assert (zero_inds == GOOD_MOCAP_INDS).all(), zero_inds
poses = poses[:, GOOD_MOCAP_INDS]
seqs = []
end = len(poses) - SEQ_LENGTH + 1
# TODO: May make sense to have a bigger overlap here
step = max(1, min(SEQ_LENGTH // 2, 50))
for start in range(0, end, step):
seqs.append(poses[start:start+SEQ_LENGTH])
return np.stack(seqs)
def is_valid(data):
return np.isfinite(data).all()
_fnre = re.compile(r'^expmap_S(?P<subject>\d+)_(?P<action>.+).txt.gz$')
def mapper(filename):
base = path.basename(filename)
meta = _fnre.match(base).groupdict()
subj_id = int(meta['subject'])
X = prepare_file(filename)
return (subj_id, X)
def load_data():
filenames = glob('h36m-3d-poses/expmap_*.txt.gz')
train_X_blocks = []
test_X_blocks = []
with Pool() as pool:
for subj_id, X in pool.map(mapper, filenames):
if subj_id == 5:
# subject 5 is for testing
test_X_blocks.append(X)
else:
train_X_blocks.append(X)
train_X = np.concatenate(train_X_blocks, axis=0)
test_X = np.concatenate(test_X_blocks, axis=0)
N, T, D = train_X.shape
mean = train_X.reshape((N*T, D)).mean(axis=0).reshape((1, 1, -1))
std = train_X.reshape((N*T, D)).std(axis=0).reshape((1, 1, -1))
train_X = (train_X - mean) / std
test_X = (test_X - mean) / std
assert is_valid(train_X)
assert is_valid(test_X)
return train_X, test_X, mean, std
if __name__ == '__main__':
print('Loading data')
train_X, val_X, mean, std = load_data()
print('Data loaded')
model = train_model(train_X, val_X, mean, std) |
def make_noise(self, num): | random_line_split |
branch.go | // Copyright 2021 Allstar Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package branch implements the Branch Protection security policy.
package branch
import (
"context"
"fmt"
"net/http"
"path"
"github.com/ossf/allstar/pkg/config"
"github.com/ossf/allstar/pkg/config/operator"
"github.com/ossf/allstar/pkg/policydef"
"github.com/google/go-github/v32/github"
"github.com/rs/zerolog/log"
)
const configFile = "branch_protection.yaml"
const polName = "Branch Protection"
// OrgConfig is the org-level config definition for Branch Protection.
type OrgConfig struct {
// OptConfig is the standard org-level opt in/out config, RepoOverride applies to all
// BP config.
OptConfig config.OrgOptConfig `yaml:"optConfig"`
// Action defines which action to take, default log, other: issue...
Action string `yaml:"action"`
// EnforceDefault : set to true to enforce policy on default branch, default true.
EnforceDefault bool `yaml:"enforceDefault"`
// EnforceBranches is a map of repos and branches. These are other
// non-default branches to enforce policy on, such as branches which releases
// are made from.
EnforceBranches map[string][]string `yaml:"enforceBranches"`
// RequireApproval : set to true to enforce approval on PRs, default true.
RequireApproval bool `yaml:"requireApproval"`
// ApprovalCount is the number of required PR approvals, default 1.
ApprovalCount int `yaml:"approvalCount"`
// DismissStale : set to true to require PR approvalse be dismissed when a PR is updated, default true.
DismissStale bool `yaml:"dismissStale"`
// BlockForce : set to true to block force pushes, default true.
BlockForce bool `yaml:"blockForce"`
}
// RepoConfig is the repo-level config for Branch Protection
type RepoConfig struct {
// OptConfig is the standard repo-level opt in/out config.
OptConfig config.RepoOptConfig `yaml:"optConfig"`
// Action overrides the same setting in org-level, only if present.
Action *string `yaml:"action"`
// EnforceDefault overrides the same setting in org-level, only if present.
EnforceDefault *bool `yaml:"enforceDefault"`
// EnforceBranches adds more branches to the org-level list. Does not
// override. Always allowed irrespective of DisableRepoOverride setting.
EnforceBranches []string `yaml:"enforceBranches"`
// RequireApproval overrides the same setting in org-level, only if present.
RequireApproval *bool `yaml:"requireAppproval"`
// ApprovalCount overrides the same setting in org-level, only if present.
ApprovalCount *int `yaml:"approvalCount"`
// DismissStale overrides the same setting in org-level, only if present.
DismissStale *bool `yaml:"dismissStale"`
// BlockForce overrides the same setting in org-level, only if present.
BlockForce *bool `yaml:"blockForce"`
}
type mergedConfig struct {
Action string
EnforceDefault bool
EnforceBranches []string
RequireApproval bool
ApprovalCount int
DismissStale bool
BlockForce bool
}
type details struct {
PRReviews bool
NumReviews int
DismissStale bool
BlockForce bool
}
var configFetchConfig func(context.Context, *github.Client, string, string, string, interface{}) error
func init() {
configFetchConfig = config.FetchConfig
}
// Branch is the Branch Protection policy object, implements policydef.Policy.
type Branch bool
// NewBranch returns a new BranchProtection polcy.
func NewBranch() policydef.Policy {
var b Branch
return b
}
// Name returns the name of this policy, implementing policydef.Policy.Name()
func (b Branch) Name() string {
return polName
}
type repositories interface {
Get(context.Context, string, string) (*github.Repository,
*github.Response, error)
ListBranches(context.Context, string, string, *github.BranchListOptions) (
[]*github.Branch, *github.Response, error)
GetBranchProtection(context.Context, string, string, string) (
*github.Protection, *github.Response, error)
}
// Check performs the polcy check for Branch Protection based on the
// configuration stored in the org/repo, implementing policydef.Policy.Check()
func (b Branch) Check(ctx context.Context, c *github.Client, owner,
repo string) (*policydef.Result, error) {
return check(ctx, c.Repositories, c, owner, repo)
}
func check(ctx context.Context, rep repositories, c *github.Client, owner,
repo string) (*policydef.Result, error) {
oc, rc := getConfig(ctx, c, owner, repo)
enabled := config.IsEnabled(oc.OptConfig, rc.OptConfig, repo)
log.Info().
Str("org", owner).
Str("repo", repo).
Str("area", polName).
Bool("enabled", enabled).
Msg("Check repo enabled")
mc := mergeConfig(oc, rc, repo)
r, _, err := rep.Get(ctx, owner, repo)
if err != nil {
return nil, err
}
opt := &github.BranchListOptions{
ListOptions: github.ListOptions{
PerPage: 100,
},
}
var branches []*github.Branch
for {
bs, resp, err := rep.ListBranches(ctx, owner, repo, opt)
if err != nil {
return nil, err
}
branches = append(branches, bs...)
if resp.NextPage == 0 {
break
}
opt.Page = resp.NextPage
}
// Don't really need pagination here, only checking if no branches exist.
if len(branches) == 0 {
return &policydef.Result{
Enabled: enabled,
Pass: true,
NotifyText: "No branches to protect",
Details: nil,
}, nil
}
allBranches := mc.EnforceBranches
if mc.EnforceDefault {
allBranches = append(mc.EnforceBranches, r.GetDefaultBranch())
}
if len(allBranches) == 0 |
pass := true
text := ""
ds := make(map[string]details)
for _, b := range allBranches {
p, rsp, err := rep.GetBranchProtection(ctx, owner, repo, b)
if err != nil {
if rsp != nil && rsp.StatusCode == http.StatusNotFound {
// Branch not protected
pass = false
text = text + fmt.Sprintf("No protection found for branch %v\n", b)
ds[b] = details{}
continue
}
return nil, err
}
var d details
rev := p.GetRequiredPullRequestReviews()
if rev != nil {
d.PRReviews = true
d.DismissStale = rev.DismissStaleReviews
if mc.DismissStale && !rev.DismissStaleReviews {
text = text +
fmt.Sprintf("Dismiss stale reviews not configured for branch %v\n", b)
pass = false
}
d.NumReviews = rev.RequiredApprovingReviewCount
if rev.RequiredApprovingReviewCount < mc.ApprovalCount {
pass = false
text = text +
fmt.Sprintf("PR Approvals below threshold %v : %v for branch %v\n",
rev.RequiredApprovingReviewCount, mc.ApprovalCount, b)
}
} else {
if mc.RequireApproval {
pass = false
text = text +
fmt.Sprintf("PR Approvals not configured for branch %v\n", b)
}
}
afp := p.GetAllowForcePushes()
d.BlockForce = true
if afp != nil {
if mc.BlockForce && afp.Enabled {
text = text +
fmt.Sprintf("Block force push not configured for branch %v\n", b)
pass = false
d.BlockForce = false
}
}
ds[b] = d
}
return &policydef.Result{
Enabled: enabled,
Pass: pass,
NotifyText: text,
Details: ds,
}, nil
}
// Fix implementing policydef.Policy.Fix(). Currently not supported. BP plans
// to support this TODO.
func (b Branch) Fix(ctx context.Context, c *github.Client, owner, repo string) error {
log.Warn().
Str("org", owner).
Str("repo", repo).
Str("area", polName).
Msg("Action fix is configured, but not implemented.")
return nil
}
// GetAction returns the configured action from Branch Protection's
// configuration stored in the org-level repo, default log. Implementing
// policydef.Policy.GetAction()
func (b Branch) GetAction(ctx context.Context, c *github.Client, owner, repo string) string {
oc, rc := getConfig(ctx, c, owner, repo)
mc := mergeConfig(oc, rc, repo)
return mc.Action
}
func getConfig(ctx context.Context, c *github.Client, owner, repo string) (*OrgConfig, *RepoConfig) {
oc := &OrgConfig{ // Fill out non-zero defaults
Action: "log",
EnforceDefault: true,
RequireApproval: true,
ApprovalCount: 1,
DismissStale: true,
BlockForce: true,
}
if err := configFetchConfig(ctx, c, owner, operator.OrgConfigRepo, configFile, oc); err != nil {
log.Error().
Str("org", owner).
Str("repo", operator.OrgConfigRepo).
Str("area", polName).
Str("file", configFile).
Err(err).
Msg("Unexpected config error, using defaults.")
}
rc := &RepoConfig{}
if err := configFetchConfig(ctx, c, owner, repo, path.Join(operator.RepoConfigDir, configFile), rc); err != nil {
log.Error().
Str("org", owner).
Str("repo", repo).
Str("area", polName).
Str("file", path.Join(operator.RepoConfigDir, configFile)).
Err(err).
Msg("Unexpected config error, using defaults.")
}
return oc, rc
}
func mergeConfig(oc *OrgConfig, rc *RepoConfig, repo string) *mergedConfig {
mc := &mergedConfig{
Action: oc.Action,
EnforceDefault: oc.EnforceDefault,
EnforceBranches: oc.EnforceBranches[repo],
RequireApproval: oc.RequireApproval,
ApprovalCount: oc.ApprovalCount,
DismissStale: oc.DismissStale,
BlockForce: oc.BlockForce,
}
mc.EnforceBranches = append(mc.EnforceBranches, rc.EnforceBranches...)
if !oc.OptConfig.DisableRepoOverride {
if rc.Action != nil {
mc.Action = *rc.Action
}
if rc.EnforceDefault != nil {
mc.EnforceDefault = *rc.EnforceDefault
}
if rc.RequireApproval != nil {
mc.RequireApproval = *rc.RequireApproval
}
if rc.ApprovalCount != nil {
mc.ApprovalCount = *rc.ApprovalCount
}
if rc.DismissStale != nil {
mc.DismissStale = *rc.DismissStale
}
if rc.BlockForce != nil {
mc.BlockForce = *rc.BlockForce
}
}
return mc
}
| {
return &policydef.Result{
Enabled: enabled,
Pass: true,
NotifyText: "No branches configured for enforcement in policy",
Details: nil,
}, nil
} | conditional_block |
branch.go | // Copyright 2021 Allstar Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | // See the License for the specific language governing permissions and
// limitations under the License.
// Package branch implements the Branch Protection security policy.
package branch
import (
"context"
"fmt"
"net/http"
"path"
"github.com/ossf/allstar/pkg/config"
"github.com/ossf/allstar/pkg/config/operator"
"github.com/ossf/allstar/pkg/policydef"
"github.com/google/go-github/v32/github"
"github.com/rs/zerolog/log"
)
const configFile = "branch_protection.yaml"
const polName = "Branch Protection"
// OrgConfig is the org-level config definition for Branch Protection.
type OrgConfig struct {
// OptConfig is the standard org-level opt in/out config, RepoOverride applies to all
// BP config.
OptConfig config.OrgOptConfig `yaml:"optConfig"`
// Action defines which action to take, default log, other: issue...
Action string `yaml:"action"`
// EnforceDefault : set to true to enforce policy on default branch, default true.
EnforceDefault bool `yaml:"enforceDefault"`
// EnforceBranches is a map of repos and branches. These are other
// non-default branches to enforce policy on, such as branches which releases
// are made from.
EnforceBranches map[string][]string `yaml:"enforceBranches"`
// RequireApproval : set to true to enforce approval on PRs, default true.
RequireApproval bool `yaml:"requireApproval"`
// ApprovalCount is the number of required PR approvals, default 1.
ApprovalCount int `yaml:"approvalCount"`
// DismissStale : set to true to require PR approvalse be dismissed when a PR is updated, default true.
DismissStale bool `yaml:"dismissStale"`
// BlockForce : set to true to block force pushes, default true.
BlockForce bool `yaml:"blockForce"`
}
// RepoConfig is the repo-level config for Branch Protection
type RepoConfig struct {
// OptConfig is the standard repo-level opt in/out config.
OptConfig config.RepoOptConfig `yaml:"optConfig"`
// Action overrides the same setting in org-level, only if present.
Action *string `yaml:"action"`
// EnforceDefault overrides the same setting in org-level, only if present.
EnforceDefault *bool `yaml:"enforceDefault"`
// EnforceBranches adds more branches to the org-level list. Does not
// override. Always allowed irrespective of DisableRepoOverride setting.
EnforceBranches []string `yaml:"enforceBranches"`
// RequireApproval overrides the same setting in org-level, only if present.
RequireApproval *bool `yaml:"requireAppproval"`
// ApprovalCount overrides the same setting in org-level, only if present.
ApprovalCount *int `yaml:"approvalCount"`
// DismissStale overrides the same setting in org-level, only if present.
DismissStale *bool `yaml:"dismissStale"`
// BlockForce overrides the same setting in org-level, only if present.
BlockForce *bool `yaml:"blockForce"`
}
type mergedConfig struct {
Action string
EnforceDefault bool
EnforceBranches []string
RequireApproval bool
ApprovalCount int
DismissStale bool
BlockForce bool
}
type details struct {
PRReviews bool
NumReviews int
DismissStale bool
BlockForce bool
}
var configFetchConfig func(context.Context, *github.Client, string, string, string, interface{}) error
func init() {
configFetchConfig = config.FetchConfig
}
// Branch is the Branch Protection policy object, implements policydef.Policy.
type Branch bool
// NewBranch returns a new BranchProtection polcy.
func NewBranch() policydef.Policy {
var b Branch
return b
}
// Name returns the name of this policy, implementing policydef.Policy.Name()
func (b Branch) Name() string {
return polName
}
type repositories interface {
Get(context.Context, string, string) (*github.Repository,
*github.Response, error)
ListBranches(context.Context, string, string, *github.BranchListOptions) (
[]*github.Branch, *github.Response, error)
GetBranchProtection(context.Context, string, string, string) (
*github.Protection, *github.Response, error)
}
// Check performs the polcy check for Branch Protection based on the
// configuration stored in the org/repo, implementing policydef.Policy.Check()
func (b Branch) Check(ctx context.Context, c *github.Client, owner,
repo string) (*policydef.Result, error) {
return check(ctx, c.Repositories, c, owner, repo)
}
func check(ctx context.Context, rep repositories, c *github.Client, owner,
repo string) (*policydef.Result, error) {
oc, rc := getConfig(ctx, c, owner, repo)
enabled := config.IsEnabled(oc.OptConfig, rc.OptConfig, repo)
log.Info().
Str("org", owner).
Str("repo", repo).
Str("area", polName).
Bool("enabled", enabled).
Msg("Check repo enabled")
mc := mergeConfig(oc, rc, repo)
r, _, err := rep.Get(ctx, owner, repo)
if err != nil {
return nil, err
}
opt := &github.BranchListOptions{
ListOptions: github.ListOptions{
PerPage: 100,
},
}
var branches []*github.Branch
for {
bs, resp, err := rep.ListBranches(ctx, owner, repo, opt)
if err != nil {
return nil, err
}
branches = append(branches, bs...)
if resp.NextPage == 0 {
break
}
opt.Page = resp.NextPage
}
// Don't really need pagination here, only checking if no branches exist.
if len(branches) == 0 {
return &policydef.Result{
Enabled: enabled,
Pass: true,
NotifyText: "No branches to protect",
Details: nil,
}, nil
}
allBranches := mc.EnforceBranches
if mc.EnforceDefault {
allBranches = append(mc.EnforceBranches, r.GetDefaultBranch())
}
if len(allBranches) == 0 {
return &policydef.Result{
Enabled: enabled,
Pass: true,
NotifyText: "No branches configured for enforcement in policy",
Details: nil,
}, nil
}
pass := true
text := ""
ds := make(map[string]details)
for _, b := range allBranches {
p, rsp, err := rep.GetBranchProtection(ctx, owner, repo, b)
if err != nil {
if rsp != nil && rsp.StatusCode == http.StatusNotFound {
// Branch not protected
pass = false
text = text + fmt.Sprintf("No protection found for branch %v\n", b)
ds[b] = details{}
continue
}
return nil, err
}
var d details
rev := p.GetRequiredPullRequestReviews()
if rev != nil {
d.PRReviews = true
d.DismissStale = rev.DismissStaleReviews
if mc.DismissStale && !rev.DismissStaleReviews {
text = text +
fmt.Sprintf("Dismiss stale reviews not configured for branch %v\n", b)
pass = false
}
d.NumReviews = rev.RequiredApprovingReviewCount
if rev.RequiredApprovingReviewCount < mc.ApprovalCount {
pass = false
text = text +
fmt.Sprintf("PR Approvals below threshold %v : %v for branch %v\n",
rev.RequiredApprovingReviewCount, mc.ApprovalCount, b)
}
} else {
if mc.RequireApproval {
pass = false
text = text +
fmt.Sprintf("PR Approvals not configured for branch %v\n", b)
}
}
afp := p.GetAllowForcePushes()
d.BlockForce = true
if afp != nil {
if mc.BlockForce && afp.Enabled {
text = text +
fmt.Sprintf("Block force push not configured for branch %v\n", b)
pass = false
d.BlockForce = false
}
}
ds[b] = d
}
return &policydef.Result{
Enabled: enabled,
Pass: pass,
NotifyText: text,
Details: ds,
}, nil
}
// Fix implementing policydef.Policy.Fix(). Currently not supported. BP plans
// to support this TODO.
func (b Branch) Fix(ctx context.Context, c *github.Client, owner, repo string) error {
log.Warn().
Str("org", owner).
Str("repo", repo).
Str("area", polName).
Msg("Action fix is configured, but not implemented.")
return nil
}
// GetAction returns the configured action from Branch Protection's
// configuration stored in the org-level repo, default log. Implementing
// policydef.Policy.GetAction()
func (b Branch) GetAction(ctx context.Context, c *github.Client, owner, repo string) string {
oc, rc := getConfig(ctx, c, owner, repo)
mc := mergeConfig(oc, rc, repo)
return mc.Action
}
func getConfig(ctx context.Context, c *github.Client, owner, repo string) (*OrgConfig, *RepoConfig) {
oc := &OrgConfig{ // Fill out non-zero defaults
Action: "log",
EnforceDefault: true,
RequireApproval: true,
ApprovalCount: 1,
DismissStale: true,
BlockForce: true,
}
if err := configFetchConfig(ctx, c, owner, operator.OrgConfigRepo, configFile, oc); err != nil {
log.Error().
Str("org", owner).
Str("repo", operator.OrgConfigRepo).
Str("area", polName).
Str("file", configFile).
Err(err).
Msg("Unexpected config error, using defaults.")
}
rc := &RepoConfig{}
if err := configFetchConfig(ctx, c, owner, repo, path.Join(operator.RepoConfigDir, configFile), rc); err != nil {
log.Error().
Str("org", owner).
Str("repo", repo).
Str("area", polName).
Str("file", path.Join(operator.RepoConfigDir, configFile)).
Err(err).
Msg("Unexpected config error, using defaults.")
}
return oc, rc
}
func mergeConfig(oc *OrgConfig, rc *RepoConfig, repo string) *mergedConfig {
mc := &mergedConfig{
Action: oc.Action,
EnforceDefault: oc.EnforceDefault,
EnforceBranches: oc.EnforceBranches[repo],
RequireApproval: oc.RequireApproval,
ApprovalCount: oc.ApprovalCount,
DismissStale: oc.DismissStale,
BlockForce: oc.BlockForce,
}
mc.EnforceBranches = append(mc.EnforceBranches, rc.EnforceBranches...)
if !oc.OptConfig.DisableRepoOverride {
if rc.Action != nil {
mc.Action = *rc.Action
}
if rc.EnforceDefault != nil {
mc.EnforceDefault = *rc.EnforceDefault
}
if rc.RequireApproval != nil {
mc.RequireApproval = *rc.RequireApproval
}
if rc.ApprovalCount != nil {
mc.ApprovalCount = *rc.ApprovalCount
}
if rc.DismissStale != nil {
mc.DismissStale = *rc.DismissStale
}
if rc.BlockForce != nil {
mc.BlockForce = *rc.BlockForce
}
}
return mc
} | random_line_split | |
branch.go | // Copyright 2021 Allstar Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package branch implements the Branch Protection security policy.
package branch
import (
"context"
"fmt"
"net/http"
"path"
"github.com/ossf/allstar/pkg/config"
"github.com/ossf/allstar/pkg/config/operator"
"github.com/ossf/allstar/pkg/policydef"
"github.com/google/go-github/v32/github"
"github.com/rs/zerolog/log"
)
const configFile = "branch_protection.yaml"
const polName = "Branch Protection"
// OrgConfig is the org-level config definition for Branch Protection.
type OrgConfig struct {
// OptConfig is the standard org-level opt in/out config, RepoOverride applies to all
// BP config.
OptConfig config.OrgOptConfig `yaml:"optConfig"`
// Action defines which action to take, default log, other: issue...
Action string `yaml:"action"`
// EnforceDefault : set to true to enforce policy on default branch, default true.
EnforceDefault bool `yaml:"enforceDefault"`
// EnforceBranches is a map of repos and branches. These are other
// non-default branches to enforce policy on, such as branches which releases
// are made from.
EnforceBranches map[string][]string `yaml:"enforceBranches"`
// RequireApproval : set to true to enforce approval on PRs, default true.
RequireApproval bool `yaml:"requireApproval"`
// ApprovalCount is the number of required PR approvals, default 1.
ApprovalCount int `yaml:"approvalCount"`
// DismissStale : set to true to require PR approvalse be dismissed when a PR is updated, default true.
DismissStale bool `yaml:"dismissStale"`
// BlockForce : set to true to block force pushes, default true.
BlockForce bool `yaml:"blockForce"`
}
// RepoConfig is the repo-level config for Branch Protection
type RepoConfig struct {
// OptConfig is the standard repo-level opt in/out config.
OptConfig config.RepoOptConfig `yaml:"optConfig"`
// Action overrides the same setting in org-level, only if present.
Action *string `yaml:"action"`
// EnforceDefault overrides the same setting in org-level, only if present.
EnforceDefault *bool `yaml:"enforceDefault"`
// EnforceBranches adds more branches to the org-level list. Does not
// override. Always allowed irrespective of DisableRepoOverride setting.
EnforceBranches []string `yaml:"enforceBranches"`
// RequireApproval overrides the same setting in org-level, only if present.
RequireApproval *bool `yaml:"requireAppproval"`
// ApprovalCount overrides the same setting in org-level, only if present.
ApprovalCount *int `yaml:"approvalCount"`
// DismissStale overrides the same setting in org-level, only if present.
DismissStale *bool `yaml:"dismissStale"`
// BlockForce overrides the same setting in org-level, only if present.
BlockForce *bool `yaml:"blockForce"`
}
type mergedConfig struct {
Action string
EnforceDefault bool
EnforceBranches []string
RequireApproval bool
ApprovalCount int
DismissStale bool
BlockForce bool
}
type details struct {
PRReviews bool
NumReviews int
DismissStale bool
BlockForce bool
}
var configFetchConfig func(context.Context, *github.Client, string, string, string, interface{}) error
func init() {
configFetchConfig = config.FetchConfig
}
// Branch is the Branch Protection policy object, implements policydef.Policy.
type Branch bool
// NewBranch returns a new BranchProtection polcy.
func NewBranch() policydef.Policy {
var b Branch
return b
}
// Name returns the name of this policy, implementing policydef.Policy.Name()
func (b Branch) Name() string {
return polName
}
type repositories interface {
Get(context.Context, string, string) (*github.Repository,
*github.Response, error)
ListBranches(context.Context, string, string, *github.BranchListOptions) (
[]*github.Branch, *github.Response, error)
GetBranchProtection(context.Context, string, string, string) (
*github.Protection, *github.Response, error)
}
// Check performs the polcy check for Branch Protection based on the
// configuration stored in the org/repo, implementing policydef.Policy.Check()
func (b Branch) Check(ctx context.Context, c *github.Client, owner,
repo string) (*policydef.Result, error) {
return check(ctx, c.Repositories, c, owner, repo)
}
func check(ctx context.Context, rep repositories, c *github.Client, owner,
repo string) (*policydef.Result, error) {
oc, rc := getConfig(ctx, c, owner, repo)
enabled := config.IsEnabled(oc.OptConfig, rc.OptConfig, repo)
log.Info().
Str("org", owner).
Str("repo", repo).
Str("area", polName).
Bool("enabled", enabled).
Msg("Check repo enabled")
mc := mergeConfig(oc, rc, repo)
r, _, err := rep.Get(ctx, owner, repo)
if err != nil {
return nil, err
}
opt := &github.BranchListOptions{
ListOptions: github.ListOptions{
PerPage: 100,
},
}
var branches []*github.Branch
for {
bs, resp, err := rep.ListBranches(ctx, owner, repo, opt)
if err != nil {
return nil, err
}
branches = append(branches, bs...)
if resp.NextPage == 0 {
break
}
opt.Page = resp.NextPage
}
// Don't really need pagination here, only checking if no branches exist.
if len(branches) == 0 {
return &policydef.Result{
Enabled: enabled,
Pass: true,
NotifyText: "No branches to protect",
Details: nil,
}, nil
}
allBranches := mc.EnforceBranches
if mc.EnforceDefault {
allBranches = append(mc.EnforceBranches, r.GetDefaultBranch())
}
if len(allBranches) == 0 {
return &policydef.Result{
Enabled: enabled,
Pass: true,
NotifyText: "No branches configured for enforcement in policy",
Details: nil,
}, nil
}
pass := true
text := ""
ds := make(map[string]details)
for _, b := range allBranches {
p, rsp, err := rep.GetBranchProtection(ctx, owner, repo, b)
if err != nil {
if rsp != nil && rsp.StatusCode == http.StatusNotFound {
// Branch not protected
pass = false
text = text + fmt.Sprintf("No protection found for branch %v\n", b)
ds[b] = details{}
continue
}
return nil, err
}
var d details
rev := p.GetRequiredPullRequestReviews()
if rev != nil {
d.PRReviews = true
d.DismissStale = rev.DismissStaleReviews
if mc.DismissStale && !rev.DismissStaleReviews {
text = text +
fmt.Sprintf("Dismiss stale reviews not configured for branch %v\n", b)
pass = false
}
d.NumReviews = rev.RequiredApprovingReviewCount
if rev.RequiredApprovingReviewCount < mc.ApprovalCount {
pass = false
text = text +
fmt.Sprintf("PR Approvals below threshold %v : %v for branch %v\n",
rev.RequiredApprovingReviewCount, mc.ApprovalCount, b)
}
} else {
if mc.RequireApproval {
pass = false
text = text +
fmt.Sprintf("PR Approvals not configured for branch %v\n", b)
}
}
afp := p.GetAllowForcePushes()
d.BlockForce = true
if afp != nil {
if mc.BlockForce && afp.Enabled {
text = text +
fmt.Sprintf("Block force push not configured for branch %v\n", b)
pass = false
d.BlockForce = false
}
}
ds[b] = d
}
return &policydef.Result{
Enabled: enabled,
Pass: pass,
NotifyText: text,
Details: ds,
}, nil
}
// Fix implementing policydef.Policy.Fix(). Currently not supported. BP plans
// to support this TODO.
func (b Branch) Fix(ctx context.Context, c *github.Client, owner, repo string) error {
log.Warn().
Str("org", owner).
Str("repo", repo).
Str("area", polName).
Msg("Action fix is configured, but not implemented.")
return nil
}
// GetAction returns the configured action from Branch Protection's
// configuration stored in the org-level repo, default log. Implementing
// policydef.Policy.GetAction()
func (b Branch) GetAction(ctx context.Context, c *github.Client, owner, repo string) string |
func getConfig(ctx context.Context, c *github.Client, owner, repo string) (*OrgConfig, *RepoConfig) {
oc := &OrgConfig{ // Fill out non-zero defaults
Action: "log",
EnforceDefault: true,
RequireApproval: true,
ApprovalCount: 1,
DismissStale: true,
BlockForce: true,
}
if err := configFetchConfig(ctx, c, owner, operator.OrgConfigRepo, configFile, oc); err != nil {
log.Error().
Str("org", owner).
Str("repo", operator.OrgConfigRepo).
Str("area", polName).
Str("file", configFile).
Err(err).
Msg("Unexpected config error, using defaults.")
}
rc := &RepoConfig{}
if err := configFetchConfig(ctx, c, owner, repo, path.Join(operator.RepoConfigDir, configFile), rc); err != nil {
log.Error().
Str("org", owner).
Str("repo", repo).
Str("area", polName).
Str("file", path.Join(operator.RepoConfigDir, configFile)).
Err(err).
Msg("Unexpected config error, using defaults.")
}
return oc, rc
}
func mergeConfig(oc *OrgConfig, rc *RepoConfig, repo string) *mergedConfig {
mc := &mergedConfig{
Action: oc.Action,
EnforceDefault: oc.EnforceDefault,
EnforceBranches: oc.EnforceBranches[repo],
RequireApproval: oc.RequireApproval,
ApprovalCount: oc.ApprovalCount,
DismissStale: oc.DismissStale,
BlockForce: oc.BlockForce,
}
mc.EnforceBranches = append(mc.EnforceBranches, rc.EnforceBranches...)
if !oc.OptConfig.DisableRepoOverride {
if rc.Action != nil {
mc.Action = *rc.Action
}
if rc.EnforceDefault != nil {
mc.EnforceDefault = *rc.EnforceDefault
}
if rc.RequireApproval != nil {
mc.RequireApproval = *rc.RequireApproval
}
if rc.ApprovalCount != nil {
mc.ApprovalCount = *rc.ApprovalCount
}
if rc.DismissStale != nil {
mc.DismissStale = *rc.DismissStale
}
if rc.BlockForce != nil {
mc.BlockForce = *rc.BlockForce
}
}
return mc
}
| {
oc, rc := getConfig(ctx, c, owner, repo)
mc := mergeConfig(oc, rc, repo)
return mc.Action
} | identifier_body |
branch.go | // Copyright 2021 Allstar Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package branch implements the Branch Protection security policy.
package branch
import (
"context"
"fmt"
"net/http"
"path"
"github.com/ossf/allstar/pkg/config"
"github.com/ossf/allstar/pkg/config/operator"
"github.com/ossf/allstar/pkg/policydef"
"github.com/google/go-github/v32/github"
"github.com/rs/zerolog/log"
)
const configFile = "branch_protection.yaml"
const polName = "Branch Protection"
// OrgConfig is the org-level config definition for Branch Protection.
type OrgConfig struct {
// OptConfig is the standard org-level opt in/out config, RepoOverride applies to all
// BP config.
OptConfig config.OrgOptConfig `yaml:"optConfig"`
// Action defines which action to take, default log, other: issue...
Action string `yaml:"action"`
// EnforceDefault : set to true to enforce policy on default branch, default true.
EnforceDefault bool `yaml:"enforceDefault"`
// EnforceBranches is a map of repos and branches. These are other
// non-default branches to enforce policy on, such as branches which releases
// are made from.
EnforceBranches map[string][]string `yaml:"enforceBranches"`
// RequireApproval : set to true to enforce approval on PRs, default true.
RequireApproval bool `yaml:"requireApproval"`
// ApprovalCount is the number of required PR approvals, default 1.
ApprovalCount int `yaml:"approvalCount"`
// DismissStale : set to true to require PR approvalse be dismissed when a PR is updated, default true.
DismissStale bool `yaml:"dismissStale"`
// BlockForce : set to true to block force pushes, default true.
BlockForce bool `yaml:"blockForce"`
}
// RepoConfig is the repo-level config for Branch Protection
type RepoConfig struct {
// OptConfig is the standard repo-level opt in/out config.
OptConfig config.RepoOptConfig `yaml:"optConfig"`
// Action overrides the same setting in org-level, only if present.
Action *string `yaml:"action"`
// EnforceDefault overrides the same setting in org-level, only if present.
EnforceDefault *bool `yaml:"enforceDefault"`
// EnforceBranches adds more branches to the org-level list. Does not
// override. Always allowed irrespective of DisableRepoOverride setting.
EnforceBranches []string `yaml:"enforceBranches"`
// RequireApproval overrides the same setting in org-level, only if present.
RequireApproval *bool `yaml:"requireAppproval"`
// ApprovalCount overrides the same setting in org-level, only if present.
ApprovalCount *int `yaml:"approvalCount"`
// DismissStale overrides the same setting in org-level, only if present.
DismissStale *bool `yaml:"dismissStale"`
// BlockForce overrides the same setting in org-level, only if present.
BlockForce *bool `yaml:"blockForce"`
}
type mergedConfig struct {
Action string
EnforceDefault bool
EnforceBranches []string
RequireApproval bool
ApprovalCount int
DismissStale bool
BlockForce bool
}
type details struct {
PRReviews bool
NumReviews int
DismissStale bool
BlockForce bool
}
var configFetchConfig func(context.Context, *github.Client, string, string, string, interface{}) error
func init() {
configFetchConfig = config.FetchConfig
}
// Branch is the Branch Protection policy object, implements policydef.Policy.
type Branch bool
// NewBranch returns a new BranchProtection polcy.
func NewBranch() policydef.Policy {
var b Branch
return b
}
// Name returns the name of this policy, implementing policydef.Policy.Name()
func (b Branch) Name() string {
return polName
}
type repositories interface {
Get(context.Context, string, string) (*github.Repository,
*github.Response, error)
ListBranches(context.Context, string, string, *github.BranchListOptions) (
[]*github.Branch, *github.Response, error)
GetBranchProtection(context.Context, string, string, string) (
*github.Protection, *github.Response, error)
}
// Check performs the polcy check for Branch Protection based on the
// configuration stored in the org/repo, implementing policydef.Policy.Check()
func (b Branch) Check(ctx context.Context, c *github.Client, owner,
repo string) (*policydef.Result, error) {
return check(ctx, c.Repositories, c, owner, repo)
}
func check(ctx context.Context, rep repositories, c *github.Client, owner,
repo string) (*policydef.Result, error) {
oc, rc := getConfig(ctx, c, owner, repo)
enabled := config.IsEnabled(oc.OptConfig, rc.OptConfig, repo)
log.Info().
Str("org", owner).
Str("repo", repo).
Str("area", polName).
Bool("enabled", enabled).
Msg("Check repo enabled")
mc := mergeConfig(oc, rc, repo)
r, _, err := rep.Get(ctx, owner, repo)
if err != nil {
return nil, err
}
opt := &github.BranchListOptions{
ListOptions: github.ListOptions{
PerPage: 100,
},
}
var branches []*github.Branch
for {
bs, resp, err := rep.ListBranches(ctx, owner, repo, opt)
if err != nil {
return nil, err
}
branches = append(branches, bs...)
if resp.NextPage == 0 {
break
}
opt.Page = resp.NextPage
}
// Don't really need pagination here, only checking if no branches exist.
if len(branches) == 0 {
return &policydef.Result{
Enabled: enabled,
Pass: true,
NotifyText: "No branches to protect",
Details: nil,
}, nil
}
allBranches := mc.EnforceBranches
if mc.EnforceDefault {
allBranches = append(mc.EnforceBranches, r.GetDefaultBranch())
}
if len(allBranches) == 0 {
return &policydef.Result{
Enabled: enabled,
Pass: true,
NotifyText: "No branches configured for enforcement in policy",
Details: nil,
}, nil
}
pass := true
text := ""
ds := make(map[string]details)
for _, b := range allBranches {
p, rsp, err := rep.GetBranchProtection(ctx, owner, repo, b)
if err != nil {
if rsp != nil && rsp.StatusCode == http.StatusNotFound {
// Branch not protected
pass = false
text = text + fmt.Sprintf("No protection found for branch %v\n", b)
ds[b] = details{}
continue
}
return nil, err
}
var d details
rev := p.GetRequiredPullRequestReviews()
if rev != nil {
d.PRReviews = true
d.DismissStale = rev.DismissStaleReviews
if mc.DismissStale && !rev.DismissStaleReviews {
text = text +
fmt.Sprintf("Dismiss stale reviews not configured for branch %v\n", b)
pass = false
}
d.NumReviews = rev.RequiredApprovingReviewCount
if rev.RequiredApprovingReviewCount < mc.ApprovalCount {
pass = false
text = text +
fmt.Sprintf("PR Approvals below threshold %v : %v for branch %v\n",
rev.RequiredApprovingReviewCount, mc.ApprovalCount, b)
}
} else {
if mc.RequireApproval {
pass = false
text = text +
fmt.Sprintf("PR Approvals not configured for branch %v\n", b)
}
}
afp := p.GetAllowForcePushes()
d.BlockForce = true
if afp != nil {
if mc.BlockForce && afp.Enabled {
text = text +
fmt.Sprintf("Block force push not configured for branch %v\n", b)
pass = false
d.BlockForce = false
}
}
ds[b] = d
}
return &policydef.Result{
Enabled: enabled,
Pass: pass,
NotifyText: text,
Details: ds,
}, nil
}
// Fix implementing policydef.Policy.Fix(). Currently not supported. BP plans
// to support this TODO.
func (b Branch) Fix(ctx context.Context, c *github.Client, owner, repo string) error {
log.Warn().
Str("org", owner).
Str("repo", repo).
Str("area", polName).
Msg("Action fix is configured, but not implemented.")
return nil
}
// GetAction returns the configured action from Branch Protection's
// configuration stored in the org-level repo, default log. Implementing
// policydef.Policy.GetAction()
func (b Branch) | (ctx context.Context, c *github.Client, owner, repo string) string {
oc, rc := getConfig(ctx, c, owner, repo)
mc := mergeConfig(oc, rc, repo)
return mc.Action
}
func getConfig(ctx context.Context, c *github.Client, owner, repo string) (*OrgConfig, *RepoConfig) {
oc := &OrgConfig{ // Fill out non-zero defaults
Action: "log",
EnforceDefault: true,
RequireApproval: true,
ApprovalCount: 1,
DismissStale: true,
BlockForce: true,
}
if err := configFetchConfig(ctx, c, owner, operator.OrgConfigRepo, configFile, oc); err != nil {
log.Error().
Str("org", owner).
Str("repo", operator.OrgConfigRepo).
Str("area", polName).
Str("file", configFile).
Err(err).
Msg("Unexpected config error, using defaults.")
}
rc := &RepoConfig{}
if err := configFetchConfig(ctx, c, owner, repo, path.Join(operator.RepoConfigDir, configFile), rc); err != nil {
log.Error().
Str("org", owner).
Str("repo", repo).
Str("area", polName).
Str("file", path.Join(operator.RepoConfigDir, configFile)).
Err(err).
Msg("Unexpected config error, using defaults.")
}
return oc, rc
}
func mergeConfig(oc *OrgConfig, rc *RepoConfig, repo string) *mergedConfig {
mc := &mergedConfig{
Action: oc.Action,
EnforceDefault: oc.EnforceDefault,
EnforceBranches: oc.EnforceBranches[repo],
RequireApproval: oc.RequireApproval,
ApprovalCount: oc.ApprovalCount,
DismissStale: oc.DismissStale,
BlockForce: oc.BlockForce,
}
mc.EnforceBranches = append(mc.EnforceBranches, rc.EnforceBranches...)
if !oc.OptConfig.DisableRepoOverride {
if rc.Action != nil {
mc.Action = *rc.Action
}
if rc.EnforceDefault != nil {
mc.EnforceDefault = *rc.EnforceDefault
}
if rc.RequireApproval != nil {
mc.RequireApproval = *rc.RequireApproval
}
if rc.ApprovalCount != nil {
mc.ApprovalCount = *rc.ApprovalCount
}
if rc.DismissStale != nil {
mc.DismissStale = *rc.DismissStale
}
if rc.BlockForce != nil {
mc.BlockForce = *rc.BlockForce
}
}
return mc
}
| GetAction | identifier_name |
data_mining_project3.py | import tweepy
import json
import csv
import pandas as pd
import re
from _operator import pos
from jedi.parser.tokenize import number
### Using StreamListener class for collecting the tweets.
class listener(tweepy.StreamListener):
def on_data(self, data):
global n ### Since we have to use this variable in main function, we set it as global varible.
if (n < count):
#print (data)
status = json.loads(data)
try:
try:
with open('tweets.csv', 'a+',newline='') as f: ### Create a csv file which will contain streamed tweets.
writer = csv.writer(f)
writer.writerow([status['user']['screen_name'],status['created_at'], status['user']['location'], status['favorite_count'], status['retweet_count'], status['text']])
#print (status['user']['screen_name'],status['created_at'], status['user']['location'], status['favorite_count'], status['retweet_count'], status['text'],"\n")
#print ('')
n += 1 ### If only the tweets is collected without errors, increse the number of total colleted tweets.
except UnicodeEncodeError:
pass
except KeyError:
pass
return True
else :
return False
def on_error(self, status):
print('error : ', status)
def printss():
print("Hello")
'''
Given a list of strings, the follwoing subroutine outputs the frequency of each item in the list.
input: list
output: dictionary
example: input: ['a', 'a', 'b']
output: {('a':2), ('b':1)}
'''
def frequencyCounter(ArrayOfStrings):
#print(type(ArrayOfStrings[2]))
dictionary = {}
regex = re.compile('[^a-zA-Z$]')
for items in ArrayOfStrings:
temp = items.split()
for i in temp:
|
return(dictionary)
'''
This subroutine calculates the total number of different words in the dictionary.
input: dictionary
output: count, which is an int variable.
'''
def totalNumberOfDifferentWords(dictionary):
count = 0
for i in dictionary.values():
count = count + int(i)
return count
'''
The following subroutine is called by the determinePolarity subroutine.
This simply calculates the probability of words according to Naive Bayes Classifier.
input: dictionary, size of dictionary, differentWords in dictionary, test string
output: probability of the words occuring. It'a a single float variable.
'''
def classifierForPolarity(dictionary, totalSize, differentWords, strings):
regex = re.compile('[^a-zA-Z$]')
wordList = strings.split()
divider = differentWords + totalSize
count = 1
probabilityList = []
for i in wordList:
i = regex.sub('', i)
if str(i) in dictionary.keys():
temp = dictionary[str(i)]
count += int(temp)
else:
count = count
classify = count / divider
probabilityList.append(classify)
classify = 0
count = 1
totalProb = 1
for i in probabilityList:
totalProb = totalProb * i
return(totalProb*0.5)
'''
This subroutine is called by the main program. It uses all available lists, dictionaries and other
data to determine the polarity of a tweet.
Probability of a tweet is calculated by the classifierForPolarity subroutine. Both the dictionaries
are used for this part.
Depending on which probability is larger, polarity of the tweet is determined.
If there is 50% or more similarity between the probabilities,
polarity is determined as neutral.
input: dictionries, lists, int variables
output: list
'''
def determinePolarity(dictEthnicity, dictReligion, dictSexualOrientation, dictOtherRacialBias, dictPositiveTweet, dictNeutralTweet, dictNegativeTweet, sizeOfEthnicity, sizeOfReligion, sizeOfSexualOrientation, sizeOfOtherRacialBias, sizeOfPositiveTweet, sizeOfNeutralTweet, sizeOfNegativeTweet, differentWordsInEthnicity, differentWordsInReligion, differentWordsInSexualOrientation, differentWordsInOtherRacialBias, differentWordsInPositiveTweet, differentWordsInNegativeTweet, differentWordsInNeutralTweet, arrayTestData):
result = []
regex = re.compile('[^a-zA-Z$]')
tempCount = 0
for i in arrayTestData:
testString = str(i)
probEthnicity = classifierForPolarity(dictEthnicity, sizeOfEthnicity, differentWordsInEthnicity, testString)
probReligion = classifierForPolarity(dictReligion, sizeOfReligion, differentWordsInReligion, testString)
probSexualOrientation = classifierForPolarity(dictSexualOrientation, sizeOfSexualOrientation, differentWordsInSexualOrientation, testString)
probOtherRacialBias = classifierForPolarity(dictOtherRacialBias, sizeOfOtherRacialBias, differentWordsInOtherRacialBias, testString)
probPositiveTweet = classifierForPolarity(dictPositiveTweet, sizeOfPositiveTweet, differentWordsInPositiveTweet, testString)
probNeutralTweet = classifierForPolarity(dictNeutralTweet, sizeOfNeutralTweet, differentWordsInNeutralTweet, testString)
probNegativeTweet = classifierForPolarity(dictNegativeTweet, sizeOfNegativeTweet, differentWordsInNegativeTweet, testString)
temp = [tempCount, probEthnicity, probReligion, probSexualOrientation, probOtherRacialBias, probPositiveTweet, probNeutralTweet, probNegativeTweet]
result.append(temp)
temp = []
tempCount += 1
return(result)
'''
The following program sorts results of a 2d array based on a parameter.
'''
def sortResult(result, parameter):
sortedTweets = sorted(result, key=lambda x: x[parameter], reverse=True)
return(sortedTweets)
### Authentication details.
consumer_key = 'uAJx6MwKhYDMKamBrarSiGUTd'
consumer_secret = 'na8DAWlfuHSQllqVgcAcUtCSCblL6RtaFmvePFoA1fBUn03d1d'
access_token = '121692329-Q4fTuw0FmJeGkIHLnmxgX7u9ajpHiyIDrLwsnLVp'
access_token_secret = 'fA26olnFJH26y8t1tPOLXs7oXorHLrjyeSk5YKEokrSMX'
count = 0
n = 1
### was previously part of main program
print ("How many tweets do you want to collect?:")
### global count
count = int(input())
print ("Start to collect the tweets")
### Open the csv file saved streamed tweets.
with open('tweets.csv', 'w',newline='') as f:
writer = csv.writer(f)
writer.writerow(["user_id","created_at","location","favorite_count","retweet_count","text"])
l = listener()
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
### Set the keywords that related with racisim that we think and collect the tweets which contain at least one of the those keywords.
stream = tweepy.Stream(auth, l)
stream.filter(track=['racism','racist','racial','race','discrimination','black','white','skinhead', 'sexism', 'LGBT','lesbian','gay','bisexual','transgender', 'homophobia', 'male', 'female', 'misogynistic', 'asian', 'african american', 'hispanic', 'native american','christian','jew','muslim','hindu','buddhist','bigotry', 'prejudice','kkk','negro','honkie','chink','yankee','ethnic','unfairness','racialism','color-blind','segregation'])
#print("Program Finished")
pandaData = pd.read_csv('groundTruth.csv', encoding = "ISO-8859-1")
chosenColumns = [0,1,2]
arrayData = pandaData.as_matrix(columns = pandaData.columns[chosenColumns])
ethnicity = []
religion = []
sexualOrientation = []
otherRacialBias = []
positiveTweet = []
negativeTweet = []
neutralTweet = []
for i,j in enumerate(arrayData):
if j[1] == 1:
ethnicity.append(j[0])
elif j[1] == 2:
religion.append(j[0])
elif j[1] == 3:
sexualOrientation.append(j[0])
elif j[1] == 4:
otherRacialBias.append(j[0])
for i,j in enumerate(arrayData):
if j[2] == 0:
negativeTweet.append(j[0])
elif j[2] == 1:
neutralTweet.append(j[0])
elif j[2] == 2:
positiveTweet.append(j[0])
pandaTestData = pd.read_csv('tweets.csv',encoding = "ISO-8859-1")
chosenTestColumn = [5]
arrayTestData = pandaTestData.as_matrix(columns = pandaTestData.columns[chosenTestColumn])
#print(arrayTestData)
'''
The following code segment counts the frequency of the words.
input type: list
output type: dictionary
'''
dictEthnicity = frequencyCounter(ethnicity)
dictReligion = frequencyCounter(religion)
dictSexualOrientation = frequencyCounter(sexualOrientation)
dictOtherRacialBias = frequencyCounter(otherRacialBias)
dictPositiveTweet = frequencyCounter(positiveTweet)
dictNegativeTweet = frequencyCounter(negativeTweet)
dictNeutralTweet = frequencyCounter(neutralTweet)
'''
The following code segment counts the total number of words in
each dictionary.
'''
sizeOfEthnicity = totalNumberOfDifferentWords(dictEthnicity)
sizeOfReligion = totalNumberOfDifferentWords(dictReligion)
sizeOfSexualOrientation = totalNumberOfDifferentWords(dictSexualOrientation)
sizeOfOtherRacialBias = totalNumberOfDifferentWords(dictOtherRacialBias)
sizeOfPositiveTweet = totalNumberOfDifferentWords(dictPositiveTweet)
sizeOfNegativeTweet = totalNumberOfDifferentWords(dictNegativeTweet)
sizeOfNeutralTweet = totalNumberOfDifferentWords(dictNeutralTweet)
differentWordsInEthnicity = len(dictEthnicity)
differentWordsInReligion = len(dictReligion)
differentWordsInSexualOrientation = len(dictSexualOrientation)
differentWordsInOtherRacialBias = len(dictOtherRacialBias)
differentWordsInPositiveTweet = len(dictPositiveTweet)
differentWordsInNegativeTweet = len(dictNegativeTweet)
differentWordsInNeutralTweet = len(dictNeutralTweet)
result = determinePolarity(dictEthnicity, dictReligion, dictSexualOrientation, dictOtherRacialBias, dictPositiveTweet, dictNeutralTweet, dictNegativeTweet, sizeOfEthnicity, sizeOfReligion, sizeOfSexualOrientation, sizeOfOtherRacialBias, sizeOfPositiveTweet, sizeOfNeutralTweet, sizeOfNegativeTweet, differentWordsInEthnicity, differentWordsInReligion, differentWordsInSexualOrientation, differentWordsInOtherRacialBias, differentWordsInPositiveTweet, differentWordsInNegativeTweet, differentWordsInNeutralTweet, arrayTestData)
'''
sortedByEthnicity = sorted(result, key=lambda x: x[1], reverse=True)
sortedByReligion = sorted(result, key=lambda x: x[2], reverse=True)
sortedBySexualOrientation = sorted(result, key=lambda x: x[3], reverse=True)
sortedByOtherRacialBias = sorted(result, key=lambda x: x[4], reverse=True)
sortedByPositiveTweets = sorted(result, key=lambda x: x[5], reverse=True)
sortedByNeutralTweets = sorted(result, key=lambda x: x[6], reverse=True)
sortedByNegativeTweets = sorted(result, key=lambda x: x[7], reverse=True)
'''
print("How do you want to sort? \n 1 for Ethnicity \n 2 for Religion \n 3 for Sexual Orientation \n 4 for Other Racial Bias \n 5 for positive attitude \n 6 for neutral attitude \n 7 for negative attitude")
parameter = int(input())
sortedTweetsByParameter = sortResult(result, parameter)
with open('outputWithAllProbability.csv','w', newline='') as of:
writer = csv.writer(of)
writer.writerow(["user_id","ethnicity", "religion", "sexualOrientation", "OtherRacialBias", "Positive Attitude", "Neutral Attitude", "Negative Attitude"])
with open('outputWithAllProbability.csv', 'a+',newline='') as of:
for i in sortedTweetsByParameter:
print(i, file=of)
print("How many accounts do you want to find?")
numberOfAccounts = int(input())
userColumn = [0]
topAccountArray = pandaTestData.as_matrix(columns = pandaTestData.columns[userColumn])
tempList = []
for i in range(numberOfAccounts):
tempList.append(sortedTweetsByParameter[i][0])
topAccountList = []
for i in tempList:
temp = int(i)
topAccountList.append(topAccountArray[temp][0])
with open('output.csv','w') as out_file:
for i in topAccountList:
print(i, file=out_file)
print("Please check the CSV file for the persons responsible. Thanks.")
| i = regex.sub('', i)
if i not in dictionary.keys():
dictionary[i] = 1
else:
dictionary[i] += 1 | conditional_block |
data_mining_project3.py | import tweepy
import json
import csv
import pandas as pd
import re
from _operator import pos
from jedi.parser.tokenize import number
### Using StreamListener class for collecting the tweets.
class listener(tweepy.StreamListener):
def on_data(self, data):
global n ### Since we have to use this variable in main function, we set it as global varible.
if (n < count):
#print (data)
status = json.loads(data)
try:
try:
with open('tweets.csv', 'a+',newline='') as f: ### Create a csv file which will contain streamed tweets.
writer = csv.writer(f)
writer.writerow([status['user']['screen_name'],status['created_at'], status['user']['location'], status['favorite_count'], status['retweet_count'], status['text']])
#print (status['user']['screen_name'],status['created_at'], status['user']['location'], status['favorite_count'], status['retweet_count'], status['text'],"\n")
#print ('')
n += 1 ### If only the tweets is collected without errors, increse the number of total colleted tweets.
except UnicodeEncodeError:
pass
except KeyError:
pass
return True
else :
return False
def on_error(self, status):
print('error : ', status)
def printss():
print("Hello")
'''
Given a list of strings, the follwoing subroutine outputs the frequency of each item in the list.
input: list
output: dictionary
example: input: ['a', 'a', 'b']
output: {('a':2), ('b':1)}
'''
def frequencyCounter(ArrayOfStrings):
#print(type(ArrayOfStrings[2]))
dictionary = {}
regex = re.compile('[^a-zA-Z$]')
for items in ArrayOfStrings:
temp = items.split()
for i in temp:
i = regex.sub('', i)
if i not in dictionary.keys():
dictionary[i] = 1
else:
dictionary[i] += 1
return(dictionary)
'''
This subroutine calculates the total number of different words in the dictionary.
input: dictionary
output: count, which is an int variable.
'''
def totalNumberOfDifferentWords(dictionary):
count = 0
for i in dictionary.values():
count = count + int(i)
return count
'''
The following subroutine is called by the determinePolarity subroutine.
This simply calculates the probability of words according to Naive Bayes Classifier.
input: dictionary, size of dictionary, differentWords in dictionary, test string
output: probability of the words occuring. It'a a single float variable.
'''
def classifierForPolarity(dictionary, totalSize, differentWords, strings):
regex = re.compile('[^a-zA-Z$]')
wordList = strings.split()
divider = differentWords + totalSize
count = 1
probabilityList = []
for i in wordList:
i = regex.sub('', i)
if str(i) in dictionary.keys():
temp = dictionary[str(i)]
count += int(temp)
else:
count = count
classify = count / divider
probabilityList.append(classify)
classify = 0
count = 1
totalProb = 1
for i in probabilityList:
totalProb = totalProb * i
return(totalProb*0.5)
'''
This subroutine is called by the main program. It uses all available lists, dictionaries and other
data to determine the polarity of a tweet.
Probability of a tweet is calculated by the classifierForPolarity subroutine. Both the dictionaries
are used for this part.
Depending on which probability is larger, polarity of the tweet is determined.
If there is 50% or more similarity between the probabilities,
polarity is determined as neutral.
input: dictionries, lists, int variables
output: list
'''
def determinePolarity(dictEthnicity, dictReligion, dictSexualOrientation, dictOtherRacialBias, dictPositiveTweet, dictNeutralTweet, dictNegativeTweet, sizeOfEthnicity, sizeOfReligion, sizeOfSexualOrientation, sizeOfOtherRacialBias, sizeOfPositiveTweet, sizeOfNeutralTweet, sizeOfNegativeTweet, differentWordsInEthnicity, differentWordsInReligion, differentWordsInSexualOrientation, differentWordsInOtherRacialBias, differentWordsInPositiveTweet, differentWordsInNegativeTweet, differentWordsInNeutralTweet, arrayTestData):
|
'''
The following program sorts results of a 2d array based on a parameter.
'''
def sortResult(result, parameter):
sortedTweets = sorted(result, key=lambda x: x[parameter], reverse=True)
return(sortedTweets)
### Authentication details.
consumer_key = 'uAJx6MwKhYDMKamBrarSiGUTd'
consumer_secret = 'na8DAWlfuHSQllqVgcAcUtCSCblL6RtaFmvePFoA1fBUn03d1d'
access_token = '121692329-Q4fTuw0FmJeGkIHLnmxgX7u9ajpHiyIDrLwsnLVp'
access_token_secret = 'fA26olnFJH26y8t1tPOLXs7oXorHLrjyeSk5YKEokrSMX'
count = 0
n = 1
### was previously part of main program
print ("How many tweets do you want to collect?:")
### global count
count = int(input())
print ("Start to collect the tweets")
### Open the csv file saved streamed tweets.
with open('tweets.csv', 'w',newline='') as f:
writer = csv.writer(f)
writer.writerow(["user_id","created_at","location","favorite_count","retweet_count","text"])
l = listener()
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
### Set the keywords that related with racisim that we think and collect the tweets which contain at least one of the those keywords.
stream = tweepy.Stream(auth, l)
stream.filter(track=['racism','racist','racial','race','discrimination','black','white','skinhead', 'sexism', 'LGBT','lesbian','gay','bisexual','transgender', 'homophobia', 'male', 'female', 'misogynistic', 'asian', 'african american', 'hispanic', 'native american','christian','jew','muslim','hindu','buddhist','bigotry', 'prejudice','kkk','negro','honkie','chink','yankee','ethnic','unfairness','racialism','color-blind','segregation'])
#print("Program Finished")
pandaData = pd.read_csv('groundTruth.csv', encoding = "ISO-8859-1")
chosenColumns = [0,1,2]
arrayData = pandaData.as_matrix(columns = pandaData.columns[chosenColumns])
ethnicity = []
religion = []
sexualOrientation = []
otherRacialBias = []
positiveTweet = []
negativeTweet = []
neutralTweet = []
for i,j in enumerate(arrayData):
if j[1] == 1:
ethnicity.append(j[0])
elif j[1] == 2:
religion.append(j[0])
elif j[1] == 3:
sexualOrientation.append(j[0])
elif j[1] == 4:
otherRacialBias.append(j[0])
for i,j in enumerate(arrayData):
if j[2] == 0:
negativeTweet.append(j[0])
elif j[2] == 1:
neutralTweet.append(j[0])
elif j[2] == 2:
positiveTweet.append(j[0])
pandaTestData = pd.read_csv('tweets.csv',encoding = "ISO-8859-1")
chosenTestColumn = [5]
arrayTestData = pandaTestData.as_matrix(columns = pandaTestData.columns[chosenTestColumn])
#print(arrayTestData)
'''
The following code segment counts the frequency of the words.
input type: list
output type: dictionary
'''
dictEthnicity = frequencyCounter(ethnicity)
dictReligion = frequencyCounter(religion)
dictSexualOrientation = frequencyCounter(sexualOrientation)
dictOtherRacialBias = frequencyCounter(otherRacialBias)
dictPositiveTweet = frequencyCounter(positiveTweet)
dictNegativeTweet = frequencyCounter(negativeTweet)
dictNeutralTweet = frequencyCounter(neutralTweet)
'''
The following code segment counts the total number of words in
each dictionary.
'''
sizeOfEthnicity = totalNumberOfDifferentWords(dictEthnicity)
sizeOfReligion = totalNumberOfDifferentWords(dictReligion)
sizeOfSexualOrientation = totalNumberOfDifferentWords(dictSexualOrientation)
sizeOfOtherRacialBias = totalNumberOfDifferentWords(dictOtherRacialBias)
sizeOfPositiveTweet = totalNumberOfDifferentWords(dictPositiveTweet)
sizeOfNegativeTweet = totalNumberOfDifferentWords(dictNegativeTweet)
sizeOfNeutralTweet = totalNumberOfDifferentWords(dictNeutralTweet)
differentWordsInEthnicity = len(dictEthnicity)
differentWordsInReligion = len(dictReligion)
differentWordsInSexualOrientation = len(dictSexualOrientation)
differentWordsInOtherRacialBias = len(dictOtherRacialBias)
differentWordsInPositiveTweet = len(dictPositiveTweet)
differentWordsInNegativeTweet = len(dictNegativeTweet)
differentWordsInNeutralTweet = len(dictNeutralTweet)
result = determinePolarity(dictEthnicity, dictReligion, dictSexualOrientation, dictOtherRacialBias, dictPositiveTweet, dictNeutralTweet, dictNegativeTweet, sizeOfEthnicity, sizeOfReligion, sizeOfSexualOrientation, sizeOfOtherRacialBias, sizeOfPositiveTweet, sizeOfNeutralTweet, sizeOfNegativeTweet, differentWordsInEthnicity, differentWordsInReligion, differentWordsInSexualOrientation, differentWordsInOtherRacialBias, differentWordsInPositiveTweet, differentWordsInNegativeTweet, differentWordsInNeutralTweet, arrayTestData)
'''
sortedByEthnicity = sorted(result, key=lambda x: x[1], reverse=True)
sortedByReligion = sorted(result, key=lambda x: x[2], reverse=True)
sortedBySexualOrientation = sorted(result, key=lambda x: x[3], reverse=True)
sortedByOtherRacialBias = sorted(result, key=lambda x: x[4], reverse=True)
sortedByPositiveTweets = sorted(result, key=lambda x: x[5], reverse=True)
sortedByNeutralTweets = sorted(result, key=lambda x: x[6], reverse=True)
sortedByNegativeTweets = sorted(result, key=lambda x: x[7], reverse=True)
'''
print("How do you want to sort? \n 1 for Ethnicity \n 2 for Religion \n 3 for Sexual Orientation \n 4 for Other Racial Bias \n 5 for positive attitude \n 6 for neutral attitude \n 7 for negative attitude")
parameter = int(input())
sortedTweetsByParameter = sortResult(result, parameter)
with open('outputWithAllProbability.csv','w', newline='') as of:
writer = csv.writer(of)
writer.writerow(["user_id","ethnicity", "religion", "sexualOrientation", "OtherRacialBias", "Positive Attitude", "Neutral Attitude", "Negative Attitude"])
with open('outputWithAllProbability.csv', 'a+',newline='') as of:
for i in sortedTweetsByParameter:
print(i, file=of)
print("How many accounts do you want to find?")
numberOfAccounts = int(input())
userColumn = [0]
topAccountArray = pandaTestData.as_matrix(columns = pandaTestData.columns[userColumn])
tempList = []
for i in range(numberOfAccounts):
tempList.append(sortedTweetsByParameter[i][0])
topAccountList = []
for i in tempList:
temp = int(i)
topAccountList.append(topAccountArray[temp][0])
with open('output.csv','w') as out_file:
for i in topAccountList:
print(i, file=out_file)
print("Please check the CSV file for the persons responsible. Thanks.")
| result = []
regex = re.compile('[^a-zA-Z$]')
tempCount = 0
for i in arrayTestData:
testString = str(i)
probEthnicity = classifierForPolarity(dictEthnicity, sizeOfEthnicity, differentWordsInEthnicity, testString)
probReligion = classifierForPolarity(dictReligion, sizeOfReligion, differentWordsInReligion, testString)
probSexualOrientation = classifierForPolarity(dictSexualOrientation, sizeOfSexualOrientation, differentWordsInSexualOrientation, testString)
probOtherRacialBias = classifierForPolarity(dictOtherRacialBias, sizeOfOtherRacialBias, differentWordsInOtherRacialBias, testString)
probPositiveTweet = classifierForPolarity(dictPositiveTweet, sizeOfPositiveTweet, differentWordsInPositiveTweet, testString)
probNeutralTweet = classifierForPolarity(dictNeutralTweet, sizeOfNeutralTweet, differentWordsInNeutralTweet, testString)
probNegativeTweet = classifierForPolarity(dictNegativeTweet, sizeOfNegativeTweet, differentWordsInNegativeTweet, testString)
temp = [tempCount, probEthnicity, probReligion, probSexualOrientation, probOtherRacialBias, probPositiveTweet, probNeutralTweet, probNegativeTweet]
result.append(temp)
temp = []
tempCount += 1
return(result) | identifier_body |
data_mining_project3.py | import tweepy
import json
import csv
import pandas as pd
import re
from _operator import pos
from jedi.parser.tokenize import number
### Using StreamListener class for collecting the tweets.
class | (tweepy.StreamListener):
def on_data(self, data):
global n ### Since we have to use this variable in main function, we set it as global varible.
if (n < count):
#print (data)
status = json.loads(data)
try:
try:
with open('tweets.csv', 'a+',newline='') as f: ### Create a csv file which will contain streamed tweets.
writer = csv.writer(f)
writer.writerow([status['user']['screen_name'],status['created_at'], status['user']['location'], status['favorite_count'], status['retweet_count'], status['text']])
#print (status['user']['screen_name'],status['created_at'], status['user']['location'], status['favorite_count'], status['retweet_count'], status['text'],"\n")
#print ('')
n += 1 ### If only the tweets is collected without errors, increse the number of total colleted tweets.
except UnicodeEncodeError:
pass
except KeyError:
pass
return True
else :
return False
def on_error(self, status):
print('error : ', status)
def printss():
print("Hello")
'''
Given a list of strings, the follwoing subroutine outputs the frequency of each item in the list.
input: list
output: dictionary
example: input: ['a', 'a', 'b']
output: {('a':2), ('b':1)}
'''
def frequencyCounter(ArrayOfStrings):
#print(type(ArrayOfStrings[2]))
dictionary = {}
regex = re.compile('[^a-zA-Z$]')
for items in ArrayOfStrings:
temp = items.split()
for i in temp:
i = regex.sub('', i)
if i not in dictionary.keys():
dictionary[i] = 1
else:
dictionary[i] += 1
return(dictionary)
'''
This subroutine calculates the total number of different words in the dictionary.
input: dictionary
output: count, which is an int variable.
'''
def totalNumberOfDifferentWords(dictionary):
count = 0
for i in dictionary.values():
count = count + int(i)
return count
'''
The following subroutine is called by the determinePolarity subroutine.
This simply calculates the probability of words according to Naive Bayes Classifier.
input: dictionary, size of dictionary, differentWords in dictionary, test string
output: probability of the words occuring. It'a a single float variable.
'''
def classifierForPolarity(dictionary, totalSize, differentWords, strings):
regex = re.compile('[^a-zA-Z$]')
wordList = strings.split()
divider = differentWords + totalSize
count = 1
probabilityList = []
for i in wordList:
i = regex.sub('', i)
if str(i) in dictionary.keys():
temp = dictionary[str(i)]
count += int(temp)
else:
count = count
classify = count / divider
probabilityList.append(classify)
classify = 0
count = 1
totalProb = 1
for i in probabilityList:
totalProb = totalProb * i
return(totalProb*0.5)
'''
This subroutine is called by the main program. It uses all available lists, dictionaries and other
data to determine the polarity of a tweet.
Probability of a tweet is calculated by the classifierForPolarity subroutine. Both the dictionaries
are used for this part.
Depending on which probability is larger, polarity of the tweet is determined.
If there is 50% or more similarity between the probabilities,
polarity is determined as neutral.
input: dictionries, lists, int variables
output: list
'''
def determinePolarity(dictEthnicity, dictReligion, dictSexualOrientation, dictOtherRacialBias, dictPositiveTweet, dictNeutralTweet, dictNegativeTweet, sizeOfEthnicity, sizeOfReligion, sizeOfSexualOrientation, sizeOfOtherRacialBias, sizeOfPositiveTweet, sizeOfNeutralTweet, sizeOfNegativeTweet, differentWordsInEthnicity, differentWordsInReligion, differentWordsInSexualOrientation, differentWordsInOtherRacialBias, differentWordsInPositiveTweet, differentWordsInNegativeTweet, differentWordsInNeutralTweet, arrayTestData):
result = []
regex = re.compile('[^a-zA-Z$]')
tempCount = 0
for i in arrayTestData:
testString = str(i)
probEthnicity = classifierForPolarity(dictEthnicity, sizeOfEthnicity, differentWordsInEthnicity, testString)
probReligion = classifierForPolarity(dictReligion, sizeOfReligion, differentWordsInReligion, testString)
probSexualOrientation = classifierForPolarity(dictSexualOrientation, sizeOfSexualOrientation, differentWordsInSexualOrientation, testString)
probOtherRacialBias = classifierForPolarity(dictOtherRacialBias, sizeOfOtherRacialBias, differentWordsInOtherRacialBias, testString)
probPositiveTweet = classifierForPolarity(dictPositiveTweet, sizeOfPositiveTweet, differentWordsInPositiveTweet, testString)
probNeutralTweet = classifierForPolarity(dictNeutralTweet, sizeOfNeutralTweet, differentWordsInNeutralTweet, testString)
probNegativeTweet = classifierForPolarity(dictNegativeTweet, sizeOfNegativeTweet, differentWordsInNegativeTweet, testString)
temp = [tempCount, probEthnicity, probReligion, probSexualOrientation, probOtherRacialBias, probPositiveTweet, probNeutralTweet, probNegativeTweet]
result.append(temp)
temp = []
tempCount += 1
return(result)
'''
The following program sorts results of a 2d array based on a parameter.
'''
def sortResult(result, parameter):
sortedTweets = sorted(result, key=lambda x: x[parameter], reverse=True)
return(sortedTweets)
### Authentication details.
consumer_key = 'uAJx6MwKhYDMKamBrarSiGUTd'
consumer_secret = 'na8DAWlfuHSQllqVgcAcUtCSCblL6RtaFmvePFoA1fBUn03d1d'
access_token = '121692329-Q4fTuw0FmJeGkIHLnmxgX7u9ajpHiyIDrLwsnLVp'
access_token_secret = 'fA26olnFJH26y8t1tPOLXs7oXorHLrjyeSk5YKEokrSMX'
count = 0
n = 1
### was previously part of main program
print ("How many tweets do you want to collect?:")
### global count
count = int(input())
print ("Start to collect the tweets")
### Open the csv file saved streamed tweets.
with open('tweets.csv', 'w',newline='') as f:
writer = csv.writer(f)
writer.writerow(["user_id","created_at","location","favorite_count","retweet_count","text"])
l = listener()
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
### Set the keywords that related with racisim that we think and collect the tweets which contain at least one of the those keywords.
stream = tweepy.Stream(auth, l)
stream.filter(track=['racism','racist','racial','race','discrimination','black','white','skinhead', 'sexism', 'LGBT','lesbian','gay','bisexual','transgender', 'homophobia', 'male', 'female', 'misogynistic', 'asian', 'african american', 'hispanic', 'native american','christian','jew','muslim','hindu','buddhist','bigotry', 'prejudice','kkk','negro','honkie','chink','yankee','ethnic','unfairness','racialism','color-blind','segregation'])
#print("Program Finished")
pandaData = pd.read_csv('groundTruth.csv', encoding = "ISO-8859-1")
chosenColumns = [0,1,2]
arrayData = pandaData.as_matrix(columns = pandaData.columns[chosenColumns])
ethnicity = []
religion = []
sexualOrientation = []
otherRacialBias = []
positiveTweet = []
negativeTweet = []
neutralTweet = []
for i,j in enumerate(arrayData):
if j[1] == 1:
ethnicity.append(j[0])
elif j[1] == 2:
religion.append(j[0])
elif j[1] == 3:
sexualOrientation.append(j[0])
elif j[1] == 4:
otherRacialBias.append(j[0])
for i,j in enumerate(arrayData):
if j[2] == 0:
negativeTweet.append(j[0])
elif j[2] == 1:
neutralTweet.append(j[0])
elif j[2] == 2:
positiveTweet.append(j[0])
pandaTestData = pd.read_csv('tweets.csv',encoding = "ISO-8859-1")
chosenTestColumn = [5]
arrayTestData = pandaTestData.as_matrix(columns = pandaTestData.columns[chosenTestColumn])
#print(arrayTestData)
'''
The following code segment counts the frequency of the words.
input type: list
output type: dictionary
'''
dictEthnicity = frequencyCounter(ethnicity)
dictReligion = frequencyCounter(religion)
dictSexualOrientation = frequencyCounter(sexualOrientation)
dictOtherRacialBias = frequencyCounter(otherRacialBias)
dictPositiveTweet = frequencyCounter(positiveTweet)
dictNegativeTweet = frequencyCounter(negativeTweet)
dictNeutralTweet = frequencyCounter(neutralTweet)
'''
The following code segment counts the total number of words in
each dictionary.
'''
sizeOfEthnicity = totalNumberOfDifferentWords(dictEthnicity)
sizeOfReligion = totalNumberOfDifferentWords(dictReligion)
sizeOfSexualOrientation = totalNumberOfDifferentWords(dictSexualOrientation)
sizeOfOtherRacialBias = totalNumberOfDifferentWords(dictOtherRacialBias)
sizeOfPositiveTweet = totalNumberOfDifferentWords(dictPositiveTweet)
sizeOfNegativeTweet = totalNumberOfDifferentWords(dictNegativeTweet)
sizeOfNeutralTweet = totalNumberOfDifferentWords(dictNeutralTweet)
differentWordsInEthnicity = len(dictEthnicity)
differentWordsInReligion = len(dictReligion)
differentWordsInSexualOrientation = len(dictSexualOrientation)
differentWordsInOtherRacialBias = len(dictOtherRacialBias)
differentWordsInPositiveTweet = len(dictPositiveTweet)
differentWordsInNegativeTweet = len(dictNegativeTweet)
differentWordsInNeutralTweet = len(dictNeutralTweet)
result = determinePolarity(dictEthnicity, dictReligion, dictSexualOrientation, dictOtherRacialBias, dictPositiveTweet, dictNeutralTweet, dictNegativeTweet, sizeOfEthnicity, sizeOfReligion, sizeOfSexualOrientation, sizeOfOtherRacialBias, sizeOfPositiveTweet, sizeOfNeutralTweet, sizeOfNegativeTweet, differentWordsInEthnicity, differentWordsInReligion, differentWordsInSexualOrientation, differentWordsInOtherRacialBias, differentWordsInPositiveTweet, differentWordsInNegativeTweet, differentWordsInNeutralTweet, arrayTestData)
'''
sortedByEthnicity = sorted(result, key=lambda x: x[1], reverse=True)
sortedByReligion = sorted(result, key=lambda x: x[2], reverse=True)
sortedBySexualOrientation = sorted(result, key=lambda x: x[3], reverse=True)
sortedByOtherRacialBias = sorted(result, key=lambda x: x[4], reverse=True)
sortedByPositiveTweets = sorted(result, key=lambda x: x[5], reverse=True)
sortedByNeutralTweets = sorted(result, key=lambda x: x[6], reverse=True)
sortedByNegativeTweets = sorted(result, key=lambda x: x[7], reverse=True)
'''
print("How do you want to sort? \n 1 for Ethnicity \n 2 for Religion \n 3 for Sexual Orientation \n 4 for Other Racial Bias \n 5 for positive attitude \n 6 for neutral attitude \n 7 for negative attitude")
parameter = int(input())
sortedTweetsByParameter = sortResult(result, parameter)
with open('outputWithAllProbability.csv','w', newline='') as of:
writer = csv.writer(of)
writer.writerow(["user_id","ethnicity", "religion", "sexualOrientation", "OtherRacialBias", "Positive Attitude", "Neutral Attitude", "Negative Attitude"])
with open('outputWithAllProbability.csv', 'a+',newline='') as of:
for i in sortedTweetsByParameter:
print(i, file=of)
print("How many accounts do you want to find?")
numberOfAccounts = int(input())
userColumn = [0]
topAccountArray = pandaTestData.as_matrix(columns = pandaTestData.columns[userColumn])
tempList = []
for i in range(numberOfAccounts):
tempList.append(sortedTweetsByParameter[i][0])
topAccountList = []
for i in tempList:
temp = int(i)
topAccountList.append(topAccountArray[temp][0])
with open('output.csv','w') as out_file:
for i in topAccountList:
print(i, file=out_file)
print("Please check the CSV file for the persons responsible. Thanks.")
| listener | identifier_name |
data_mining_project3.py | import tweepy
import json
import csv
import pandas as pd
import re
from _operator import pos
from jedi.parser.tokenize import number
### Using StreamListener class for collecting the tweets.
class listener(tweepy.StreamListener):
def on_data(self, data):
global n ### Since we have to use this variable in main function, we set it as global varible.
if (n < count):
#print (data)
status = json.loads(data)
try:
try:
with open('tweets.csv', 'a+',newline='') as f: ### Create a csv file which will contain streamed tweets.
writer = csv.writer(f)
writer.writerow([status['user']['screen_name'],status['created_at'], status['user']['location'], status['favorite_count'], status['retweet_count'], status['text']])
#print (status['user']['screen_name'],status['created_at'], status['user']['location'], status['favorite_count'], status['retweet_count'], status['text'],"\n")
#print ('')
n += 1 ### If only the tweets is collected without errors, increse the number of total colleted tweets.
except UnicodeEncodeError:
pass
except KeyError:
pass
return True
else :
return False
def on_error(self, status):
print('error : ', status)
def printss():
print("Hello")
'''
Given a list of strings, the follwoing subroutine outputs the frequency of each item in the list.
input: list
output: dictionary
example: input: ['a', 'a', 'b']
output: {('a':2), ('b':1)}
'''
def frequencyCounter(ArrayOfStrings):
#print(type(ArrayOfStrings[2]))
dictionary = {}
regex = re.compile('[^a-zA-Z$]')
for items in ArrayOfStrings:
temp = items.split()
for i in temp:
i = regex.sub('', i)
if i not in dictionary.keys():
dictionary[i] = 1
else:
dictionary[i] += 1
return(dictionary)
'''
This subroutine calculates the total number of different words in the dictionary.
input: dictionary
output: count, which is an int variable.
'''
def totalNumberOfDifferentWords(dictionary):
count = 0
for i in dictionary.values():
count = count + int(i)
return count
'''
The following subroutine is called by the determinePolarity subroutine.
This simply calculates the probability of words according to Naive Bayes Classifier.
input: dictionary, size of dictionary, differentWords in dictionary, test string
output: probability of the words occuring. It'a a single float variable.
'''
def classifierForPolarity(dictionary, totalSize, differentWords, strings):
regex = re.compile('[^a-zA-Z$]')
wordList = strings.split()
divider = differentWords + totalSize
count = 1
probabilityList = []
for i in wordList:
i = regex.sub('', i)
if str(i) in dictionary.keys():
temp = dictionary[str(i)]
count += int(temp)
else:
count = count
classify = count / divider
probabilityList.append(classify)
classify = 0
count = 1
totalProb = 1
for i in probabilityList:
totalProb = totalProb * i
return(totalProb*0.5)
'''
This subroutine is called by the main program. It uses all available lists, dictionaries and other
data to determine the polarity of a tweet.
Probability of a tweet is calculated by the classifierForPolarity subroutine. Both the dictionaries
are used for this part.
Depending on which probability is larger, polarity of the tweet is determined.
If there is 50% or more similarity between the probabilities,
polarity is determined as neutral.
input: dictionries, lists, int variables
output: list
'''
def determinePolarity(dictEthnicity, dictReligion, dictSexualOrientation, dictOtherRacialBias, dictPositiveTweet, dictNeutralTweet, dictNegativeTweet, sizeOfEthnicity, sizeOfReligion, sizeOfSexualOrientation, sizeOfOtherRacialBias, sizeOfPositiveTweet, sizeOfNeutralTweet, sizeOfNegativeTweet, differentWordsInEthnicity, differentWordsInReligion, differentWordsInSexualOrientation, differentWordsInOtherRacialBias, differentWordsInPositiveTweet, differentWordsInNegativeTweet, differentWordsInNeutralTweet, arrayTestData):
result = []
regex = re.compile('[^a-zA-Z$]')
tempCount = 0
for i in arrayTestData:
testString = str(i)
probEthnicity = classifierForPolarity(dictEthnicity, sizeOfEthnicity, differentWordsInEthnicity, testString)
probReligion = classifierForPolarity(dictReligion, sizeOfReligion, differentWordsInReligion, testString)
probSexualOrientation = classifierForPolarity(dictSexualOrientation, sizeOfSexualOrientation, differentWordsInSexualOrientation, testString)
probOtherRacialBias = classifierForPolarity(dictOtherRacialBias, sizeOfOtherRacialBias, differentWordsInOtherRacialBias, testString)
probPositiveTweet = classifierForPolarity(dictPositiveTweet, sizeOfPositiveTweet, differentWordsInPositiveTweet, testString)
probNeutralTweet = classifierForPolarity(dictNeutralTweet, sizeOfNeutralTweet, differentWordsInNeutralTweet, testString)
probNegativeTweet = classifierForPolarity(dictNegativeTweet, sizeOfNegativeTweet, differentWordsInNegativeTweet, testString)
temp = [tempCount, probEthnicity, probReligion, probSexualOrientation, probOtherRacialBias, probPositiveTweet, probNeutralTweet, probNegativeTweet]
result.append(temp)
temp = []
tempCount += 1
return(result)
'''
The following program sorts results of a 2d array based on a parameter.
'''
def sortResult(result, parameter):
sortedTweets = sorted(result, key=lambda x: x[parameter], reverse=True)
return(sortedTweets)
### Authentication details.
consumer_key = 'uAJx6MwKhYDMKamBrarSiGUTd'
consumer_secret = 'na8DAWlfuHSQllqVgcAcUtCSCblL6RtaFmvePFoA1fBUn03d1d'
access_token = '121692329-Q4fTuw0FmJeGkIHLnmxgX7u9ajpHiyIDrLwsnLVp'
access_token_secret = 'fA26olnFJH26y8t1tPOLXs7oXorHLrjyeSk5YKEokrSMX'
count = 0
n = 1
### was previously part of main program
print ("How many tweets do you want to collect?:")
### global count
count = int(input())
print ("Start to collect the tweets")
### Open the csv file saved streamed tweets.
with open('tweets.csv', 'w',newline='') as f:
writer = csv.writer(f)
writer.writerow(["user_id","created_at","location","favorite_count","retweet_count","text"])
l = listener()
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
### Set the keywords that related with racisim that we think and collect the tweets which contain at least one of the those keywords.
stream = tweepy.Stream(auth, l)
stream.filter(track=['racism','racist','racial','race','discrimination','black','white','skinhead', 'sexism', 'LGBT','lesbian','gay','bisexual','transgender', 'homophobia', 'male', 'female', 'misogynistic', 'asian', 'african american', 'hispanic', 'native american','christian','jew','muslim','hindu','buddhist','bigotry', 'prejudice','kkk','negro','honkie','chink','yankee','ethnic','unfairness','racialism','color-blind','segregation'])
#print("Program Finished")
pandaData = pd.read_csv('groundTruth.csv', encoding = "ISO-8859-1")
chosenColumns = [0,1,2]
arrayData = pandaData.as_matrix(columns = pandaData.columns[chosenColumns])
ethnicity = [] | neutralTweet = []
for i,j in enumerate(arrayData):
if j[1] == 1:
ethnicity.append(j[0])
elif j[1] == 2:
religion.append(j[0])
elif j[1] == 3:
sexualOrientation.append(j[0])
elif j[1] == 4:
otherRacialBias.append(j[0])
for i,j in enumerate(arrayData):
if j[2] == 0:
negativeTweet.append(j[0])
elif j[2] == 1:
neutralTweet.append(j[0])
elif j[2] == 2:
positiveTweet.append(j[0])
pandaTestData = pd.read_csv('tweets.csv',encoding = "ISO-8859-1")
chosenTestColumn = [5]
arrayTestData = pandaTestData.as_matrix(columns = pandaTestData.columns[chosenTestColumn])
#print(arrayTestData)
'''
The following code segment counts the frequency of the words.
input type: list
output type: dictionary
'''
dictEthnicity = frequencyCounter(ethnicity)
dictReligion = frequencyCounter(religion)
dictSexualOrientation = frequencyCounter(sexualOrientation)
dictOtherRacialBias = frequencyCounter(otherRacialBias)
dictPositiveTweet = frequencyCounter(positiveTweet)
dictNegativeTweet = frequencyCounter(negativeTweet)
dictNeutralTweet = frequencyCounter(neutralTweet)
'''
The following code segment counts the total number of words in
each dictionary.
'''
sizeOfEthnicity = totalNumberOfDifferentWords(dictEthnicity)
sizeOfReligion = totalNumberOfDifferentWords(dictReligion)
sizeOfSexualOrientation = totalNumberOfDifferentWords(dictSexualOrientation)
sizeOfOtherRacialBias = totalNumberOfDifferentWords(dictOtherRacialBias)
sizeOfPositiveTweet = totalNumberOfDifferentWords(dictPositiveTweet)
sizeOfNegativeTweet = totalNumberOfDifferentWords(dictNegativeTweet)
sizeOfNeutralTweet = totalNumberOfDifferentWords(dictNeutralTweet)
differentWordsInEthnicity = len(dictEthnicity)
differentWordsInReligion = len(dictReligion)
differentWordsInSexualOrientation = len(dictSexualOrientation)
differentWordsInOtherRacialBias = len(dictOtherRacialBias)
differentWordsInPositiveTweet = len(dictPositiveTweet)
differentWordsInNegativeTweet = len(dictNegativeTweet)
differentWordsInNeutralTweet = len(dictNeutralTweet)
result = determinePolarity(dictEthnicity, dictReligion, dictSexualOrientation, dictOtherRacialBias, dictPositiveTweet, dictNeutralTweet, dictNegativeTweet, sizeOfEthnicity, sizeOfReligion, sizeOfSexualOrientation, sizeOfOtherRacialBias, sizeOfPositiveTweet, sizeOfNeutralTweet, sizeOfNegativeTweet, differentWordsInEthnicity, differentWordsInReligion, differentWordsInSexualOrientation, differentWordsInOtherRacialBias, differentWordsInPositiveTweet, differentWordsInNegativeTweet, differentWordsInNeutralTweet, arrayTestData)
'''
sortedByEthnicity = sorted(result, key=lambda x: x[1], reverse=True)
sortedByReligion = sorted(result, key=lambda x: x[2], reverse=True)
sortedBySexualOrientation = sorted(result, key=lambda x: x[3], reverse=True)
sortedByOtherRacialBias = sorted(result, key=lambda x: x[4], reverse=True)
sortedByPositiveTweets = sorted(result, key=lambda x: x[5], reverse=True)
sortedByNeutralTweets = sorted(result, key=lambda x: x[6], reverse=True)
sortedByNegativeTweets = sorted(result, key=lambda x: x[7], reverse=True)
'''
print("How do you want to sort? \n 1 for Ethnicity \n 2 for Religion \n 3 for Sexual Orientation \n 4 for Other Racial Bias \n 5 for positive attitude \n 6 for neutral attitude \n 7 for negative attitude")
parameter = int(input())
sortedTweetsByParameter = sortResult(result, parameter)
with open('outputWithAllProbability.csv','w', newline='') as of:
writer = csv.writer(of)
writer.writerow(["user_id","ethnicity", "religion", "sexualOrientation", "OtherRacialBias", "Positive Attitude", "Neutral Attitude", "Negative Attitude"])
with open('outputWithAllProbability.csv', 'a+',newline='') as of:
for i in sortedTweetsByParameter:
print(i, file=of)
print("How many accounts do you want to find?")
numberOfAccounts = int(input())
userColumn = [0]
topAccountArray = pandaTestData.as_matrix(columns = pandaTestData.columns[userColumn])
tempList = []
for i in range(numberOfAccounts):
tempList.append(sortedTweetsByParameter[i][0])
topAccountList = []
for i in tempList:
temp = int(i)
topAccountList.append(topAccountArray[temp][0])
with open('output.csv','w') as out_file:
for i in topAccountList:
print(i, file=out_file)
print("Please check the CSV file for the persons responsible. Thanks.") | religion = []
sexualOrientation = []
otherRacialBias = []
positiveTweet = []
negativeTweet = [] | random_line_split |
client.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package client
import (
"fmt"
"strings"
"sync"
"time"
"github.com/golang/protobuf/proto"
sdkConfigApi "github.com/hyperledger/fabric-sdk-go/api/apiconfig"
sdkApi "github.com/hyperledger/fabric-sdk-go/api/apifabclient"
apitxn "github.com/hyperledger/fabric-sdk-go/api/apitxn"
"github.com/hyperledger/fabric-sdk-go/pkg/config"
"github.com/hyperledger/fabric-sdk-go/pkg/fabsdk"
"github.com/hyperledger/fabric-sdk-go/pkg/status"
"github.com/securekey/fabric-snaps/util/errors"
sdkorderer "github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/orderer"
protosMSP "github.com/hyperledger/fabric-sdk-go/third_party/github.com/hyperledger/fabric/protos/msp"
"github.com/hyperledger/fabric/bccsp"
pb "github.com/hyperledger/fabric/protos/peer"
logging "github.com/hyperledger/fabric-sdk-go/pkg/logging"
eventapi "github.com/securekey/fabric-snaps/eventservice/api"
eventservice "github.com/securekey/fabric-snaps/eventservice/pkg/localservice"
"github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/events"
"github.com/securekey/fabric-snaps/transactionsnap/api"
"github.com/securekey/fabric-snaps/transactionsnap/cmd/client/factories"
utils "github.com/securekey/fabric-snaps/transactionsnap/cmd/utils"
)
var module = "txnsnap"
var logger = logging.NewLogger(module)
const (
txnSnapUser = "Txn-Snap-User"
)
type clientImpl struct {
sync.RWMutex
client sdkApi.Resource
selectionService api.SelectionService
config api.Config
}
var cachedClient map[string]*clientImpl
//var client *clientImpl
var clientMutex sync.RWMutex
var once sync.Once
// GetInstance returns a singleton instance of the fabric client
func GetInstance(channelID string, config api.Config) (api.Client, error) {
if channelID == "" {
return nil, errors.New(errors.GeneralError, "Channel is required")
}
var c *clientImpl
c.initializeCache()
clientMutex.RLock()
c = cachedClient[channelID] //client from cache
clientMutex.RUnlock()
if c != nil {
return c, nil
}
clientMutex.Lock()
defer clientMutex.Unlock()
c = &clientImpl{selectionService: NewSelectionService(config), config: config}
err := c.initialize(config.GetConfigBytes())
if err != nil |
if c.client == nil {
logger.Errorf("Error: SDK client is nil!!!\n")
return nil, errors.New(errors.GeneralError, "SDK client is nil")
}
//put client into cache
cachedClient[channelID] = c
return c, nil
}
//initializeCache used to initialize client cache
func (c *clientImpl) initializeCache() {
once.Do(func() {
logger.Debugf("Client cache was created")
cachedClient = make(map[string]*clientImpl)
})
}
func (c *clientImpl) NewChannel(name string) (sdkApi.Channel, error) {
c.RLock()
chain := c.client.Channel(name)
c.RUnlock()
if chain != nil {
return chain, nil
}
c.Lock()
defer c.Unlock()
channel, err := c.client.NewChannel(name)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Error creating new channel")
}
ordererConfig, err := c.client.Config().RandomOrdererConfig()
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "GetRandomOrdererConfig return error")
}
opts, err := withOrdererOptions(ordererConfig)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "withOrdererOptions return error")
}
orderer, err := sdkorderer.New(c.client.Config(), opts...)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Error adding orderer")
}
channel.AddOrderer(orderer)
return channel, nil
}
func withOrdererOptions(ordererConfig *sdkConfigApi.OrdererConfig) ([]sdkorderer.Option, error) {
opts := []sdkorderer.Option{}
opts = append(opts, sdkorderer.WithURL(ordererConfig.URL))
opts = append(opts, sdkorderer.WithServerName(""))
ocert, err := ordererConfig.TLSCACerts.TLSCert()
if err != nil {
s, ok := status.FromError(err)
// if error is other than EmptyCert, then it should not be ignored, else simply set TLS with no cert
if !ok || s.Code != status.EmptyCert.ToInt32() {
return nil, errors.Wrap(errors.GeneralError, err, "error getting orderer cert from the configs")
}
}
if ocert != nil {
opts = append(opts, sdkorderer.WithTLSCert(ocert))
}
return opts, nil
}
func (c *clientImpl) GetChannel(name string) (sdkApi.Channel, error) {
c.RLock()
defer c.RUnlock()
channel := c.client.Channel(name)
if channel == nil {
return nil, errors.Errorf(errors.GeneralError, "Channel %s has not been created", name)
}
return channel, nil
}
func (c *clientImpl) EndorseTransaction(channel sdkApi.Channel, endorseRequest *api.EndorseTxRequest) (
[]*apitxn.TransactionProposalResponse, error) {
if len(endorseRequest.Args) == 0 {
return nil, errors.Errorf(errors.GeneralError,
"Args cannot be empty. Args[0] is expected to be the function name")
}
var peers []sdkApi.Peer
var processors []apitxn.ProposalProcessor
var err error
var ccIDsForEndorsement []string
if endorseRequest.Targets == nil {
if len(endorseRequest.ChaincodeIDs) == 0 {
ccIDsForEndorsement = append(ccIDsForEndorsement, endorseRequest.ChaincodeID)
} else {
ccIDsForEndorsement = endorseRequest.ChaincodeIDs
}
// Select endorsers
remainingAttempts := c.config.GetEndorserSelectionMaxAttempts()
logger.Infof("Attempting to get endorsers - [%d] attempts...", remainingAttempts)
for len(peers) == 0 && remainingAttempts > 0 {
peers, err = c.selectionService.GetEndorsersForChaincode(channel.Name(),
endorseRequest.PeerFilter, ccIDsForEndorsement...)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "error selecting endorsers")
}
if len(peers) == 0 {
remainingAttempts--
logger.Warnf("No endorsers. [%d] remaining attempts...", remainingAttempts)
time.Sleep(c.config.GetEndorserSelectionInterval())
}
}
if len(peers) == 0 {
logger.Errorf("No suitable endorsers found for transaction.")
return nil, errors.New(errors.GeneralError, "no suitable endorsers found for transaction")
}
} else {
peers = endorseRequest.Targets
}
for _, peer := range peers {
logger.Debugf("Target peer %v", peer.URL())
processors = append(processors, apitxn.ProposalProcessor(peer))
}
c.RLock()
defer c.RUnlock()
logger.Debugf("Requesting endorsements from %s, on channel %s",
endorseRequest.ChaincodeID, channel.Name())
request := apitxn.ChaincodeInvokeRequest{
Targets: processors,
Fcn: endorseRequest.Args[0],
Args: utils.GetByteArgs(endorseRequest.Args[1:]),
TransientMap: endorseRequest.TransientData,
ChaincodeID: endorseRequest.ChaincodeID,
}
// TODO: Replace this code with the GO SDK's ChannelClient
responses, _, err := channel.SendTransactionProposal(request)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Error sending transaction proposal")
}
// TODO: Replace the following code with the GO SDK's endorsement validation logic
if len(responses) == 0 {
return nil, errors.New(errors.GeneralError, "Did not receive any endorsements")
}
var errorResponses []string
for _, response := range responses {
if response.Err != nil {
errorResponses = append(errorResponses, response.Err.Error())
}
}
if len(errorResponses) > 0 {
return responses, errors.Errorf(errors.GeneralError, strings.Join(errorResponses, "\n"))
}
if len(responses) != len(processors) {
return responses, errors.Errorf(errors.GeneralError, "only %d out of %d responses were received", len(responses), len(processors))
}
return responses, nil
}
func (c *clientImpl) CommitTransaction(channel sdkApi.Channel,
responses []*apitxn.TransactionProposalResponse, registerTxEvent bool) error {
c.RLock()
defer c.RUnlock()
transaction, err := channel.CreateTransaction(responses)
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error creating transaction")
}
logger.Debugf("Sending transaction [%s] for commit", transaction.Proposal.TxnID.ID)
var txStatusEventCh <-chan *eventapi.TxStatusEvent
txID := transaction.Proposal.TxnID
if registerTxEvent {
events := eventservice.Get(channel.Name())
reg, eventch, err := events.RegisterTxStatusEvent(txID.ID)
if err != nil {
return errors.Wrapf(errors.GeneralError, err, "unable to register for TxStatus event for TxID [%s] on channel [%s]", txID, channel.Name())
}
defer events.Unregister(reg)
txStatusEventCh = eventch
}
resp, err := channel.SendTransaction(transaction)
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error sending transaction")
}
if resp.Err != nil {
return errors.WithMessage(errors.GeneralError, resp.Err, "Error sending transaction")
}
if registerTxEvent {
select {
case txStatusEvent := <-txStatusEventCh:
if txStatusEvent.TxValidationCode != pb.TxValidationCode_VALID {
return errors.Errorf(errors.GeneralError, "transaction [%s] did not commit successfully. Code: [%s]", txID.ID, txStatusEvent.TxValidationCode)
}
logger.Debugf("Transaction [%s] successfully committed", txID.ID)
case <-time.After(c.config.GetCommitTimeout()):
return errors.Errorf(errors.GeneralError, "SendTransaction Didn't receive tx event for txid(%s)", txID.ID)
}
}
return nil
}
// /QueryChannels to query channels based on peer
func (c *clientImpl) QueryChannels(peer sdkApi.Peer) ([]string, error) {
responses, err := c.client.QueryChannels(peer)
if err != nil {
return nil, errors.Errorf(errors.GeneralError, "Error querying channels on peer %+v : %s", peer, err)
}
channels := []string{}
for _, response := range responses.GetChannels() {
channels = append(channels, response.ChannelId)
}
return channels, nil
}
// Verify Transaction Proposal signature
func (c *clientImpl) VerifyTxnProposalSignature(channel sdkApi.Channel, proposalBytes []byte) error {
if channel.MSPManager() == nil {
return errors.Errorf(errors.GeneralError, "Channel %s GetMSPManager is nil", channel.Name())
}
msps, err := channel.MSPManager().GetMSPs()
if err != nil {
return errors.Errorf(errors.GeneralError, "GetMSPs return error:%v", err)
}
if len(msps) == 0 {
return errors.Errorf(errors.GeneralError, "Channel %s MSPManager.GetMSPs is empty", channel.Name())
}
signedProposal := &pb.SignedProposal{}
if err := proto.Unmarshal(proposalBytes, signedProposal); err != nil {
return errors.Wrap(errors.GeneralError, err, "Unmarshal clientProposalBytes error")
}
creatorBytes, err := utils.GetCreatorFromSignedProposal(signedProposal)
if err != nil {
return errors.Wrap(errors.GeneralError, err, "GetCreatorFromSignedProposal return error")
}
serializedIdentity := &protosMSP.SerializedIdentity{}
if err := proto.Unmarshal(creatorBytes, serializedIdentity); err != nil {
return errors.Wrap(errors.GeneralError, err, "Unmarshal creatorBytes error")
}
msp := msps[serializedIdentity.Mspid]
if msp == nil {
return errors.Errorf(errors.GeneralError, "MSP %s not found", serializedIdentity.Mspid)
}
creator, err := msp.DeserializeIdentity(creatorBytes)
if err != nil {
return errors.Wrap(errors.GeneralError, err, "Failed to deserialize creator identity")
}
logger.Debugf("checkSignatureFromCreator info: creator is %s", creator.GetIdentifier())
// ensure that creator is a valid certificate
err = creator.Validate()
if err != nil {
return errors.Wrap(errors.GeneralError, err, "The creator certificate is not valid")
}
logger.Debugf("verifyTPSignature info: creator is valid")
// validate the signature
err = creator.Verify(signedProposal.ProposalBytes, signedProposal.Signature)
if err != nil {
return errors.Wrap(errors.GeneralError, err, "The creator's signature over the proposal is not valid")
}
logger.Debugf("VerifyTxnProposalSignature exists successfully")
return nil
}
func (c *clientImpl) SetSelectionService(service api.SelectionService) {
c.Lock()
defer c.Unlock()
c.selectionService = service
}
func (c *clientImpl) GetSelectionService() api.SelectionService {
return c.selectionService
}
func (c *clientImpl) GetEventHub() (sdkApi.EventHub, error) {
eventHub, err := events.NewEventHub(c.client)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Failed to get NewEventHub")
}
return eventHub, err
}
func (c *clientImpl) InitializeChannel(channel sdkApi.Channel) error {
c.RLock()
isInitialized := channel.IsInitialized()
c.RUnlock()
if isInitialized {
logger.Debug("Chain is initialized. Returning.")
return nil
}
c.Lock()
defer c.Unlock()
err := channel.Initialize(nil)
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error initializing new channel")
}
// Channel initialized. Add MSP roots to TLS cert pool.
err = c.initializeTLSPool(channel)
if err != nil {
return err
}
return nil
}
func (c *clientImpl) initializeTLSPool(channel sdkApi.Channel) error {
globalCertPool, err := c.client.Config().TLSCACertPool()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Failed TLSCACertPool")
}
mspMap, err := channel.MSPManager().GetMSPs()
if err != nil {
return errors.Errorf(errors.GeneralError, "Error getting MSPs for channel %s: %v",
channel.Name(), err)
}
for _, msp := range mspMap {
for _, cert := range msp.GetTLSRootCerts() {
globalCertPool.AppendCertsFromPEM(cert)
}
for _, cert := range msp.GetTLSIntermediateCerts() {
globalCertPool.AppendCertsFromPEM(cert)
}
}
c.client.Config().SetTLSCACertPool(globalCertPool)
return nil
}
func (c *clientImpl) initialize(sdkConfig []byte) error {
//Get cryptosuite provider name from name from peerconfig
cryptoProvider, err := c.config.GetCryptoProvider()
if err != nil {
return err
}
sdk, err := fabsdk.New(config.FromRaw(sdkConfig, "yaml"),
fabsdk.WithContextPkg(&factories.CredentialManagerProviderFactory{CryptoPath: c.config.GetMspConfigPath()}),
fabsdk.WithCorePkg(&factories.DefaultCryptoSuiteProviderFactory{ProviderName: cryptoProvider}))
if err != nil {
panic(fmt.Sprintf("Failed to create new SDK: %s", err))
}
configProvider := sdk.ConfigProvider()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error getting config")
}
localPeer, err := c.config.GetLocalPeer()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "GetLocalPeer return error")
}
//Find orgname matching localpeer mspID
nconfig, err := configProvider.NetworkConfig()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Failed to get network config")
}
var orgname string
for name, org := range nconfig.Organizations {
if org.MspID == string(localPeer.MSPid) {
orgname = name
break
}
}
userSession, err := sdk.NewClient(fabsdk.WithUser(txnSnapUser), fabsdk.WithOrg(orgname)).Session()
if err != nil {
return errors.Wrapf(errors.GeneralError, err, "failed getting user session for org %s", orgname)
}
client, err := sdk.FabricProvider().NewResourceClient(userSession.Identity())
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "NewResourceClient failed")
}
c.client = client
logger.Debugf("Done initializing client. Default log level: %s, fabric_sdk_go log level: %s, txn-snap-config log lelvel: %s", logging.GetLevel(""), logging.GetLevel("fabric_sdk_go"), logging.GetLevel("txn-snap-config"))
return nil
}
func (c *clientImpl) Hash(message []byte) (hash []byte, err error) {
hash, err = c.client.CryptoSuite().Hash(message, &bccsp.SHAOpts{})
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Failed Hash")
}
return hash, err
}
func (c *clientImpl) GetConfig() sdkConfigApi.Config {
return c.client.Config()
}
func (c *clientImpl) GetSigningIdentity() sdkApi.IdentityContext {
return c.client.IdentityContext()
}
| {
logger.Errorf("Error initializing client: %s\n", err)
return nil, errors.Wrap(errors.GeneralError, err, "error initializing fabric client")
} | conditional_block |
client.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package client
import (
"fmt"
"strings"
"sync"
"time"
"github.com/golang/protobuf/proto"
sdkConfigApi "github.com/hyperledger/fabric-sdk-go/api/apiconfig"
sdkApi "github.com/hyperledger/fabric-sdk-go/api/apifabclient"
apitxn "github.com/hyperledger/fabric-sdk-go/api/apitxn"
"github.com/hyperledger/fabric-sdk-go/pkg/config"
"github.com/hyperledger/fabric-sdk-go/pkg/fabsdk"
"github.com/hyperledger/fabric-sdk-go/pkg/status"
"github.com/securekey/fabric-snaps/util/errors"
sdkorderer "github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/orderer"
protosMSP "github.com/hyperledger/fabric-sdk-go/third_party/github.com/hyperledger/fabric/protos/msp"
"github.com/hyperledger/fabric/bccsp"
pb "github.com/hyperledger/fabric/protos/peer"
logging "github.com/hyperledger/fabric-sdk-go/pkg/logging"
eventapi "github.com/securekey/fabric-snaps/eventservice/api"
eventservice "github.com/securekey/fabric-snaps/eventservice/pkg/localservice"
"github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/events"
"github.com/securekey/fabric-snaps/transactionsnap/api"
"github.com/securekey/fabric-snaps/transactionsnap/cmd/client/factories"
utils "github.com/securekey/fabric-snaps/transactionsnap/cmd/utils"
)
var module = "txnsnap"
var logger = logging.NewLogger(module)
const (
txnSnapUser = "Txn-Snap-User"
)
type clientImpl struct {
sync.RWMutex
client sdkApi.Resource
selectionService api.SelectionService
config api.Config
}
var cachedClient map[string]*clientImpl
//var client *clientImpl
var clientMutex sync.RWMutex
var once sync.Once
// GetInstance returns a singleton instance of the fabric client
func GetInstance(channelID string, config api.Config) (api.Client, error) {
if channelID == "" {
return nil, errors.New(errors.GeneralError, "Channel is required")
}
var c *clientImpl
c.initializeCache()
clientMutex.RLock()
c = cachedClient[channelID] //client from cache
clientMutex.RUnlock()
if c != nil {
return c, nil
}
clientMutex.Lock()
defer clientMutex.Unlock()
c = &clientImpl{selectionService: NewSelectionService(config), config: config}
err := c.initialize(config.GetConfigBytes())
if err != nil {
logger.Errorf("Error initializing client: %s\n", err)
return nil, errors.Wrap(errors.GeneralError, err, "error initializing fabric client")
}
if c.client == nil {
logger.Errorf("Error: SDK client is nil!!!\n")
return nil, errors.New(errors.GeneralError, "SDK client is nil")
}
//put client into cache
cachedClient[channelID] = c
return c, nil
}
//initializeCache used to initialize client cache
func (c *clientImpl) initializeCache() {
once.Do(func() {
logger.Debugf("Client cache was created")
cachedClient = make(map[string]*clientImpl)
})
}
func (c *clientImpl) NewChannel(name string) (sdkApi.Channel, error) {
c.RLock()
chain := c.client.Channel(name)
c.RUnlock()
if chain != nil {
return chain, nil
}
c.Lock()
defer c.Unlock()
channel, err := c.client.NewChannel(name)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Error creating new channel")
}
ordererConfig, err := c.client.Config().RandomOrdererConfig()
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "GetRandomOrdererConfig return error")
}
opts, err := withOrdererOptions(ordererConfig)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "withOrdererOptions return error")
}
orderer, err := sdkorderer.New(c.client.Config(), opts...)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Error adding orderer")
}
channel.AddOrderer(orderer)
return channel, nil
}
func withOrdererOptions(ordererConfig *sdkConfigApi.OrdererConfig) ([]sdkorderer.Option, error) {
opts := []sdkorderer.Option{}
opts = append(opts, sdkorderer.WithURL(ordererConfig.URL))
opts = append(opts, sdkorderer.WithServerName(""))
ocert, err := ordererConfig.TLSCACerts.TLSCert()
if err != nil {
s, ok := status.FromError(err)
// if error is other than EmptyCert, then it should not be ignored, else simply set TLS with no cert
if !ok || s.Code != status.EmptyCert.ToInt32() {
return nil, errors.Wrap(errors.GeneralError, err, "error getting orderer cert from the configs")
}
}
if ocert != nil {
opts = append(opts, sdkorderer.WithTLSCert(ocert))
}
return opts, nil
}
func (c *clientImpl) GetChannel(name string) (sdkApi.Channel, error) {
c.RLock()
defer c.RUnlock()
channel := c.client.Channel(name)
if channel == nil {
return nil, errors.Errorf(errors.GeneralError, "Channel %s has not been created", name)
}
return channel, nil
}
func (c *clientImpl) EndorseTransaction(channel sdkApi.Channel, endorseRequest *api.EndorseTxRequest) (
[]*apitxn.TransactionProposalResponse, error) {
if len(endorseRequest.Args) == 0 {
return nil, errors.Errorf(errors.GeneralError,
"Args cannot be empty. Args[0] is expected to be the function name")
}
var peers []sdkApi.Peer
var processors []apitxn.ProposalProcessor
var err error
var ccIDsForEndorsement []string
if endorseRequest.Targets == nil {
if len(endorseRequest.ChaincodeIDs) == 0 {
ccIDsForEndorsement = append(ccIDsForEndorsement, endorseRequest.ChaincodeID)
} else {
ccIDsForEndorsement = endorseRequest.ChaincodeIDs
}
// Select endorsers
remainingAttempts := c.config.GetEndorserSelectionMaxAttempts()
logger.Infof("Attempting to get endorsers - [%d] attempts...", remainingAttempts)
for len(peers) == 0 && remainingAttempts > 0 {
peers, err = c.selectionService.GetEndorsersForChaincode(channel.Name(),
endorseRequest.PeerFilter, ccIDsForEndorsement...)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "error selecting endorsers")
}
if len(peers) == 0 {
remainingAttempts--
logger.Warnf("No endorsers. [%d] remaining attempts...", remainingAttempts)
time.Sleep(c.config.GetEndorserSelectionInterval())
}
}
if len(peers) == 0 {
logger.Errorf("No suitable endorsers found for transaction.")
return nil, errors.New(errors.GeneralError, "no suitable endorsers found for transaction")
}
} else {
peers = endorseRequest.Targets
}
for _, peer := range peers {
logger.Debugf("Target peer %v", peer.URL())
processors = append(processors, apitxn.ProposalProcessor(peer))
}
c.RLock()
defer c.RUnlock()
logger.Debugf("Requesting endorsements from %s, on channel %s",
endorseRequest.ChaincodeID, channel.Name())
request := apitxn.ChaincodeInvokeRequest{
Targets: processors,
Fcn: endorseRequest.Args[0],
Args: utils.GetByteArgs(endorseRequest.Args[1:]),
TransientMap: endorseRequest.TransientData,
ChaincodeID: endorseRequest.ChaincodeID,
}
// TODO: Replace this code with the GO SDK's ChannelClient
responses, _, err := channel.SendTransactionProposal(request)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Error sending transaction proposal")
}
// TODO: Replace the following code with the GO SDK's endorsement validation logic
if len(responses) == 0 {
return nil, errors.New(errors.GeneralError, "Did not receive any endorsements")
}
var errorResponses []string
for _, response := range responses {
if response.Err != nil {
errorResponses = append(errorResponses, response.Err.Error())
}
}
if len(errorResponses) > 0 {
return responses, errors.Errorf(errors.GeneralError, strings.Join(errorResponses, "\n"))
}
if len(responses) != len(processors) {
return responses, errors.Errorf(errors.GeneralError, "only %d out of %d responses were received", len(responses), len(processors))
}
return responses, nil
}
func (c *clientImpl) CommitTransaction(channel sdkApi.Channel,
responses []*apitxn.TransactionProposalResponse, registerTxEvent bool) error {
c.RLock()
defer c.RUnlock()
transaction, err := channel.CreateTransaction(responses)
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error creating transaction")
}
logger.Debugf("Sending transaction [%s] for commit", transaction.Proposal.TxnID.ID)
var txStatusEventCh <-chan *eventapi.TxStatusEvent
txID := transaction.Proposal.TxnID
if registerTxEvent {
events := eventservice.Get(channel.Name())
reg, eventch, err := events.RegisterTxStatusEvent(txID.ID)
if err != nil {
return errors.Wrapf(errors.GeneralError, err, "unable to register for TxStatus event for TxID [%s] on channel [%s]", txID, channel.Name())
}
defer events.Unregister(reg)
txStatusEventCh = eventch
}
resp, err := channel.SendTransaction(transaction)
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error sending transaction")
}
if resp.Err != nil {
return errors.WithMessage(errors.GeneralError, resp.Err, "Error sending transaction")
}
if registerTxEvent {
select {
case txStatusEvent := <-txStatusEventCh:
if txStatusEvent.TxValidationCode != pb.TxValidationCode_VALID {
return errors.Errorf(errors.GeneralError, "transaction [%s] did not commit successfully. Code: [%s]", txID.ID, txStatusEvent.TxValidationCode)
}
logger.Debugf("Transaction [%s] successfully committed", txID.ID)
case <-time.After(c.config.GetCommitTimeout()):
return errors.Errorf(errors.GeneralError, "SendTransaction Didn't receive tx event for txid(%s)", txID.ID)
}
}
return nil
}
// /QueryChannels to query channels based on peer
func (c *clientImpl) QueryChannels(peer sdkApi.Peer) ([]string, error) {
responses, err := c.client.QueryChannels(peer)
if err != nil {
return nil, errors.Errorf(errors.GeneralError, "Error querying channels on peer %+v : %s", peer, err)
}
channels := []string{}
for _, response := range responses.GetChannels() {
channels = append(channels, response.ChannelId)
}
return channels, nil
}
// Verify Transaction Proposal signature
func (c *clientImpl) VerifyTxnProposalSignature(channel sdkApi.Channel, proposalBytes []byte) error {
if channel.MSPManager() == nil {
return errors.Errorf(errors.GeneralError, "Channel %s GetMSPManager is nil", channel.Name())
}
msps, err := channel.MSPManager().GetMSPs()
if err != nil {
return errors.Errorf(errors.GeneralError, "GetMSPs return error:%v", err)
}
if len(msps) == 0 {
return errors.Errorf(errors.GeneralError, "Channel %s MSPManager.GetMSPs is empty", channel.Name())
}
signedProposal := &pb.SignedProposal{}
if err := proto.Unmarshal(proposalBytes, signedProposal); err != nil {
return errors.Wrap(errors.GeneralError, err, "Unmarshal clientProposalBytes error")
}
creatorBytes, err := utils.GetCreatorFromSignedProposal(signedProposal)
if err != nil {
return errors.Wrap(errors.GeneralError, err, "GetCreatorFromSignedProposal return error")
}
serializedIdentity := &protosMSP.SerializedIdentity{}
if err := proto.Unmarshal(creatorBytes, serializedIdentity); err != nil {
return errors.Wrap(errors.GeneralError, err, "Unmarshal creatorBytes error")
}
msp := msps[serializedIdentity.Mspid]
if msp == nil {
return errors.Errorf(errors.GeneralError, "MSP %s not found", serializedIdentity.Mspid)
}
creator, err := msp.DeserializeIdentity(creatorBytes)
if err != nil {
return errors.Wrap(errors.GeneralError, err, "Failed to deserialize creator identity")
}
logger.Debugf("checkSignatureFromCreator info: creator is %s", creator.GetIdentifier())
// ensure that creator is a valid certificate
err = creator.Validate()
if err != nil {
return errors.Wrap(errors.GeneralError, err, "The creator certificate is not valid")
}
logger.Debugf("verifyTPSignature info: creator is valid")
// validate the signature
err = creator.Verify(signedProposal.ProposalBytes, signedProposal.Signature)
if err != nil {
return errors.Wrap(errors.GeneralError, err, "The creator's signature over the proposal is not valid")
}
logger.Debugf("VerifyTxnProposalSignature exists successfully")
return nil
}
func (c *clientImpl) SetSelectionService(service api.SelectionService) {
c.Lock()
defer c.Unlock()
c.selectionService = service
}
func (c *clientImpl) GetSelectionService() api.SelectionService {
return c.selectionService
}
func (c *clientImpl) GetEventHub() (sdkApi.EventHub, error) {
eventHub, err := events.NewEventHub(c.client)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Failed to get NewEventHub")
}
return eventHub, err
}
func (c *clientImpl) InitializeChannel(channel sdkApi.Channel) error {
c.RLock()
isInitialized := channel.IsInitialized()
c.RUnlock()
if isInitialized {
logger.Debug("Chain is initialized. Returning.")
return nil
}
c.Lock()
defer c.Unlock()
err := channel.Initialize(nil)
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error initializing new channel")
}
// Channel initialized. Add MSP roots to TLS cert pool.
err = c.initializeTLSPool(channel)
if err != nil {
return err
}
return nil
}
func (c *clientImpl) initializeTLSPool(channel sdkApi.Channel) error {
globalCertPool, err := c.client.Config().TLSCACertPool()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Failed TLSCACertPool")
}
mspMap, err := channel.MSPManager().GetMSPs()
if err != nil {
return errors.Errorf(errors.GeneralError, "Error getting MSPs for channel %s: %v",
channel.Name(), err)
}
for _, msp := range mspMap {
for _, cert := range msp.GetTLSRootCerts() {
globalCertPool.AppendCertsFromPEM(cert)
}
for _, cert := range msp.GetTLSIntermediateCerts() {
globalCertPool.AppendCertsFromPEM(cert)
}
}
c.client.Config().SetTLSCACertPool(globalCertPool)
return nil
}
func (c *clientImpl) initialize(sdkConfig []byte) error {
//Get cryptosuite provider name from name from peerconfig
cryptoProvider, err := c.config.GetCryptoProvider()
if err != nil {
return err
}
sdk, err := fabsdk.New(config.FromRaw(sdkConfig, "yaml"),
fabsdk.WithContextPkg(&factories.CredentialManagerProviderFactory{CryptoPath: c.config.GetMspConfigPath()}),
fabsdk.WithCorePkg(&factories.DefaultCryptoSuiteProviderFactory{ProviderName: cryptoProvider}))
if err != nil {
panic(fmt.Sprintf("Failed to create new SDK: %s", err))
}
configProvider := sdk.ConfigProvider()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error getting config")
}
localPeer, err := c.config.GetLocalPeer()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "GetLocalPeer return error")
}
//Find orgname matching localpeer mspID
nconfig, err := configProvider.NetworkConfig()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Failed to get network config")
}
var orgname string
for name, org := range nconfig.Organizations {
if org.MspID == string(localPeer.MSPid) {
orgname = name | break
}
}
userSession, err := sdk.NewClient(fabsdk.WithUser(txnSnapUser), fabsdk.WithOrg(orgname)).Session()
if err != nil {
return errors.Wrapf(errors.GeneralError, err, "failed getting user session for org %s", orgname)
}
client, err := sdk.FabricProvider().NewResourceClient(userSession.Identity())
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "NewResourceClient failed")
}
c.client = client
logger.Debugf("Done initializing client. Default log level: %s, fabric_sdk_go log level: %s, txn-snap-config log lelvel: %s", logging.GetLevel(""), logging.GetLevel("fabric_sdk_go"), logging.GetLevel("txn-snap-config"))
return nil
}
func (c *clientImpl) Hash(message []byte) (hash []byte, err error) {
hash, err = c.client.CryptoSuite().Hash(message, &bccsp.SHAOpts{})
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Failed Hash")
}
return hash, err
}
func (c *clientImpl) GetConfig() sdkConfigApi.Config {
return c.client.Config()
}
func (c *clientImpl) GetSigningIdentity() sdkApi.IdentityContext {
return c.client.IdentityContext()
} | random_line_split | |
client.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package client
import (
"fmt"
"strings"
"sync"
"time"
"github.com/golang/protobuf/proto"
sdkConfigApi "github.com/hyperledger/fabric-sdk-go/api/apiconfig"
sdkApi "github.com/hyperledger/fabric-sdk-go/api/apifabclient"
apitxn "github.com/hyperledger/fabric-sdk-go/api/apitxn"
"github.com/hyperledger/fabric-sdk-go/pkg/config"
"github.com/hyperledger/fabric-sdk-go/pkg/fabsdk"
"github.com/hyperledger/fabric-sdk-go/pkg/status"
"github.com/securekey/fabric-snaps/util/errors"
sdkorderer "github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/orderer"
protosMSP "github.com/hyperledger/fabric-sdk-go/third_party/github.com/hyperledger/fabric/protos/msp"
"github.com/hyperledger/fabric/bccsp"
pb "github.com/hyperledger/fabric/protos/peer"
logging "github.com/hyperledger/fabric-sdk-go/pkg/logging"
eventapi "github.com/securekey/fabric-snaps/eventservice/api"
eventservice "github.com/securekey/fabric-snaps/eventservice/pkg/localservice"
"github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/events"
"github.com/securekey/fabric-snaps/transactionsnap/api"
"github.com/securekey/fabric-snaps/transactionsnap/cmd/client/factories"
utils "github.com/securekey/fabric-snaps/transactionsnap/cmd/utils"
)
var module = "txnsnap"
var logger = logging.NewLogger(module)
const (
txnSnapUser = "Txn-Snap-User"
)
type clientImpl struct {
sync.RWMutex
client sdkApi.Resource
selectionService api.SelectionService
config api.Config
}
var cachedClient map[string]*clientImpl
//var client *clientImpl
var clientMutex sync.RWMutex
var once sync.Once
// GetInstance returns a singleton instance of the fabric client
func GetInstance(channelID string, config api.Config) (api.Client, error) {
if channelID == "" {
return nil, errors.New(errors.GeneralError, "Channel is required")
}
var c *clientImpl
c.initializeCache()
clientMutex.RLock()
c = cachedClient[channelID] //client from cache
clientMutex.RUnlock()
if c != nil {
return c, nil
}
clientMutex.Lock()
defer clientMutex.Unlock()
c = &clientImpl{selectionService: NewSelectionService(config), config: config}
err := c.initialize(config.GetConfigBytes())
if err != nil {
logger.Errorf("Error initializing client: %s\n", err)
return nil, errors.Wrap(errors.GeneralError, err, "error initializing fabric client")
}
if c.client == nil {
logger.Errorf("Error: SDK client is nil!!!\n")
return nil, errors.New(errors.GeneralError, "SDK client is nil")
}
//put client into cache
cachedClient[channelID] = c
return c, nil
}
//initializeCache used to initialize client cache
func (c *clientImpl) initializeCache() {
once.Do(func() {
logger.Debugf("Client cache was created")
cachedClient = make(map[string]*clientImpl)
})
}
func (c *clientImpl) NewChannel(name string) (sdkApi.Channel, error) {
c.RLock()
chain := c.client.Channel(name)
c.RUnlock()
if chain != nil {
return chain, nil
}
c.Lock()
defer c.Unlock()
channel, err := c.client.NewChannel(name)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Error creating new channel")
}
ordererConfig, err := c.client.Config().RandomOrdererConfig()
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "GetRandomOrdererConfig return error")
}
opts, err := withOrdererOptions(ordererConfig)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "withOrdererOptions return error")
}
orderer, err := sdkorderer.New(c.client.Config(), opts...)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Error adding orderer")
}
channel.AddOrderer(orderer)
return channel, nil
}
func withOrdererOptions(ordererConfig *sdkConfigApi.OrdererConfig) ([]sdkorderer.Option, error) {
opts := []sdkorderer.Option{}
opts = append(opts, sdkorderer.WithURL(ordererConfig.URL))
opts = append(opts, sdkorderer.WithServerName(""))
ocert, err := ordererConfig.TLSCACerts.TLSCert()
if err != nil {
s, ok := status.FromError(err)
// if error is other than EmptyCert, then it should not be ignored, else simply set TLS with no cert
if !ok || s.Code != status.EmptyCert.ToInt32() {
return nil, errors.Wrap(errors.GeneralError, err, "error getting orderer cert from the configs")
}
}
if ocert != nil {
opts = append(opts, sdkorderer.WithTLSCert(ocert))
}
return opts, nil
}
func (c *clientImpl) GetChannel(name string) (sdkApi.Channel, error) {
c.RLock()
defer c.RUnlock()
channel := c.client.Channel(name)
if channel == nil {
return nil, errors.Errorf(errors.GeneralError, "Channel %s has not been created", name)
}
return channel, nil
}
func (c *clientImpl) EndorseTransaction(channel sdkApi.Channel, endorseRequest *api.EndorseTxRequest) (
[]*apitxn.TransactionProposalResponse, error) {
if len(endorseRequest.Args) == 0 {
return nil, errors.Errorf(errors.GeneralError,
"Args cannot be empty. Args[0] is expected to be the function name")
}
var peers []sdkApi.Peer
var processors []apitxn.ProposalProcessor
var err error
var ccIDsForEndorsement []string
if endorseRequest.Targets == nil {
if len(endorseRequest.ChaincodeIDs) == 0 {
ccIDsForEndorsement = append(ccIDsForEndorsement, endorseRequest.ChaincodeID)
} else {
ccIDsForEndorsement = endorseRequest.ChaincodeIDs
}
// Select endorsers
remainingAttempts := c.config.GetEndorserSelectionMaxAttempts()
logger.Infof("Attempting to get endorsers - [%d] attempts...", remainingAttempts)
for len(peers) == 0 && remainingAttempts > 0 {
peers, err = c.selectionService.GetEndorsersForChaincode(channel.Name(),
endorseRequest.PeerFilter, ccIDsForEndorsement...)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "error selecting endorsers")
}
if len(peers) == 0 {
remainingAttempts--
logger.Warnf("No endorsers. [%d] remaining attempts...", remainingAttempts)
time.Sleep(c.config.GetEndorserSelectionInterval())
}
}
if len(peers) == 0 {
logger.Errorf("No suitable endorsers found for transaction.")
return nil, errors.New(errors.GeneralError, "no suitable endorsers found for transaction")
}
} else {
peers = endorseRequest.Targets
}
for _, peer := range peers {
logger.Debugf("Target peer %v", peer.URL())
processors = append(processors, apitxn.ProposalProcessor(peer))
}
c.RLock()
defer c.RUnlock()
logger.Debugf("Requesting endorsements from %s, on channel %s",
endorseRequest.ChaincodeID, channel.Name())
request := apitxn.ChaincodeInvokeRequest{
Targets: processors,
Fcn: endorseRequest.Args[0],
Args: utils.GetByteArgs(endorseRequest.Args[1:]),
TransientMap: endorseRequest.TransientData,
ChaincodeID: endorseRequest.ChaincodeID,
}
// TODO: Replace this code with the GO SDK's ChannelClient
responses, _, err := channel.SendTransactionProposal(request)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Error sending transaction proposal")
}
// TODO: Replace the following code with the GO SDK's endorsement validation logic
if len(responses) == 0 {
return nil, errors.New(errors.GeneralError, "Did not receive any endorsements")
}
var errorResponses []string
for _, response := range responses {
if response.Err != nil {
errorResponses = append(errorResponses, response.Err.Error())
}
}
if len(errorResponses) > 0 {
return responses, errors.Errorf(errors.GeneralError, strings.Join(errorResponses, "\n"))
}
if len(responses) != len(processors) {
return responses, errors.Errorf(errors.GeneralError, "only %d out of %d responses were received", len(responses), len(processors))
}
return responses, nil
}
func (c *clientImpl) | (channel sdkApi.Channel,
responses []*apitxn.TransactionProposalResponse, registerTxEvent bool) error {
c.RLock()
defer c.RUnlock()
transaction, err := channel.CreateTransaction(responses)
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error creating transaction")
}
logger.Debugf("Sending transaction [%s] for commit", transaction.Proposal.TxnID.ID)
var txStatusEventCh <-chan *eventapi.TxStatusEvent
txID := transaction.Proposal.TxnID
if registerTxEvent {
events := eventservice.Get(channel.Name())
reg, eventch, err := events.RegisterTxStatusEvent(txID.ID)
if err != nil {
return errors.Wrapf(errors.GeneralError, err, "unable to register for TxStatus event for TxID [%s] on channel [%s]", txID, channel.Name())
}
defer events.Unregister(reg)
txStatusEventCh = eventch
}
resp, err := channel.SendTransaction(transaction)
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error sending transaction")
}
if resp.Err != nil {
return errors.WithMessage(errors.GeneralError, resp.Err, "Error sending transaction")
}
if registerTxEvent {
select {
case txStatusEvent := <-txStatusEventCh:
if txStatusEvent.TxValidationCode != pb.TxValidationCode_VALID {
return errors.Errorf(errors.GeneralError, "transaction [%s] did not commit successfully. Code: [%s]", txID.ID, txStatusEvent.TxValidationCode)
}
logger.Debugf("Transaction [%s] successfully committed", txID.ID)
case <-time.After(c.config.GetCommitTimeout()):
return errors.Errorf(errors.GeneralError, "SendTransaction Didn't receive tx event for txid(%s)", txID.ID)
}
}
return nil
}
// /QueryChannels to query channels based on peer
func (c *clientImpl) QueryChannels(peer sdkApi.Peer) ([]string, error) {
responses, err := c.client.QueryChannels(peer)
if err != nil {
return nil, errors.Errorf(errors.GeneralError, "Error querying channels on peer %+v : %s", peer, err)
}
channels := []string{}
for _, response := range responses.GetChannels() {
channels = append(channels, response.ChannelId)
}
return channels, nil
}
// Verify Transaction Proposal signature
func (c *clientImpl) VerifyTxnProposalSignature(channel sdkApi.Channel, proposalBytes []byte) error {
if channel.MSPManager() == nil {
return errors.Errorf(errors.GeneralError, "Channel %s GetMSPManager is nil", channel.Name())
}
msps, err := channel.MSPManager().GetMSPs()
if err != nil {
return errors.Errorf(errors.GeneralError, "GetMSPs return error:%v", err)
}
if len(msps) == 0 {
return errors.Errorf(errors.GeneralError, "Channel %s MSPManager.GetMSPs is empty", channel.Name())
}
signedProposal := &pb.SignedProposal{}
if err := proto.Unmarshal(proposalBytes, signedProposal); err != nil {
return errors.Wrap(errors.GeneralError, err, "Unmarshal clientProposalBytes error")
}
creatorBytes, err := utils.GetCreatorFromSignedProposal(signedProposal)
if err != nil {
return errors.Wrap(errors.GeneralError, err, "GetCreatorFromSignedProposal return error")
}
serializedIdentity := &protosMSP.SerializedIdentity{}
if err := proto.Unmarshal(creatorBytes, serializedIdentity); err != nil {
return errors.Wrap(errors.GeneralError, err, "Unmarshal creatorBytes error")
}
msp := msps[serializedIdentity.Mspid]
if msp == nil {
return errors.Errorf(errors.GeneralError, "MSP %s not found", serializedIdentity.Mspid)
}
creator, err := msp.DeserializeIdentity(creatorBytes)
if err != nil {
return errors.Wrap(errors.GeneralError, err, "Failed to deserialize creator identity")
}
logger.Debugf("checkSignatureFromCreator info: creator is %s", creator.GetIdentifier())
// ensure that creator is a valid certificate
err = creator.Validate()
if err != nil {
return errors.Wrap(errors.GeneralError, err, "The creator certificate is not valid")
}
logger.Debugf("verifyTPSignature info: creator is valid")
// validate the signature
err = creator.Verify(signedProposal.ProposalBytes, signedProposal.Signature)
if err != nil {
return errors.Wrap(errors.GeneralError, err, "The creator's signature over the proposal is not valid")
}
logger.Debugf("VerifyTxnProposalSignature exists successfully")
return nil
}
func (c *clientImpl) SetSelectionService(service api.SelectionService) {
c.Lock()
defer c.Unlock()
c.selectionService = service
}
func (c *clientImpl) GetSelectionService() api.SelectionService {
return c.selectionService
}
func (c *clientImpl) GetEventHub() (sdkApi.EventHub, error) {
eventHub, err := events.NewEventHub(c.client)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Failed to get NewEventHub")
}
return eventHub, err
}
func (c *clientImpl) InitializeChannel(channel sdkApi.Channel) error {
c.RLock()
isInitialized := channel.IsInitialized()
c.RUnlock()
if isInitialized {
logger.Debug("Chain is initialized. Returning.")
return nil
}
c.Lock()
defer c.Unlock()
err := channel.Initialize(nil)
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error initializing new channel")
}
// Channel initialized. Add MSP roots to TLS cert pool.
err = c.initializeTLSPool(channel)
if err != nil {
return err
}
return nil
}
func (c *clientImpl) initializeTLSPool(channel sdkApi.Channel) error {
globalCertPool, err := c.client.Config().TLSCACertPool()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Failed TLSCACertPool")
}
mspMap, err := channel.MSPManager().GetMSPs()
if err != nil {
return errors.Errorf(errors.GeneralError, "Error getting MSPs for channel %s: %v",
channel.Name(), err)
}
for _, msp := range mspMap {
for _, cert := range msp.GetTLSRootCerts() {
globalCertPool.AppendCertsFromPEM(cert)
}
for _, cert := range msp.GetTLSIntermediateCerts() {
globalCertPool.AppendCertsFromPEM(cert)
}
}
c.client.Config().SetTLSCACertPool(globalCertPool)
return nil
}
func (c *clientImpl) initialize(sdkConfig []byte) error {
//Get cryptosuite provider name from name from peerconfig
cryptoProvider, err := c.config.GetCryptoProvider()
if err != nil {
return err
}
sdk, err := fabsdk.New(config.FromRaw(sdkConfig, "yaml"),
fabsdk.WithContextPkg(&factories.CredentialManagerProviderFactory{CryptoPath: c.config.GetMspConfigPath()}),
fabsdk.WithCorePkg(&factories.DefaultCryptoSuiteProviderFactory{ProviderName: cryptoProvider}))
if err != nil {
panic(fmt.Sprintf("Failed to create new SDK: %s", err))
}
configProvider := sdk.ConfigProvider()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error getting config")
}
localPeer, err := c.config.GetLocalPeer()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "GetLocalPeer return error")
}
//Find orgname matching localpeer mspID
nconfig, err := configProvider.NetworkConfig()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Failed to get network config")
}
var orgname string
for name, org := range nconfig.Organizations {
if org.MspID == string(localPeer.MSPid) {
orgname = name
break
}
}
userSession, err := sdk.NewClient(fabsdk.WithUser(txnSnapUser), fabsdk.WithOrg(orgname)).Session()
if err != nil {
return errors.Wrapf(errors.GeneralError, err, "failed getting user session for org %s", orgname)
}
client, err := sdk.FabricProvider().NewResourceClient(userSession.Identity())
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "NewResourceClient failed")
}
c.client = client
logger.Debugf("Done initializing client. Default log level: %s, fabric_sdk_go log level: %s, txn-snap-config log lelvel: %s", logging.GetLevel(""), logging.GetLevel("fabric_sdk_go"), logging.GetLevel("txn-snap-config"))
return nil
}
func (c *clientImpl) Hash(message []byte) (hash []byte, err error) {
hash, err = c.client.CryptoSuite().Hash(message, &bccsp.SHAOpts{})
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Failed Hash")
}
return hash, err
}
func (c *clientImpl) GetConfig() sdkConfigApi.Config {
return c.client.Config()
}
func (c *clientImpl) GetSigningIdentity() sdkApi.IdentityContext {
return c.client.IdentityContext()
}
| CommitTransaction | identifier_name |
client.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package client
import (
"fmt"
"strings"
"sync"
"time"
"github.com/golang/protobuf/proto"
sdkConfigApi "github.com/hyperledger/fabric-sdk-go/api/apiconfig"
sdkApi "github.com/hyperledger/fabric-sdk-go/api/apifabclient"
apitxn "github.com/hyperledger/fabric-sdk-go/api/apitxn"
"github.com/hyperledger/fabric-sdk-go/pkg/config"
"github.com/hyperledger/fabric-sdk-go/pkg/fabsdk"
"github.com/hyperledger/fabric-sdk-go/pkg/status"
"github.com/securekey/fabric-snaps/util/errors"
sdkorderer "github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/orderer"
protosMSP "github.com/hyperledger/fabric-sdk-go/third_party/github.com/hyperledger/fabric/protos/msp"
"github.com/hyperledger/fabric/bccsp"
pb "github.com/hyperledger/fabric/protos/peer"
logging "github.com/hyperledger/fabric-sdk-go/pkg/logging"
eventapi "github.com/securekey/fabric-snaps/eventservice/api"
eventservice "github.com/securekey/fabric-snaps/eventservice/pkg/localservice"
"github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/events"
"github.com/securekey/fabric-snaps/transactionsnap/api"
"github.com/securekey/fabric-snaps/transactionsnap/cmd/client/factories"
utils "github.com/securekey/fabric-snaps/transactionsnap/cmd/utils"
)
var module = "txnsnap"
var logger = logging.NewLogger(module)
const (
txnSnapUser = "Txn-Snap-User"
)
type clientImpl struct {
sync.RWMutex
client sdkApi.Resource
selectionService api.SelectionService
config api.Config
}
var cachedClient map[string]*clientImpl
//var client *clientImpl
var clientMutex sync.RWMutex
var once sync.Once
// GetInstance returns a singleton instance of the fabric client
func GetInstance(channelID string, config api.Config) (api.Client, error) {
if channelID == "" {
return nil, errors.New(errors.GeneralError, "Channel is required")
}
var c *clientImpl
c.initializeCache()
clientMutex.RLock()
c = cachedClient[channelID] //client from cache
clientMutex.RUnlock()
if c != nil {
return c, nil
}
clientMutex.Lock()
defer clientMutex.Unlock()
c = &clientImpl{selectionService: NewSelectionService(config), config: config}
err := c.initialize(config.GetConfigBytes())
if err != nil {
logger.Errorf("Error initializing client: %s\n", err)
return nil, errors.Wrap(errors.GeneralError, err, "error initializing fabric client")
}
if c.client == nil {
logger.Errorf("Error: SDK client is nil!!!\n")
return nil, errors.New(errors.GeneralError, "SDK client is nil")
}
//put client into cache
cachedClient[channelID] = c
return c, nil
}
//initializeCache used to initialize client cache
func (c *clientImpl) initializeCache() {
once.Do(func() {
logger.Debugf("Client cache was created")
cachedClient = make(map[string]*clientImpl)
})
}
func (c *clientImpl) NewChannel(name string) (sdkApi.Channel, error) {
c.RLock()
chain := c.client.Channel(name)
c.RUnlock()
if chain != nil {
return chain, nil
}
c.Lock()
defer c.Unlock()
channel, err := c.client.NewChannel(name)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Error creating new channel")
}
ordererConfig, err := c.client.Config().RandomOrdererConfig()
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "GetRandomOrdererConfig return error")
}
opts, err := withOrdererOptions(ordererConfig)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "withOrdererOptions return error")
}
orderer, err := sdkorderer.New(c.client.Config(), opts...)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Error adding orderer")
}
channel.AddOrderer(orderer)
return channel, nil
}
func withOrdererOptions(ordererConfig *sdkConfigApi.OrdererConfig) ([]sdkorderer.Option, error) {
opts := []sdkorderer.Option{}
opts = append(opts, sdkorderer.WithURL(ordererConfig.URL))
opts = append(opts, sdkorderer.WithServerName(""))
ocert, err := ordererConfig.TLSCACerts.TLSCert()
if err != nil {
s, ok := status.FromError(err)
// if error is other than EmptyCert, then it should not be ignored, else simply set TLS with no cert
if !ok || s.Code != status.EmptyCert.ToInt32() {
return nil, errors.Wrap(errors.GeneralError, err, "error getting orderer cert from the configs")
}
}
if ocert != nil {
opts = append(opts, sdkorderer.WithTLSCert(ocert))
}
return opts, nil
}
func (c *clientImpl) GetChannel(name string) (sdkApi.Channel, error) {
c.RLock()
defer c.RUnlock()
channel := c.client.Channel(name)
if channel == nil {
return nil, errors.Errorf(errors.GeneralError, "Channel %s has not been created", name)
}
return channel, nil
}
func (c *clientImpl) EndorseTransaction(channel sdkApi.Channel, endorseRequest *api.EndorseTxRequest) (
[]*apitxn.TransactionProposalResponse, error) {
if len(endorseRequest.Args) == 0 {
return nil, errors.Errorf(errors.GeneralError,
"Args cannot be empty. Args[0] is expected to be the function name")
}
var peers []sdkApi.Peer
var processors []apitxn.ProposalProcessor
var err error
var ccIDsForEndorsement []string
if endorseRequest.Targets == nil {
if len(endorseRequest.ChaincodeIDs) == 0 {
ccIDsForEndorsement = append(ccIDsForEndorsement, endorseRequest.ChaincodeID)
} else {
ccIDsForEndorsement = endorseRequest.ChaincodeIDs
}
// Select endorsers
remainingAttempts := c.config.GetEndorserSelectionMaxAttempts()
logger.Infof("Attempting to get endorsers - [%d] attempts...", remainingAttempts)
for len(peers) == 0 && remainingAttempts > 0 {
peers, err = c.selectionService.GetEndorsersForChaincode(channel.Name(),
endorseRequest.PeerFilter, ccIDsForEndorsement...)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "error selecting endorsers")
}
if len(peers) == 0 {
remainingAttempts--
logger.Warnf("No endorsers. [%d] remaining attempts...", remainingAttempts)
time.Sleep(c.config.GetEndorserSelectionInterval())
}
}
if len(peers) == 0 {
logger.Errorf("No suitable endorsers found for transaction.")
return nil, errors.New(errors.GeneralError, "no suitable endorsers found for transaction")
}
} else {
peers = endorseRequest.Targets
}
for _, peer := range peers {
logger.Debugf("Target peer %v", peer.URL())
processors = append(processors, apitxn.ProposalProcessor(peer))
}
c.RLock()
defer c.RUnlock()
logger.Debugf("Requesting endorsements from %s, on channel %s",
endorseRequest.ChaincodeID, channel.Name())
request := apitxn.ChaincodeInvokeRequest{
Targets: processors,
Fcn: endorseRequest.Args[0],
Args: utils.GetByteArgs(endorseRequest.Args[1:]),
TransientMap: endorseRequest.TransientData,
ChaincodeID: endorseRequest.ChaincodeID,
}
// TODO: Replace this code with the GO SDK's ChannelClient
responses, _, err := channel.SendTransactionProposal(request)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Error sending transaction proposal")
}
// TODO: Replace the following code with the GO SDK's endorsement validation logic
if len(responses) == 0 {
return nil, errors.New(errors.GeneralError, "Did not receive any endorsements")
}
var errorResponses []string
for _, response := range responses {
if response.Err != nil {
errorResponses = append(errorResponses, response.Err.Error())
}
}
if len(errorResponses) > 0 {
return responses, errors.Errorf(errors.GeneralError, strings.Join(errorResponses, "\n"))
}
if len(responses) != len(processors) {
return responses, errors.Errorf(errors.GeneralError, "only %d out of %d responses were received", len(responses), len(processors))
}
return responses, nil
}
func (c *clientImpl) CommitTransaction(channel sdkApi.Channel,
responses []*apitxn.TransactionProposalResponse, registerTxEvent bool) error {
c.RLock()
defer c.RUnlock()
transaction, err := channel.CreateTransaction(responses)
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error creating transaction")
}
logger.Debugf("Sending transaction [%s] for commit", transaction.Proposal.TxnID.ID)
var txStatusEventCh <-chan *eventapi.TxStatusEvent
txID := transaction.Proposal.TxnID
if registerTxEvent {
events := eventservice.Get(channel.Name())
reg, eventch, err := events.RegisterTxStatusEvent(txID.ID)
if err != nil {
return errors.Wrapf(errors.GeneralError, err, "unable to register for TxStatus event for TxID [%s] on channel [%s]", txID, channel.Name())
}
defer events.Unregister(reg)
txStatusEventCh = eventch
}
resp, err := channel.SendTransaction(transaction)
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error sending transaction")
}
if resp.Err != nil {
return errors.WithMessage(errors.GeneralError, resp.Err, "Error sending transaction")
}
if registerTxEvent {
select {
case txStatusEvent := <-txStatusEventCh:
if txStatusEvent.TxValidationCode != pb.TxValidationCode_VALID {
return errors.Errorf(errors.GeneralError, "transaction [%s] did not commit successfully. Code: [%s]", txID.ID, txStatusEvent.TxValidationCode)
}
logger.Debugf("Transaction [%s] successfully committed", txID.ID)
case <-time.After(c.config.GetCommitTimeout()):
return errors.Errorf(errors.GeneralError, "SendTransaction Didn't receive tx event for txid(%s)", txID.ID)
}
}
return nil
}
// /QueryChannels to query channels based on peer
func (c *clientImpl) QueryChannels(peer sdkApi.Peer) ([]string, error) {
responses, err := c.client.QueryChannels(peer)
if err != nil {
return nil, errors.Errorf(errors.GeneralError, "Error querying channels on peer %+v : %s", peer, err)
}
channels := []string{}
for _, response := range responses.GetChannels() {
channels = append(channels, response.ChannelId)
}
return channels, nil
}
// Verify Transaction Proposal signature
func (c *clientImpl) VerifyTxnProposalSignature(channel sdkApi.Channel, proposalBytes []byte) error {
if channel.MSPManager() == nil {
return errors.Errorf(errors.GeneralError, "Channel %s GetMSPManager is nil", channel.Name())
}
msps, err := channel.MSPManager().GetMSPs()
if err != nil {
return errors.Errorf(errors.GeneralError, "GetMSPs return error:%v", err)
}
if len(msps) == 0 {
return errors.Errorf(errors.GeneralError, "Channel %s MSPManager.GetMSPs is empty", channel.Name())
}
signedProposal := &pb.SignedProposal{}
if err := proto.Unmarshal(proposalBytes, signedProposal); err != nil {
return errors.Wrap(errors.GeneralError, err, "Unmarshal clientProposalBytes error")
}
creatorBytes, err := utils.GetCreatorFromSignedProposal(signedProposal)
if err != nil {
return errors.Wrap(errors.GeneralError, err, "GetCreatorFromSignedProposal return error")
}
serializedIdentity := &protosMSP.SerializedIdentity{}
if err := proto.Unmarshal(creatorBytes, serializedIdentity); err != nil {
return errors.Wrap(errors.GeneralError, err, "Unmarshal creatorBytes error")
}
msp := msps[serializedIdentity.Mspid]
if msp == nil {
return errors.Errorf(errors.GeneralError, "MSP %s not found", serializedIdentity.Mspid)
}
creator, err := msp.DeserializeIdentity(creatorBytes)
if err != nil {
return errors.Wrap(errors.GeneralError, err, "Failed to deserialize creator identity")
}
logger.Debugf("checkSignatureFromCreator info: creator is %s", creator.GetIdentifier())
// ensure that creator is a valid certificate
err = creator.Validate()
if err != nil {
return errors.Wrap(errors.GeneralError, err, "The creator certificate is not valid")
}
logger.Debugf("verifyTPSignature info: creator is valid")
// validate the signature
err = creator.Verify(signedProposal.ProposalBytes, signedProposal.Signature)
if err != nil {
return errors.Wrap(errors.GeneralError, err, "The creator's signature over the proposal is not valid")
}
logger.Debugf("VerifyTxnProposalSignature exists successfully")
return nil
}
func (c *clientImpl) SetSelectionService(service api.SelectionService) {
c.Lock()
defer c.Unlock()
c.selectionService = service
}
func (c *clientImpl) GetSelectionService() api.SelectionService {
return c.selectionService
}
func (c *clientImpl) GetEventHub() (sdkApi.EventHub, error) {
eventHub, err := events.NewEventHub(c.client)
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Failed to get NewEventHub")
}
return eventHub, err
}
func (c *clientImpl) InitializeChannel(channel sdkApi.Channel) error {
c.RLock()
isInitialized := channel.IsInitialized()
c.RUnlock()
if isInitialized {
logger.Debug("Chain is initialized. Returning.")
return nil
}
c.Lock()
defer c.Unlock()
err := channel.Initialize(nil)
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error initializing new channel")
}
// Channel initialized. Add MSP roots to TLS cert pool.
err = c.initializeTLSPool(channel)
if err != nil {
return err
}
return nil
}
func (c *clientImpl) initializeTLSPool(channel sdkApi.Channel) error {
globalCertPool, err := c.client.Config().TLSCACertPool()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Failed TLSCACertPool")
}
mspMap, err := channel.MSPManager().GetMSPs()
if err != nil {
return errors.Errorf(errors.GeneralError, "Error getting MSPs for channel %s: %v",
channel.Name(), err)
}
for _, msp := range mspMap {
for _, cert := range msp.GetTLSRootCerts() {
globalCertPool.AppendCertsFromPEM(cert)
}
for _, cert := range msp.GetTLSIntermediateCerts() {
globalCertPool.AppendCertsFromPEM(cert)
}
}
c.client.Config().SetTLSCACertPool(globalCertPool)
return nil
}
func (c *clientImpl) initialize(sdkConfig []byte) error |
func (c *clientImpl) Hash(message []byte) (hash []byte, err error) {
hash, err = c.client.CryptoSuite().Hash(message, &bccsp.SHAOpts{})
if err != nil {
return nil, errors.WithMessage(errors.GeneralError, err, "Failed Hash")
}
return hash, err
}
func (c *clientImpl) GetConfig() sdkConfigApi.Config {
return c.client.Config()
}
func (c *clientImpl) GetSigningIdentity() sdkApi.IdentityContext {
return c.client.IdentityContext()
}
| {
//Get cryptosuite provider name from name from peerconfig
cryptoProvider, err := c.config.GetCryptoProvider()
if err != nil {
return err
}
sdk, err := fabsdk.New(config.FromRaw(sdkConfig, "yaml"),
fabsdk.WithContextPkg(&factories.CredentialManagerProviderFactory{CryptoPath: c.config.GetMspConfigPath()}),
fabsdk.WithCorePkg(&factories.DefaultCryptoSuiteProviderFactory{ProviderName: cryptoProvider}))
if err != nil {
panic(fmt.Sprintf("Failed to create new SDK: %s", err))
}
configProvider := sdk.ConfigProvider()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Error getting config")
}
localPeer, err := c.config.GetLocalPeer()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "GetLocalPeer return error")
}
//Find orgname matching localpeer mspID
nconfig, err := configProvider.NetworkConfig()
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "Failed to get network config")
}
var orgname string
for name, org := range nconfig.Organizations {
if org.MspID == string(localPeer.MSPid) {
orgname = name
break
}
}
userSession, err := sdk.NewClient(fabsdk.WithUser(txnSnapUser), fabsdk.WithOrg(orgname)).Session()
if err != nil {
return errors.Wrapf(errors.GeneralError, err, "failed getting user session for org %s", orgname)
}
client, err := sdk.FabricProvider().NewResourceClient(userSession.Identity())
if err != nil {
return errors.WithMessage(errors.GeneralError, err, "NewResourceClient failed")
}
c.client = client
logger.Debugf("Done initializing client. Default log level: %s, fabric_sdk_go log level: %s, txn-snap-config log lelvel: %s", logging.GetLevel(""), logging.GetLevel("fabric_sdk_go"), logging.GetLevel("txn-snap-config"))
return nil
} | identifier_body |
main.go | package main
import (
"flag"
"fmt"
"net/http"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
_ "net/http/pprof"
"github.com/c-sto/recursebuster/librecursebuster"
"github.com/fatih/color"
)
const version = "1.3.3"
func | () {
if runtime.GOOS == "windows" { //lol goos
//can't use color.Error, because *nix etc don't have that for some reason :(
librecursebuster.InitLogger(color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output)
} else {
librecursebuster.InitLogger(os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stderr)
}
wg := &sync.WaitGroup{}
cfg := librecursebuster.Config{}
//the state should probably change per different host.. eventually
globalState := librecursebuster.State{
BadResponses: make(map[int]bool),
Whitelist: make(map[string]bool),
Blacklist: make(map[string]bool),
}
globalState.Hosts.Init()
cfg.Version = version
totesTested := uint64(0)
globalState.TotalTested = &totesTested
showVersion := true
flag.BoolVar(&cfg.ShowAll, "all", false, "Show, and write the result of all checks")
flag.BoolVar(&cfg.AppendDir, "appendslash", false, "Append a / to all directory bruteforce requests (like extension, but slash instead of .yourthing)")
flag.StringVar(&cfg.Auth, "auth", "", "Basic auth. Supply this with the base64 encoded portion to be placed after the word 'Basic' in the Authorization header.")
flag.StringVar(&cfg.BadResponses, "bad", "404", "Responses to consider 'bad' or 'not found'. Comma-separated This works the opposite way of gobuster!")
flag.Var(&cfg.BadHeader, "badheader", "Check for presence of this header. If an exact match is found, the response is considered bad.Supply as key:value. Can specify multiple - eg '-badheader Location:cats -badheader X-ATT-DeviceId:XXXXX'")
//flag.StringVar(&cfg.BodyContent, "body", "", "File containing content to send in the body of the request.") all empty body for now
flag.StringVar(&cfg.BlacklistLocation, "blacklist", "", "Blacklist of prefixes to not check. Will not check on exact matches.")
flag.StringVar(&cfg.Canary, "canary", "", "Custom value to use to check for wildcards")
flag.BoolVar(&cfg.CleanOutput, "clean", false, "Output clean URLs to the output file for easy loading into other tools and whatnot.")
flag.StringVar(&cfg.Cookies, "cookies", "", "Any cookies to include with requests. This is smashed into the cookies header, so copy straight from burp I guess.")
flag.BoolVar(&cfg.Debug, "debug", false, "Enable debugging")
flag.IntVar(&cfg.MaxDirs, "dirs", 1, "Maximum directories to perform busting on concurrently NOTE: directories will still be brute forced, this setting simply directs how many should be concurrently bruteforced")
flag.StringVar(&cfg.Extensions, "ext", "", "Extensions to append to checks. Multiple extensions can be specified, comma separate them.")
flag.Var(&cfg.Headers, "headers", "Additional headers to include with request. Supply as key:value. Can specify multiple - eg '-headers X-Forwarded-For:127.0.01 -headers X-ATT-DeviceId:XXXXX'")
flag.BoolVar(&cfg.HTTPS, "https", false, "Use HTTPS instead of HTTP.")
flag.StringVar(&cfg.InputList, "iL", "", "File to use as an input list of URL's to start from")
flag.BoolVar(&cfg.SSLIgnore, "k", false, "Ignore SSL check")
flag.BoolVar(&cfg.ShowLen, "len", false, "Show, and write the length of the response")
flag.BoolVar(&cfg.NoGet, "noget", false, "Do not perform a GET request (only use HEAD request/response)")
flag.BoolVar(&cfg.NoHead, "nohead", false, "Don't optimize GET requests with a HEAD (only send the GET)")
flag.BoolVar(&cfg.NoRecursion, "norecursion", false, "Disable recursion, just work on the specified directory. Also disables spider function.")
flag.BoolVar(&cfg.NoSpider, "nospider", false, "Don't search the page body for links, and directories to add to the spider queue.")
flag.BoolVar(&cfg.NoStatus, "nostatus", false, "Don't print status info (for if it messes with the terminal)")
flag.StringVar(&cfg.Localpath, "o", "."+string(os.PathSeparator)+"busted.txt", "Local file to dump into")
flag.StringVar(&cfg.Methods, "methods", "GET", "Methods to use for checks. Multiple methods can be specified, comma separate them. Requests will be sent with an empty body (unless body is specified)")
flag.StringVar(&cfg.ProxyAddr, "proxy", "", "Proxy configuration options in the form ip:port eg: 127.0.0.1:9050. Note! If you want this to work with burp/use it with a HTTP proxy, specify as http://ip:port")
flag.Float64Var(&cfg.Ratio404, "ratio", 0.95, "Similarity ratio to the 404 canary page.")
flag.BoolVar(&cfg.FollowRedirects, "redirect", false, "Follow redirects")
flag.BoolVar(&cfg.BurpMode, "sitemap", false, "Send 'good' requests to the configured proxy. Requires the proxy flag to be set. ***NOTE: with this option, the proxy is ONLY used for good requests - all other requests go out as normal!***")
flag.IntVar(&cfg.Threads, "t", 1, "Number of concurrent threads")
flag.IntVar(&cfg.Timeout, "timeout", 20, "Timeout (seconds) for HTTP/TCP connections")
flag.StringVar(&cfg.URL, "u", "", "Url to spider")
flag.StringVar(&cfg.Agent, "ua", "RecurseBuster/"+version, "User agent to use when sending requests.")
flag.IntVar(&cfg.VerboseLevel, "v", 0, "Verbosity level for output messages.")
flag.BoolVar(&showVersion, "version", false, "Show version number and exit")
flag.StringVar(&cfg.Wordlist, "w", "", "Wordlist to use for bruteforce. Blank for spider only")
flag.StringVar(&cfg.WhitelistLocation, "whitelist", "", "Whitelist of domains to include in brute-force")
flag.Parse()
if cfg.Debug {
go func() {
http.ListenAndServe("localhost:6061", http.DefaultServeMux)
}()
}
if showVersion {
librecursebuster.PrintBanner(cfg)
os.Exit(0)
}
printChan := make(chan librecursebuster.OutLine, 200)
if cfg.URL == "" && cfg.InputList == "" {
flag.Usage()
os.Exit(1)
}
var h *url.URL
var err error
URLSlice := []string{} //
if cfg.URL != "" {
URLSlice = append(URLSlice, cfg.URL)
}
if cfg.InputList != "" { //can have both -u flag and -iL flag
//must be using an input list
URLList := make(chan string, 10)
go librecursebuster.LoadWords(cfg.InputList, URLList, printChan)
for x := range URLList {
//ensure all urls will parse good
_, err = url.Parse(x)
if err != nil {
panic("URL parse fail: " + err.Error())
}
URLSlice = append(URLSlice, x)
//globalState.Whitelist[u.Host] = true
}
}
h, err = url.Parse(URLSlice[0])
if err != nil {
panic("URL parse fail")
}
if h.Scheme == "" {
if cfg.HTTPS {
h, err = url.Parse("https://" + URLSlice[0])
} else {
h, err = url.Parse("http://" + URLSlice[0])
}
}
for _, x := range strings.Split(cfg.Extensions, ",") {
globalState.Extensions = append(globalState.Extensions, x)
}
for _, x := range strings.Split(cfg.Methods, ",") {
globalState.Methods = append(globalState.Methods, x)
}
for _, x := range strings.Split(cfg.BadResponses, ",") {
i, err := strconv.Atoi(x)
if err != nil {
panic(err)
}
globalState.BadResponses[i] = true //this is probably a candidate for individual urls. Unsure how to config that cleanly though
}
globalState.Hosts.AddHost(h)
//state.ParsedURL = h
client := librecursebuster.ConfigureHTTPClient(cfg, wg, printChan, false)
//setup channels
pages := make(chan librecursebuster.SpiderPage, 1000)
newPages := make(chan librecursebuster.SpiderPage, 10000)
confirmed := make(chan librecursebuster.SpiderPage, 1000)
workers := make(chan struct{}, cfg.Threads)
maxDirs := make(chan struct{}, cfg.MaxDirs)
testChan := make(chan string, 100)
globalState.Client = client
if cfg.BlacklistLocation != "" {
readerChan := make(chan string, 100)
go librecursebuster.LoadWords(cfg.BlacklistLocation, readerChan, printChan)
for x := range readerChan {
globalState.Blacklist[x] = true
}
}
if cfg.WhitelistLocation != "" {
readerChan := make(chan string, 100)
go librecursebuster.LoadWords(cfg.WhitelistLocation, readerChan, printChan)
for x := range readerChan {
globalState.Whitelist[x] = true
}
}
if cfg.Wordlist != "" && cfg.MaxDirs == 1 {
zerod := uint32(0)
globalState.DirbProgress = &zerod
zero := uint32(0)
globalState.WordlistLen = &zero
readerChan := make(chan string, 100)
go librecursebuster.LoadWords(cfg.Wordlist, readerChan, printChan)
for _ = range readerChan {
atomic.AddUint32(globalState.WordlistLen, 1)
}
}
canary := librecursebuster.RandString(printChan)
if cfg.Canary != "" {
canary = cfg.Canary
}
librecursebuster.PrintBanner(cfg)
//do first load of urls (send canary requests to make sure we can dirbust them)
globalState.StartTime = time.Now()
globalState.PerSecondShort = new(uint64)
globalState.PerSecondLong = new(uint64)
go librecursebuster.StatusPrinter(cfg, globalState, wg, printChan, testChan)
go librecursebuster.ManageRequests(cfg, globalState, wg, pages, newPages, confirmed, workers, printChan, maxDirs, testChan)
go librecursebuster.ManageNewURLs(cfg, globalState, wg, pages, newPages, printChan)
go librecursebuster.OutputWriter(wg, cfg, confirmed, cfg.Localpath, printChan)
go librecursebuster.StatsTracker(globalState)
librecursebuster.PrintOutput("Starting recursebuster... ", librecursebuster.Info, 0, wg, printChan)
//seed the workers
for _, s := range URLSlice {
u, err := url.Parse(s)
if err != nil {
panic(err)
}
if u.Scheme == "" {
if cfg.HTTPS {
u, err = url.Parse("https://" + s)
} else {
u, err = url.Parse("http://" + s)
}
}
//do canary etc
prefix := u.String()
if len(prefix) > 0 && string(prefix[len(prefix)-1]) != "/" {
prefix = prefix + "/"
}
randURL := fmt.Sprintf("%s%s", prefix, canary)
resp, content, err := librecursebuster.HttpReq("GET", randURL, client, cfg)
if err != nil {
panic("Canary Error, check url is correct: " + randURL + "\n" + err.Error())
}
librecursebuster.PrintOutput(
fmt.Sprintf("Canary sent: %s, Response: %v", randURL, resp.Status),
librecursebuster.Debug, 2, wg, printChan,
)
globalState.Hosts.AddSoft404Content(u.Host, content) // Soft404ResponseBody = xx
x := librecursebuster.SpiderPage{}
x.URL = u.String()
x.Reference = u
if !strings.HasSuffix(u.String(), "/") {
wg.Add(1)
pages <- librecursebuster.SpiderPage{
URL: h.String() + "/",
Reference: h,
}
}
wg.Add(1)
pages <- x
}
//wait for completion
wg.Wait()
}
| main | identifier_name |
main.go | package main
import (
"flag"
"fmt"
"net/http"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
_ "net/http/pprof"
"github.com/c-sto/recursebuster/librecursebuster"
"github.com/fatih/color"
)
const version = "1.3.3"
func main() {
if runtime.GOOS == "windows" { //lol goos
//can't use color.Error, because *nix etc don't have that for some reason :(
librecursebuster.InitLogger(color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output)
} else {
librecursebuster.InitLogger(os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stderr)
}
wg := &sync.WaitGroup{}
cfg := librecursebuster.Config{}
//the state should probably change per different host.. eventually
globalState := librecursebuster.State{
BadResponses: make(map[int]bool),
Whitelist: make(map[string]bool),
Blacklist: make(map[string]bool),
}
globalState.Hosts.Init()
cfg.Version = version
totesTested := uint64(0)
globalState.TotalTested = &totesTested
showVersion := true
flag.BoolVar(&cfg.ShowAll, "all", false, "Show, and write the result of all checks")
flag.BoolVar(&cfg.AppendDir, "appendslash", false, "Append a / to all directory bruteforce requests (like extension, but slash instead of .yourthing)")
flag.StringVar(&cfg.Auth, "auth", "", "Basic auth. Supply this with the base64 encoded portion to be placed after the word 'Basic' in the Authorization header.")
flag.StringVar(&cfg.BadResponses, "bad", "404", "Responses to consider 'bad' or 'not found'. Comma-separated This works the opposite way of gobuster!")
flag.Var(&cfg.BadHeader, "badheader", "Check for presence of this header. If an exact match is found, the response is considered bad.Supply as key:value. Can specify multiple - eg '-badheader Location:cats -badheader X-ATT-DeviceId:XXXXX'")
//flag.StringVar(&cfg.BodyContent, "body", "", "File containing content to send in the body of the request.") all empty body for now
flag.StringVar(&cfg.BlacklistLocation, "blacklist", "", "Blacklist of prefixes to not check. Will not check on exact matches.") | flag.StringVar(&cfg.Cookies, "cookies", "", "Any cookies to include with requests. This is smashed into the cookies header, so copy straight from burp I guess.")
flag.BoolVar(&cfg.Debug, "debug", false, "Enable debugging")
flag.IntVar(&cfg.MaxDirs, "dirs", 1, "Maximum directories to perform busting on concurrently NOTE: directories will still be brute forced, this setting simply directs how many should be concurrently bruteforced")
flag.StringVar(&cfg.Extensions, "ext", "", "Extensions to append to checks. Multiple extensions can be specified, comma separate them.")
flag.Var(&cfg.Headers, "headers", "Additional headers to include with request. Supply as key:value. Can specify multiple - eg '-headers X-Forwarded-For:127.0.01 -headers X-ATT-DeviceId:XXXXX'")
flag.BoolVar(&cfg.HTTPS, "https", false, "Use HTTPS instead of HTTP.")
flag.StringVar(&cfg.InputList, "iL", "", "File to use as an input list of URL's to start from")
flag.BoolVar(&cfg.SSLIgnore, "k", false, "Ignore SSL check")
flag.BoolVar(&cfg.ShowLen, "len", false, "Show, and write the length of the response")
flag.BoolVar(&cfg.NoGet, "noget", false, "Do not perform a GET request (only use HEAD request/response)")
flag.BoolVar(&cfg.NoHead, "nohead", false, "Don't optimize GET requests with a HEAD (only send the GET)")
flag.BoolVar(&cfg.NoRecursion, "norecursion", false, "Disable recursion, just work on the specified directory. Also disables spider function.")
flag.BoolVar(&cfg.NoSpider, "nospider", false, "Don't search the page body for links, and directories to add to the spider queue.")
flag.BoolVar(&cfg.NoStatus, "nostatus", false, "Don't print status info (for if it messes with the terminal)")
flag.StringVar(&cfg.Localpath, "o", "."+string(os.PathSeparator)+"busted.txt", "Local file to dump into")
flag.StringVar(&cfg.Methods, "methods", "GET", "Methods to use for checks. Multiple methods can be specified, comma separate them. Requests will be sent with an empty body (unless body is specified)")
flag.StringVar(&cfg.ProxyAddr, "proxy", "", "Proxy configuration options in the form ip:port eg: 127.0.0.1:9050. Note! If you want this to work with burp/use it with a HTTP proxy, specify as http://ip:port")
flag.Float64Var(&cfg.Ratio404, "ratio", 0.95, "Similarity ratio to the 404 canary page.")
flag.BoolVar(&cfg.FollowRedirects, "redirect", false, "Follow redirects")
flag.BoolVar(&cfg.BurpMode, "sitemap", false, "Send 'good' requests to the configured proxy. Requires the proxy flag to be set. ***NOTE: with this option, the proxy is ONLY used for good requests - all other requests go out as normal!***")
flag.IntVar(&cfg.Threads, "t", 1, "Number of concurrent threads")
flag.IntVar(&cfg.Timeout, "timeout", 20, "Timeout (seconds) for HTTP/TCP connections")
flag.StringVar(&cfg.URL, "u", "", "Url to spider")
flag.StringVar(&cfg.Agent, "ua", "RecurseBuster/"+version, "User agent to use when sending requests.")
flag.IntVar(&cfg.VerboseLevel, "v", 0, "Verbosity level for output messages.")
flag.BoolVar(&showVersion, "version", false, "Show version number and exit")
flag.StringVar(&cfg.Wordlist, "w", "", "Wordlist to use for bruteforce. Blank for spider only")
flag.StringVar(&cfg.WhitelistLocation, "whitelist", "", "Whitelist of domains to include in brute-force")
flag.Parse()
if cfg.Debug {
go func() {
http.ListenAndServe("localhost:6061", http.DefaultServeMux)
}()
}
if showVersion {
librecursebuster.PrintBanner(cfg)
os.Exit(0)
}
printChan := make(chan librecursebuster.OutLine, 200)
if cfg.URL == "" && cfg.InputList == "" {
flag.Usage()
os.Exit(1)
}
var h *url.URL
var err error
URLSlice := []string{} //
if cfg.URL != "" {
URLSlice = append(URLSlice, cfg.URL)
}
if cfg.InputList != "" { //can have both -u flag and -iL flag
//must be using an input list
URLList := make(chan string, 10)
go librecursebuster.LoadWords(cfg.InputList, URLList, printChan)
for x := range URLList {
//ensure all urls will parse good
_, err = url.Parse(x)
if err != nil {
panic("URL parse fail: " + err.Error())
}
URLSlice = append(URLSlice, x)
//globalState.Whitelist[u.Host] = true
}
}
h, err = url.Parse(URLSlice[0])
if err != nil {
panic("URL parse fail")
}
if h.Scheme == "" {
if cfg.HTTPS {
h, err = url.Parse("https://" + URLSlice[0])
} else {
h, err = url.Parse("http://" + URLSlice[0])
}
}
for _, x := range strings.Split(cfg.Extensions, ",") {
globalState.Extensions = append(globalState.Extensions, x)
}
for _, x := range strings.Split(cfg.Methods, ",") {
globalState.Methods = append(globalState.Methods, x)
}
for _, x := range strings.Split(cfg.BadResponses, ",") {
i, err := strconv.Atoi(x)
if err != nil {
panic(err)
}
globalState.BadResponses[i] = true //this is probably a candidate for individual urls. Unsure how to config that cleanly though
}
globalState.Hosts.AddHost(h)
//state.ParsedURL = h
client := librecursebuster.ConfigureHTTPClient(cfg, wg, printChan, false)
//setup channels
pages := make(chan librecursebuster.SpiderPage, 1000)
newPages := make(chan librecursebuster.SpiderPage, 10000)
confirmed := make(chan librecursebuster.SpiderPage, 1000)
workers := make(chan struct{}, cfg.Threads)
maxDirs := make(chan struct{}, cfg.MaxDirs)
testChan := make(chan string, 100)
globalState.Client = client
if cfg.BlacklistLocation != "" {
readerChan := make(chan string, 100)
go librecursebuster.LoadWords(cfg.BlacklistLocation, readerChan, printChan)
for x := range readerChan {
globalState.Blacklist[x] = true
}
}
if cfg.WhitelistLocation != "" {
readerChan := make(chan string, 100)
go librecursebuster.LoadWords(cfg.WhitelistLocation, readerChan, printChan)
for x := range readerChan {
globalState.Whitelist[x] = true
}
}
if cfg.Wordlist != "" && cfg.MaxDirs == 1 {
zerod := uint32(0)
globalState.DirbProgress = &zerod
zero := uint32(0)
globalState.WordlistLen = &zero
readerChan := make(chan string, 100)
go librecursebuster.LoadWords(cfg.Wordlist, readerChan, printChan)
for _ = range readerChan {
atomic.AddUint32(globalState.WordlistLen, 1)
}
}
canary := librecursebuster.RandString(printChan)
if cfg.Canary != "" {
canary = cfg.Canary
}
librecursebuster.PrintBanner(cfg)
//do first load of urls (send canary requests to make sure we can dirbust them)
globalState.StartTime = time.Now()
globalState.PerSecondShort = new(uint64)
globalState.PerSecondLong = new(uint64)
go librecursebuster.StatusPrinter(cfg, globalState, wg, printChan, testChan)
go librecursebuster.ManageRequests(cfg, globalState, wg, pages, newPages, confirmed, workers, printChan, maxDirs, testChan)
go librecursebuster.ManageNewURLs(cfg, globalState, wg, pages, newPages, printChan)
go librecursebuster.OutputWriter(wg, cfg, confirmed, cfg.Localpath, printChan)
go librecursebuster.StatsTracker(globalState)
librecursebuster.PrintOutput("Starting recursebuster... ", librecursebuster.Info, 0, wg, printChan)
//seed the workers
for _, s := range URLSlice {
u, err := url.Parse(s)
if err != nil {
panic(err)
}
if u.Scheme == "" {
if cfg.HTTPS {
u, err = url.Parse("https://" + s)
} else {
u, err = url.Parse("http://" + s)
}
}
//do canary etc
prefix := u.String()
if len(prefix) > 0 && string(prefix[len(prefix)-1]) != "/" {
prefix = prefix + "/"
}
randURL := fmt.Sprintf("%s%s", prefix, canary)
resp, content, err := librecursebuster.HttpReq("GET", randURL, client, cfg)
if err != nil {
panic("Canary Error, check url is correct: " + randURL + "\n" + err.Error())
}
librecursebuster.PrintOutput(
fmt.Sprintf("Canary sent: %s, Response: %v", randURL, resp.Status),
librecursebuster.Debug, 2, wg, printChan,
)
globalState.Hosts.AddSoft404Content(u.Host, content) // Soft404ResponseBody = xx
x := librecursebuster.SpiderPage{}
x.URL = u.String()
x.Reference = u
if !strings.HasSuffix(u.String(), "/") {
wg.Add(1)
pages <- librecursebuster.SpiderPage{
URL: h.String() + "/",
Reference: h,
}
}
wg.Add(1)
pages <- x
}
//wait for completion
wg.Wait()
} | flag.StringVar(&cfg.Canary, "canary", "", "Custom value to use to check for wildcards")
flag.BoolVar(&cfg.CleanOutput, "clean", false, "Output clean URLs to the output file for easy loading into other tools and whatnot.") | random_line_split |
main.go | package main
import (
"flag"
"fmt"
"net/http"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
_ "net/http/pprof"
"github.com/c-sto/recursebuster/librecursebuster"
"github.com/fatih/color"
)
const version = "1.3.3"
func main() | {
if runtime.GOOS == "windows" { //lol goos
//can't use color.Error, because *nix etc don't have that for some reason :(
librecursebuster.InitLogger(color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output)
} else {
librecursebuster.InitLogger(os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stderr)
}
wg := &sync.WaitGroup{}
cfg := librecursebuster.Config{}
//the state should probably change per different host.. eventually
globalState := librecursebuster.State{
BadResponses: make(map[int]bool),
Whitelist: make(map[string]bool),
Blacklist: make(map[string]bool),
}
globalState.Hosts.Init()
cfg.Version = version
totesTested := uint64(0)
globalState.TotalTested = &totesTested
showVersion := true
flag.BoolVar(&cfg.ShowAll, "all", false, "Show, and write the result of all checks")
flag.BoolVar(&cfg.AppendDir, "appendslash", false, "Append a / to all directory bruteforce requests (like extension, but slash instead of .yourthing)")
flag.StringVar(&cfg.Auth, "auth", "", "Basic auth. Supply this with the base64 encoded portion to be placed after the word 'Basic' in the Authorization header.")
flag.StringVar(&cfg.BadResponses, "bad", "404", "Responses to consider 'bad' or 'not found'. Comma-separated This works the opposite way of gobuster!")
flag.Var(&cfg.BadHeader, "badheader", "Check for presence of this header. If an exact match is found, the response is considered bad.Supply as key:value. Can specify multiple - eg '-badheader Location:cats -badheader X-ATT-DeviceId:XXXXX'")
//flag.StringVar(&cfg.BodyContent, "body", "", "File containing content to send in the body of the request.") all empty body for now
flag.StringVar(&cfg.BlacklistLocation, "blacklist", "", "Blacklist of prefixes to not check. Will not check on exact matches.")
flag.StringVar(&cfg.Canary, "canary", "", "Custom value to use to check for wildcards")
flag.BoolVar(&cfg.CleanOutput, "clean", false, "Output clean URLs to the output file for easy loading into other tools and whatnot.")
flag.StringVar(&cfg.Cookies, "cookies", "", "Any cookies to include with requests. This is smashed into the cookies header, so copy straight from burp I guess.")
flag.BoolVar(&cfg.Debug, "debug", false, "Enable debugging")
flag.IntVar(&cfg.MaxDirs, "dirs", 1, "Maximum directories to perform busting on concurrently NOTE: directories will still be brute forced, this setting simply directs how many should be concurrently bruteforced")
flag.StringVar(&cfg.Extensions, "ext", "", "Extensions to append to checks. Multiple extensions can be specified, comma separate them.")
flag.Var(&cfg.Headers, "headers", "Additional headers to include with request. Supply as key:value. Can specify multiple - eg '-headers X-Forwarded-For:127.0.01 -headers X-ATT-DeviceId:XXXXX'")
flag.BoolVar(&cfg.HTTPS, "https", false, "Use HTTPS instead of HTTP.")
flag.StringVar(&cfg.InputList, "iL", "", "File to use as an input list of URL's to start from")
flag.BoolVar(&cfg.SSLIgnore, "k", false, "Ignore SSL check")
flag.BoolVar(&cfg.ShowLen, "len", false, "Show, and write the length of the response")
flag.BoolVar(&cfg.NoGet, "noget", false, "Do not perform a GET request (only use HEAD request/response)")
flag.BoolVar(&cfg.NoHead, "nohead", false, "Don't optimize GET requests with a HEAD (only send the GET)")
flag.BoolVar(&cfg.NoRecursion, "norecursion", false, "Disable recursion, just work on the specified directory. Also disables spider function.")
flag.BoolVar(&cfg.NoSpider, "nospider", false, "Don't search the page body for links, and directories to add to the spider queue.")
flag.BoolVar(&cfg.NoStatus, "nostatus", false, "Don't print status info (for if it messes with the terminal)")
flag.StringVar(&cfg.Localpath, "o", "."+string(os.PathSeparator)+"busted.txt", "Local file to dump into")
flag.StringVar(&cfg.Methods, "methods", "GET", "Methods to use for checks. Multiple methods can be specified, comma separate them. Requests will be sent with an empty body (unless body is specified)")
flag.StringVar(&cfg.ProxyAddr, "proxy", "", "Proxy configuration options in the form ip:port eg: 127.0.0.1:9050. Note! If you want this to work with burp/use it with a HTTP proxy, specify as http://ip:port")
flag.Float64Var(&cfg.Ratio404, "ratio", 0.95, "Similarity ratio to the 404 canary page.")
flag.BoolVar(&cfg.FollowRedirects, "redirect", false, "Follow redirects")
flag.BoolVar(&cfg.BurpMode, "sitemap", false, "Send 'good' requests to the configured proxy. Requires the proxy flag to be set. ***NOTE: with this option, the proxy is ONLY used for good requests - all other requests go out as normal!***")
flag.IntVar(&cfg.Threads, "t", 1, "Number of concurrent threads")
flag.IntVar(&cfg.Timeout, "timeout", 20, "Timeout (seconds) for HTTP/TCP connections")
flag.StringVar(&cfg.URL, "u", "", "Url to spider")
flag.StringVar(&cfg.Agent, "ua", "RecurseBuster/"+version, "User agent to use when sending requests.")
flag.IntVar(&cfg.VerboseLevel, "v", 0, "Verbosity level for output messages.")
flag.BoolVar(&showVersion, "version", false, "Show version number and exit")
flag.StringVar(&cfg.Wordlist, "w", "", "Wordlist to use for bruteforce. Blank for spider only")
flag.StringVar(&cfg.WhitelistLocation, "whitelist", "", "Whitelist of domains to include in brute-force")
flag.Parse()
if cfg.Debug {
go func() {
http.ListenAndServe("localhost:6061", http.DefaultServeMux)
}()
}
if showVersion {
librecursebuster.PrintBanner(cfg)
os.Exit(0)
}
printChan := make(chan librecursebuster.OutLine, 200)
if cfg.URL == "" && cfg.InputList == "" {
flag.Usage()
os.Exit(1)
}
var h *url.URL
var err error
URLSlice := []string{} //
if cfg.URL != "" {
URLSlice = append(URLSlice, cfg.URL)
}
if cfg.InputList != "" { //can have both -u flag and -iL flag
//must be using an input list
URLList := make(chan string, 10)
go librecursebuster.LoadWords(cfg.InputList, URLList, printChan)
for x := range URLList {
//ensure all urls will parse good
_, err = url.Parse(x)
if err != nil {
panic("URL parse fail: " + err.Error())
}
URLSlice = append(URLSlice, x)
//globalState.Whitelist[u.Host] = true
}
}
h, err = url.Parse(URLSlice[0])
if err != nil {
panic("URL parse fail")
}
if h.Scheme == "" {
if cfg.HTTPS {
h, err = url.Parse("https://" + URLSlice[0])
} else {
h, err = url.Parse("http://" + URLSlice[0])
}
}
for _, x := range strings.Split(cfg.Extensions, ",") {
globalState.Extensions = append(globalState.Extensions, x)
}
for _, x := range strings.Split(cfg.Methods, ",") {
globalState.Methods = append(globalState.Methods, x)
}
for _, x := range strings.Split(cfg.BadResponses, ",") {
i, err := strconv.Atoi(x)
if err != nil {
panic(err)
}
globalState.BadResponses[i] = true //this is probably a candidate for individual urls. Unsure how to config that cleanly though
}
globalState.Hosts.AddHost(h)
//state.ParsedURL = h
client := librecursebuster.ConfigureHTTPClient(cfg, wg, printChan, false)
//setup channels
pages := make(chan librecursebuster.SpiderPage, 1000)
newPages := make(chan librecursebuster.SpiderPage, 10000)
confirmed := make(chan librecursebuster.SpiderPage, 1000)
workers := make(chan struct{}, cfg.Threads)
maxDirs := make(chan struct{}, cfg.MaxDirs)
testChan := make(chan string, 100)
globalState.Client = client
if cfg.BlacklistLocation != "" {
readerChan := make(chan string, 100)
go librecursebuster.LoadWords(cfg.BlacklistLocation, readerChan, printChan)
for x := range readerChan {
globalState.Blacklist[x] = true
}
}
if cfg.WhitelistLocation != "" {
readerChan := make(chan string, 100)
go librecursebuster.LoadWords(cfg.WhitelistLocation, readerChan, printChan)
for x := range readerChan {
globalState.Whitelist[x] = true
}
}
if cfg.Wordlist != "" && cfg.MaxDirs == 1 {
zerod := uint32(0)
globalState.DirbProgress = &zerod
zero := uint32(0)
globalState.WordlistLen = &zero
readerChan := make(chan string, 100)
go librecursebuster.LoadWords(cfg.Wordlist, readerChan, printChan)
for _ = range readerChan {
atomic.AddUint32(globalState.WordlistLen, 1)
}
}
canary := librecursebuster.RandString(printChan)
if cfg.Canary != "" {
canary = cfg.Canary
}
librecursebuster.PrintBanner(cfg)
//do first load of urls (send canary requests to make sure we can dirbust them)
globalState.StartTime = time.Now()
globalState.PerSecondShort = new(uint64)
globalState.PerSecondLong = new(uint64)
go librecursebuster.StatusPrinter(cfg, globalState, wg, printChan, testChan)
go librecursebuster.ManageRequests(cfg, globalState, wg, pages, newPages, confirmed, workers, printChan, maxDirs, testChan)
go librecursebuster.ManageNewURLs(cfg, globalState, wg, pages, newPages, printChan)
go librecursebuster.OutputWriter(wg, cfg, confirmed, cfg.Localpath, printChan)
go librecursebuster.StatsTracker(globalState)
librecursebuster.PrintOutput("Starting recursebuster... ", librecursebuster.Info, 0, wg, printChan)
//seed the workers
for _, s := range URLSlice {
u, err := url.Parse(s)
if err != nil {
panic(err)
}
if u.Scheme == "" {
if cfg.HTTPS {
u, err = url.Parse("https://" + s)
} else {
u, err = url.Parse("http://" + s)
}
}
//do canary etc
prefix := u.String()
if len(prefix) > 0 && string(prefix[len(prefix)-1]) != "/" {
prefix = prefix + "/"
}
randURL := fmt.Sprintf("%s%s", prefix, canary)
resp, content, err := librecursebuster.HttpReq("GET", randURL, client, cfg)
if err != nil {
panic("Canary Error, check url is correct: " + randURL + "\n" + err.Error())
}
librecursebuster.PrintOutput(
fmt.Sprintf("Canary sent: %s, Response: %v", randURL, resp.Status),
librecursebuster.Debug, 2, wg, printChan,
)
globalState.Hosts.AddSoft404Content(u.Host, content) // Soft404ResponseBody = xx
x := librecursebuster.SpiderPage{}
x.URL = u.String()
x.Reference = u
if !strings.HasSuffix(u.String(), "/") {
wg.Add(1)
pages <- librecursebuster.SpiderPage{
URL: h.String() + "/",
Reference: h,
}
}
wg.Add(1)
pages <- x
}
//wait for completion
wg.Wait()
} | identifier_body | |
main.go | package main
import (
"flag"
"fmt"
"net/http"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
_ "net/http/pprof"
"github.com/c-sto/recursebuster/librecursebuster"
"github.com/fatih/color"
)
const version = "1.3.3"
func main() {
if runtime.GOOS == "windows" { //lol goos
//can't use color.Error, because *nix etc don't have that for some reason :(
librecursebuster.InitLogger(color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output, color.Output)
} else {
librecursebuster.InitLogger(os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stdout, os.Stderr)
}
wg := &sync.WaitGroup{}
cfg := librecursebuster.Config{}
//the state should probably change per different host.. eventually
globalState := librecursebuster.State{
BadResponses: make(map[int]bool),
Whitelist: make(map[string]bool),
Blacklist: make(map[string]bool),
}
globalState.Hosts.Init()
cfg.Version = version
totesTested := uint64(0)
globalState.TotalTested = &totesTested
showVersion := true
flag.BoolVar(&cfg.ShowAll, "all", false, "Show, and write the result of all checks")
flag.BoolVar(&cfg.AppendDir, "appendslash", false, "Append a / to all directory bruteforce requests (like extension, but slash instead of .yourthing)")
flag.StringVar(&cfg.Auth, "auth", "", "Basic auth. Supply this with the base64 encoded portion to be placed after the word 'Basic' in the Authorization header.")
flag.StringVar(&cfg.BadResponses, "bad", "404", "Responses to consider 'bad' or 'not found'. Comma-separated This works the opposite way of gobuster!")
flag.Var(&cfg.BadHeader, "badheader", "Check for presence of this header. If an exact match is found, the response is considered bad.Supply as key:value. Can specify multiple - eg '-badheader Location:cats -badheader X-ATT-DeviceId:XXXXX'")
//flag.StringVar(&cfg.BodyContent, "body", "", "File containing content to send in the body of the request.") all empty body for now
flag.StringVar(&cfg.BlacklistLocation, "blacklist", "", "Blacklist of prefixes to not check. Will not check on exact matches.")
flag.StringVar(&cfg.Canary, "canary", "", "Custom value to use to check for wildcards")
flag.BoolVar(&cfg.CleanOutput, "clean", false, "Output clean URLs to the output file for easy loading into other tools and whatnot.")
flag.StringVar(&cfg.Cookies, "cookies", "", "Any cookies to include with requests. This is smashed into the cookies header, so copy straight from burp I guess.")
flag.BoolVar(&cfg.Debug, "debug", false, "Enable debugging")
flag.IntVar(&cfg.MaxDirs, "dirs", 1, "Maximum directories to perform busting on concurrently NOTE: directories will still be brute forced, this setting simply directs how many should be concurrently bruteforced")
flag.StringVar(&cfg.Extensions, "ext", "", "Extensions to append to checks. Multiple extensions can be specified, comma separate them.")
flag.Var(&cfg.Headers, "headers", "Additional headers to include with request. Supply as key:value. Can specify multiple - eg '-headers X-Forwarded-For:127.0.01 -headers X-ATT-DeviceId:XXXXX'")
flag.BoolVar(&cfg.HTTPS, "https", false, "Use HTTPS instead of HTTP.")
flag.StringVar(&cfg.InputList, "iL", "", "File to use as an input list of URL's to start from")
flag.BoolVar(&cfg.SSLIgnore, "k", false, "Ignore SSL check")
flag.BoolVar(&cfg.ShowLen, "len", false, "Show, and write the length of the response")
flag.BoolVar(&cfg.NoGet, "noget", false, "Do not perform a GET request (only use HEAD request/response)")
flag.BoolVar(&cfg.NoHead, "nohead", false, "Don't optimize GET requests with a HEAD (only send the GET)")
flag.BoolVar(&cfg.NoRecursion, "norecursion", false, "Disable recursion, just work on the specified directory. Also disables spider function.")
flag.BoolVar(&cfg.NoSpider, "nospider", false, "Don't search the page body for links, and directories to add to the spider queue.")
flag.BoolVar(&cfg.NoStatus, "nostatus", false, "Don't print status info (for if it messes with the terminal)")
flag.StringVar(&cfg.Localpath, "o", "."+string(os.PathSeparator)+"busted.txt", "Local file to dump into")
flag.StringVar(&cfg.Methods, "methods", "GET", "Methods to use for checks. Multiple methods can be specified, comma separate them. Requests will be sent with an empty body (unless body is specified)")
flag.StringVar(&cfg.ProxyAddr, "proxy", "", "Proxy configuration options in the form ip:port eg: 127.0.0.1:9050. Note! If you want this to work with burp/use it with a HTTP proxy, specify as http://ip:port")
flag.Float64Var(&cfg.Ratio404, "ratio", 0.95, "Similarity ratio to the 404 canary page.")
flag.BoolVar(&cfg.FollowRedirects, "redirect", false, "Follow redirects")
flag.BoolVar(&cfg.BurpMode, "sitemap", false, "Send 'good' requests to the configured proxy. Requires the proxy flag to be set. ***NOTE: with this option, the proxy is ONLY used for good requests - all other requests go out as normal!***")
flag.IntVar(&cfg.Threads, "t", 1, "Number of concurrent threads")
flag.IntVar(&cfg.Timeout, "timeout", 20, "Timeout (seconds) for HTTP/TCP connections")
flag.StringVar(&cfg.URL, "u", "", "Url to spider")
flag.StringVar(&cfg.Agent, "ua", "RecurseBuster/"+version, "User agent to use when sending requests.")
flag.IntVar(&cfg.VerboseLevel, "v", 0, "Verbosity level for output messages.")
flag.BoolVar(&showVersion, "version", false, "Show version number and exit")
flag.StringVar(&cfg.Wordlist, "w", "", "Wordlist to use for bruteforce. Blank for spider only")
flag.StringVar(&cfg.WhitelistLocation, "whitelist", "", "Whitelist of domains to include in brute-force")
flag.Parse()
if cfg.Debug {
go func() {
http.ListenAndServe("localhost:6061", http.DefaultServeMux)
}()
}
if showVersion {
librecursebuster.PrintBanner(cfg)
os.Exit(0)
}
printChan := make(chan librecursebuster.OutLine, 200)
if cfg.URL == "" && cfg.InputList == "" {
flag.Usage()
os.Exit(1)
}
var h *url.URL
var err error
URLSlice := []string{} //
if cfg.URL != "" {
URLSlice = append(URLSlice, cfg.URL)
}
if cfg.InputList != "" { //can have both -u flag and -iL flag
//must be using an input list
URLList := make(chan string, 10)
go librecursebuster.LoadWords(cfg.InputList, URLList, printChan)
for x := range URLList {
//ensure all urls will parse good
_, err = url.Parse(x)
if err != nil {
panic("URL parse fail: " + err.Error())
}
URLSlice = append(URLSlice, x)
//globalState.Whitelist[u.Host] = true
}
}
h, err = url.Parse(URLSlice[0])
if err != nil {
panic("URL parse fail")
}
if h.Scheme == "" {
if cfg.HTTPS {
h, err = url.Parse("https://" + URLSlice[0])
} else {
h, err = url.Parse("http://" + URLSlice[0])
}
}
for _, x := range strings.Split(cfg.Extensions, ",") {
globalState.Extensions = append(globalState.Extensions, x)
}
for _, x := range strings.Split(cfg.Methods, ",") {
globalState.Methods = append(globalState.Methods, x)
}
for _, x := range strings.Split(cfg.BadResponses, ",") {
i, err := strconv.Atoi(x)
if err != nil {
panic(err)
}
globalState.BadResponses[i] = true //this is probably a candidate for individual urls. Unsure how to config that cleanly though
}
globalState.Hosts.AddHost(h)
//state.ParsedURL = h
client := librecursebuster.ConfigureHTTPClient(cfg, wg, printChan, false)
//setup channels
pages := make(chan librecursebuster.SpiderPage, 1000)
newPages := make(chan librecursebuster.SpiderPage, 10000)
confirmed := make(chan librecursebuster.SpiderPage, 1000)
workers := make(chan struct{}, cfg.Threads)
maxDirs := make(chan struct{}, cfg.MaxDirs)
testChan := make(chan string, 100)
globalState.Client = client
if cfg.BlacklistLocation != "" {
readerChan := make(chan string, 100)
go librecursebuster.LoadWords(cfg.BlacklistLocation, readerChan, printChan)
for x := range readerChan |
}
if cfg.WhitelistLocation != "" {
readerChan := make(chan string, 100)
go librecursebuster.LoadWords(cfg.WhitelistLocation, readerChan, printChan)
for x := range readerChan {
globalState.Whitelist[x] = true
}
}
if cfg.Wordlist != "" && cfg.MaxDirs == 1 {
zerod := uint32(0)
globalState.DirbProgress = &zerod
zero := uint32(0)
globalState.WordlistLen = &zero
readerChan := make(chan string, 100)
go librecursebuster.LoadWords(cfg.Wordlist, readerChan, printChan)
for _ = range readerChan {
atomic.AddUint32(globalState.WordlistLen, 1)
}
}
canary := librecursebuster.RandString(printChan)
if cfg.Canary != "" {
canary = cfg.Canary
}
librecursebuster.PrintBanner(cfg)
//do first load of urls (send canary requests to make sure we can dirbust them)
globalState.StartTime = time.Now()
globalState.PerSecondShort = new(uint64)
globalState.PerSecondLong = new(uint64)
go librecursebuster.StatusPrinter(cfg, globalState, wg, printChan, testChan)
go librecursebuster.ManageRequests(cfg, globalState, wg, pages, newPages, confirmed, workers, printChan, maxDirs, testChan)
go librecursebuster.ManageNewURLs(cfg, globalState, wg, pages, newPages, printChan)
go librecursebuster.OutputWriter(wg, cfg, confirmed, cfg.Localpath, printChan)
go librecursebuster.StatsTracker(globalState)
librecursebuster.PrintOutput("Starting recursebuster... ", librecursebuster.Info, 0, wg, printChan)
//seed the workers
for _, s := range URLSlice {
u, err := url.Parse(s)
if err != nil {
panic(err)
}
if u.Scheme == "" {
if cfg.HTTPS {
u, err = url.Parse("https://" + s)
} else {
u, err = url.Parse("http://" + s)
}
}
//do canary etc
prefix := u.String()
if len(prefix) > 0 && string(prefix[len(prefix)-1]) != "/" {
prefix = prefix + "/"
}
randURL := fmt.Sprintf("%s%s", prefix, canary)
resp, content, err := librecursebuster.HttpReq("GET", randURL, client, cfg)
if err != nil {
panic("Canary Error, check url is correct: " + randURL + "\n" + err.Error())
}
librecursebuster.PrintOutput(
fmt.Sprintf("Canary sent: %s, Response: %v", randURL, resp.Status),
librecursebuster.Debug, 2, wg, printChan,
)
globalState.Hosts.AddSoft404Content(u.Host, content) // Soft404ResponseBody = xx
x := librecursebuster.SpiderPage{}
x.URL = u.String()
x.Reference = u
if !strings.HasSuffix(u.String(), "/") {
wg.Add(1)
pages <- librecursebuster.SpiderPage{
URL: h.String() + "/",
Reference: h,
}
}
wg.Add(1)
pages <- x
}
//wait for completion
wg.Wait()
}
| {
globalState.Blacklist[x] = true
} | conditional_block |
upload.js | YUI({
combine : true,
comboBase : 'http://img4.cache.netease.com/service/combo?'
}).use('uploader', 'overlay', "json-parse",function (Y) {
if (logined) {
if (Y.Uploader.TYPE != "none") {
var selectFile = Y.one('#selectFile'), //浏览
selectFileBtn = Y.one('#selectFileBtn'), //选择按钮
fileInput = Y.one('#fileInput'),
uploading = Y.one("#uploading"), //上传进度box
uploadProgess = Y.one('#uploadProgess'), //进度条
//uploadRate = Y.one('#uploadRate'), //速度
timeRemaining = Y.one('#timeRemaining'), //剩余时间
uploadFinish = Y.one('#uploadFinish'), //上传完成
uploadSuccess = Y.one('#uploadSuccess'), //上传成功
videoInfo = Y.one('#videoInfo'), //视频信息
bytesLoaded = 0,
timer,
file, //文件
fileName,
fileSize,
videoForm = Y.one('#videoForm'), //视频信息box
cancelUploadOl, //弹窗
errorUploadOl,
videoId, //视频id
videoData = {
type : 60
},
videoInfoForm = videoForm.one('form'),
videoType = '原创', //存储视频类型用于显示
hasFilledIn = false, //是否提交信息
isUploadComplete = false, //是否上传完
inputTitle = Y.one('#inputTitle'),
inputDescription = Y.one('#inputDescription'),
inputTag = Y.one('#inputTag'),
inputType=Y.one('#inputType'),
wordCount = Y.one('#descriptionTips span'),
unloadEvent,
uploader = new Y.Uploader({
selectFilesButton : Y.one('#selectFilesButton'),
buttonClassNames : {
hover :
'v-upload-btn-hover',
active :
'',
disabled :
'',
focus :
''
},
swfURL : "http://swf.ws.126.net/v/flashuploader.swf?" + Math.random(),
uploadURL : "http://ugcv.ws.netease.com/video/transcode?watermark=1&enctype=1&ugc=1"
});
uploader.render('#selectFileBtn');
//发送视频信息
function sendvideoForm() {
hasFilledIn = false;
isUploadComplete = false;
document.domain = "163.com";
document.getElementById('crossdomain').contentWindow.Y.io('http://so.v.163.com/ugc/addvideoasync.do', {
method : 'POST',
data : videoData,
on : {
success : function (id,o) {
data = Y.JSON.parse(o.responseText);
if(data.info=='add success'){
uploading.addClass('hidden');
videoInfo.addClass('hidden');
uploadFinish.addClass('hidden');
videoForm.addClass('hidden');
uploadSuccess.removeClass('hidden');
uploadSuccess.one('#videoCover').setContent('<img src="' + videoData.imagepath + '" width="120" height="90" alt="' + videoData.title.replace(/"|'/g, '') + '">');
window.onbeforeunload=null;
videoCallbacked && videoCallbacked(data);
}
}
}
});
}
//秒算时间
function timeConver(scd) {
var h = 0,
m = 0,
s = 0,
resultString;
if (scd >= 3600) {
h = Math.floor(scd / 3600);
if (scd - h * 3600 > 0) {
m = Math.floor((scd - h * 3600) / 60);
if (scd - h * 3600 - m * 60 > 0) {
s = scd - h * 3600 - m * 60;
}
};
resultString = h + '\u5c0f\u65f6' + m + '\u5206' + s + '\u79d2';
} else if (scd >= 60) {
m = Math.floor(scd / 60);
if (scd - m * 60 > 0) {
s = scd - m * 60;
};
resultString = m + '\u5206' + s + '\u79d2';
} else {
s = scd;
resultString = s + '\u79d2';
};
return resultString;
}
//截字
function cutString(str,num){
var strReg = /[^x00-xff]/g,
strLen = str.replace(strReg, 'aa').length,
newStr,
fullWidthNum=num/2;
if (strLen > num) {
for (i = fullWidthNum; i < strLen; i++) {
newStr = str.substr(0, i).replace(strReg, "aa");
if (newStr.length >= num) {
return str.substr(0, i);
}
};
}else{
return str;
}
}
//选择文件
uploader.after("fileselect", function (event) {
var suffix;
file = event.fileList[0];
fileName=file.get('name').replace(/<|>/g, '');
suffix=fileName.substring(fileName.lastIndexOf('.'));
videoData.title = cutString(fileName.replace(suffix,''),60);
fileName=videoData.title+suffix;
fileSize = file.get('size');
selectFileBtn.setStyle('z-index', -1);
selectFile.addClass('hidden');
uploading.removeClass('hidden');
//videoForm.removeClass('hidden');
Y.all('.fileUploading').each(function (node) {
node.setContent(fileName);
});
Y.one('#inputTitle').set('value', videoData.title);
uploader.uploadAll();
unloadEvent=window.onbeforeunload = function (e) {
e = e || window.event;
e.returnValue = '\u89c6\u9891\u4e0a\u4f20\u64cd\u4f5c\u5c1a\u672a\u5b8c\u6210\uff0c\u60a8\u7684\u64cd\u4f5c\u4f1a\u5bfc\u81f4\u89c6\u9891\u4e0a\u4f20\u88ab\u53d6\u6d88\uff0c\u662f\u5426\u786e\u5b9a\u5173\u95ed\u7a97\u53e3\uff1f';
return '\u89c6\u9891\u4e0a\u4f20\u64cd\u4f5c\u5c1a\u672a\u5b8c\u6210\uff0c\u60a8\u7684\u64cd\u4f5c\u4f1a\u5bfc\u81f4\u89c6\u9891\u4e0a\u4f20\u88ab\u53d6\u6d88\uff0c\u662f\u5426\u786e\u5b9a\u5173\u95ed\u7a97\u53e3\uff1f';
}
});
//上传进度
uploader.on("uploadprogress", function (event) {
//uploadProgess.one('span').setContent(event.percentLoaded + '%');
uploadProgess.one(".bar").setStyle("width", event.percentLoaded + "%");
if (!timer) {
timer = Y.later(1000, event, function () {
var rate = (event.bytesLoaded - bytesLoaded) / 1000, //速度:byte/1000=kb
secondsRemain = Math.floor((event.bytesTotal - bytesLoaded) / 1000 / rate); //剩余秒数:kb/rate=s取整
//uploadRate.setContent(rate.toFixed(2) + 'KB/s');
bytesLoaded = event.bytesLoaded;
//timeRemaining.setContent(timeConver(secondsRemain));
timer = undefined;
});
}
if (event.percentLoaded == 100) {
//timeRemaining.setContent('0秒');
timer.cancel();
}
});
//上传结束
uploader.on("uploadcomplete", function (event) {
var msgData=Y.JSON.parse(event.data);
if (msgData.state == 'success') {
videoData.rvideoid=msgData.vid;
videoData.imagepath=msgData.snapshot[2]||msgData.snapshot[1]||msgData.snapshot[0]||'';
videoData.filepath=msgData.filepath;
videoData.soureip=msgData.soureip;
videoData.playlength=msgData.playlength;
videoData.filename=msgData.filename;
isUploadComplete = true;
if (hasFilledIn) {
sendvideoForm();
} else {
uploading.addClass('hidden');
uploadFinish.removeClass('hidden');
}
videoInfoForm.one(".v-upload-form-submit")._node.click();
| tion (e) {
e.preventDefault();
window.onbeforeunload=null;
window.location.reload();
}, '.errorUploadY');
mask.setStyle('display', 'block');
errorUploadOl.centered();
errorUploadOl.show();
}
});
uploader.on('uploaderror',function(e){
Y.log(e);
});
//取消上传
Y.one('#cancelUpload').on('click', function (e) {
e.preventDefault();
var closeEvent = Y.on('click', function (e) {
e.preventDefault();
cancelUploadOl.hide();
mask.setStyle('display', 'none');
closeEvent.detach();
}, '.cancelUploadN'),
confirmEvent = Y.on('click', function (e) {
e.preventDefault();
window.onbeforeunload=null;
window.location.reload();
}, '#cancelUploadY');
mask.setStyle('display', 'block');
cancelUploadOl.centered();
cancelUploadOl.show();
});
//提交信息
videoInfoForm.on('submit', function (e) {
e.preventDefault();
var title = inputTitle.get('value'),
description = inputDescription.get('value'),
tag = inputTag.get('value');
hasFilledIn = true;
if (title) {
videoData.title = title.replace(/<|>/g, '');
}
if (description) {
videoData.description = description.replace(/<|>/g, '');
}
if (tag && tag != '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230') {
videoData.tag = tag.replace(/,/g, ',').replace(/<|>/g, '');
}
if (!isUploadComplete) {
videoForm.addClass('hidden');
videoInfo.removeClass('hidden');
videoInfo.one('#titleInfo').setContent(videoData.title);
videoInfo.one('#descriptionInfo').setContent(videoData.description);
videoInfo.one('#tagInfo').setContent(videoData.tag);
videoInfo.one('#typeInfo').setContent(videoType);
} else {
sendvideoForm();
}
});
//继续上传
Y.one('#uploadBtn').on('click', function (e) {
e.preventDefault();
window.location.reload();
});
//修改视频信息
Y.one('#modifyVideoInfo').on('click', function () {
hasFilledIn = false;
videoInfo.addClass('hidden');
videoForm.one('#cancelModify').removeClass('hidden');
videoForm.removeClass('hidden');
});
//取消修改
Y.one('#cancelModify').on('click', function (e) {
e.preventDefault();
hasFilledIn = true;
videoForm.addClass('hidden');
videoInfo.removeClass('hidden');
this.addClass('hidden');
});
/*表单开始*/
//关闭成功提示
Y.one('#closeTip').on('click', function () {
this.ancestor().addClass('hidden');
});
//标题
inputTitle.on('keyup', function () {
var valueStr=this.get('value'),
cutValueStr=cutString(valueStr,60);
if(valueStr!=cutValueStr){
this.set('value',cutValueStr);
}
});
//简介
inputDescription.on('focus', function () {
Y.one('#descriptionTips').setStyle('visibility', 'visible');
});
inputDescription.on('blur', function () {
Y.one('#descriptionTips').setStyle('visibility', 'hidden');
});
inputDescription.on('keyup', function () {
var str = this.get('value'),
strReg = /[^x00-xff]/g,
strLen = str.replace(strReg, 'aa').length,
newStr;
if (strLen > 500) {
for (i = 250; i < strLen; i++) {
newStr = str.substr(0, i).replace(strReg, "aa");
if (newStr.length >= 500) {
this.set('value', str.substr(0, i));
break;
}
};
strLen = 500;
}
wordCount.setContent(Math.floor((500 - strLen) / 2));
});
//标签
inputTag.on('focus', function () {
if (this.get('value') == '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230') {
Y.one('#tagTips').setStyle('visibility', 'visible');
this.removeClass('gray');
this.set('value', '');
}
});
inputTag.on('blur', function () {
if (this.get('value') == '') {
Y.one('#tagTips').setStyle('visibility', 'hidden');
this.addClass('gray');
this.set('value', '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230');
}
});
inputTag.on('keyup', function () {
var valueStr=this.get('value'),
cutValueStr=cutString(valueStr,60);
if(valueStr!=cutValueStr){
this.set('value',cutValueStr);
}
});
//类型
Y.on('click', function () {
videoData.type = this.get('value');
videoType = this.next().getContent();
}, '#inputType input');
/*表单结束*/
Y.on('domready', function () {
var cancelAlert = Y.one('#cancelAlert'),
errorAlert = Y.one('#errorAlert');
cancelUploadOl = new Y.Overlay({
srcNode : cancelAlert,
visible : false,
zIndex : 10000,
render : true
});
errorUploadOl = new Y.Overlay({
srcNode : errorAlert,
visible : false,
zIndex : 10000,
render : true
});
cancelAlert.removeClass('hidden');
errorAlert.removeClass('hidden');
});
inputTag.set('value', '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230');
inputDescription.set('value', '');
}
} else {
mask.setStyle('display', 'block');
loginOl.centered();
loginOl.show();
Y.on('click', function () {
if (Y.UA.ie) {
if (history.length == 0) {
window.location.href = "http://v.163.com";
} else {
window.history.back();
}
} else {
if (history.length == 1) {
window.location.href = "http://v.163.com";
} else {
window.history.back();
}
}
}, '.v-login-close');
}
});
| } else {
Y.on('click', func | conditional_block |
upload.js | YUI({
combine : true,
comboBase : 'http://img4.cache.netease.com/service/combo?'
| selectFileBtn = Y.one('#selectFileBtn'), //选择按钮
fileInput = Y.one('#fileInput'),
uploading = Y.one("#uploading"), //上传进度box
uploadProgess = Y.one('#uploadProgess'), //进度条
//uploadRate = Y.one('#uploadRate'), //速度
timeRemaining = Y.one('#timeRemaining'), //剩余时间
uploadFinish = Y.one('#uploadFinish'), //上传完成
uploadSuccess = Y.one('#uploadSuccess'), //上传成功
videoInfo = Y.one('#videoInfo'), //视频信息
bytesLoaded = 0,
timer,
file, //文件
fileName,
fileSize,
videoForm = Y.one('#videoForm'), //视频信息box
cancelUploadOl, //弹窗
errorUploadOl,
videoId, //视频id
videoData = {
type : 60
},
videoInfoForm = videoForm.one('form'),
videoType = '原创', //存储视频类型用于显示
hasFilledIn = false, //是否提交信息
isUploadComplete = false, //是否上传完
inputTitle = Y.one('#inputTitle'),
inputDescription = Y.one('#inputDescription'),
inputTag = Y.one('#inputTag'),
inputType=Y.one('#inputType'),
wordCount = Y.one('#descriptionTips span'),
unloadEvent,
uploader = new Y.Uploader({
selectFilesButton : Y.one('#selectFilesButton'),
buttonClassNames : {
hover :
'v-upload-btn-hover',
active :
'',
disabled :
'',
focus :
''
},
swfURL : "http://swf.ws.126.net/v/flashuploader.swf?" + Math.random(),
uploadURL : "http://ugcv.ws.netease.com/video/transcode?watermark=1&enctype=1&ugc=1"
});
uploader.render('#selectFileBtn');
//发送视频信息
function sendvideoForm() {
hasFilledIn = false;
isUploadComplete = false;
document.domain = "163.com";
document.getElementById('crossdomain').contentWindow.Y.io('http://so.v.163.com/ugc/addvideoasync.do', {
method : 'POST',
data : videoData,
on : {
success : function (id,o) {
data = Y.JSON.parse(o.responseText);
if(data.info=='add success'){
uploading.addClass('hidden');
videoInfo.addClass('hidden');
uploadFinish.addClass('hidden');
videoForm.addClass('hidden');
uploadSuccess.removeClass('hidden');
uploadSuccess.one('#videoCover').setContent('<img src="' + videoData.imagepath + '" width="120" height="90" alt="' + videoData.title.replace(/"|'/g, '') + '">');
window.onbeforeunload=null;
videoCallbacked && videoCallbacked(data);
}
}
}
});
}
//秒算时间
function timeConver(scd) {
var h = 0,
m = 0,
s = 0,
resultString;
if (scd >= 3600) {
h = Math.floor(scd / 3600);
if (scd - h * 3600 > 0) {
m = Math.floor((scd - h * 3600) / 60);
if (scd - h * 3600 - m * 60 > 0) {
s = scd - h * 3600 - m * 60;
}
};
resultString = h + '\u5c0f\u65f6' + m + '\u5206' + s + '\u79d2';
} else if (scd >= 60) {
m = Math.floor(scd / 60);
if (scd - m * 60 > 0) {
s = scd - m * 60;
};
resultString = m + '\u5206' + s + '\u79d2';
} else {
s = scd;
resultString = s + '\u79d2';
};
return resultString;
}
//截字
function cutString(str,num){
var strReg = /[^x00-xff]/g,
strLen = str.replace(strReg, 'aa').length,
newStr,
fullWidthNum=num/2;
if (strLen > num) {
for (i = fullWidthNum; i < strLen; i++) {
newStr = str.substr(0, i).replace(strReg, "aa");
if (newStr.length >= num) {
return str.substr(0, i);
}
};
}else{
return str;
}
}
//选择文件
uploader.after("fileselect", function (event) {
var suffix;
file = event.fileList[0];
fileName=file.get('name').replace(/<|>/g, '');
suffix=fileName.substring(fileName.lastIndexOf('.'));
videoData.title = cutString(fileName.replace(suffix,''),60);
fileName=videoData.title+suffix;
fileSize = file.get('size');
selectFileBtn.setStyle('z-index', -1);
selectFile.addClass('hidden');
uploading.removeClass('hidden');
//videoForm.removeClass('hidden');
Y.all('.fileUploading').each(function (node) {
node.setContent(fileName);
});
Y.one('#inputTitle').set('value', videoData.title);
uploader.uploadAll();
unloadEvent=window.onbeforeunload = function (e) {
e = e || window.event;
e.returnValue = '\u89c6\u9891\u4e0a\u4f20\u64cd\u4f5c\u5c1a\u672a\u5b8c\u6210\uff0c\u60a8\u7684\u64cd\u4f5c\u4f1a\u5bfc\u81f4\u89c6\u9891\u4e0a\u4f20\u88ab\u53d6\u6d88\uff0c\u662f\u5426\u786e\u5b9a\u5173\u95ed\u7a97\u53e3\uff1f';
return '\u89c6\u9891\u4e0a\u4f20\u64cd\u4f5c\u5c1a\u672a\u5b8c\u6210\uff0c\u60a8\u7684\u64cd\u4f5c\u4f1a\u5bfc\u81f4\u89c6\u9891\u4e0a\u4f20\u88ab\u53d6\u6d88\uff0c\u662f\u5426\u786e\u5b9a\u5173\u95ed\u7a97\u53e3\uff1f';
}
});
//上传进度
uploader.on("uploadprogress", function (event) {
//uploadProgess.one('span').setContent(event.percentLoaded + '%');
uploadProgess.one(".bar").setStyle("width", event.percentLoaded + "%");
if (!timer) {
timer = Y.later(1000, event, function () {
var rate = (event.bytesLoaded - bytesLoaded) / 1000, //速度:byte/1000=kb
secondsRemain = Math.floor((event.bytesTotal - bytesLoaded) / 1000 / rate); //剩余秒数:kb/rate=s取整
//uploadRate.setContent(rate.toFixed(2) + 'KB/s');
bytesLoaded = event.bytesLoaded;
//timeRemaining.setContent(timeConver(secondsRemain));
timer = undefined;
});
}
if (event.percentLoaded == 100) {
//timeRemaining.setContent('0秒');
timer.cancel();
}
});
//上传结束
uploader.on("uploadcomplete", function (event) {
var msgData=Y.JSON.parse(event.data);
if (msgData.state == 'success') {
videoData.rvideoid=msgData.vid;
videoData.imagepath=msgData.snapshot[2]||msgData.snapshot[1]||msgData.snapshot[0]||'';
videoData.filepath=msgData.filepath;
videoData.soureip=msgData.soureip;
videoData.playlength=msgData.playlength;
videoData.filename=msgData.filename;
isUploadComplete = true;
if (hasFilledIn) {
sendvideoForm();
} else {
uploading.addClass('hidden');
uploadFinish.removeClass('hidden');
}
videoInfoForm.one(".v-upload-form-submit")._node.click();
} else {
Y.on('click', function (e) {
e.preventDefault();
window.onbeforeunload=null;
window.location.reload();
}, '.errorUploadY');
mask.setStyle('display', 'block');
errorUploadOl.centered();
errorUploadOl.show();
}
});
uploader.on('uploaderror',function(e){
Y.log(e);
});
//取消上传
Y.one('#cancelUpload').on('click', function (e) {
e.preventDefault();
var closeEvent = Y.on('click', function (e) {
e.preventDefault();
cancelUploadOl.hide();
mask.setStyle('display', 'none');
closeEvent.detach();
}, '.cancelUploadN'),
confirmEvent = Y.on('click', function (e) {
e.preventDefault();
window.onbeforeunload=null;
window.location.reload();
}, '#cancelUploadY');
mask.setStyle('display', 'block');
cancelUploadOl.centered();
cancelUploadOl.show();
});
//提交信息
videoInfoForm.on('submit', function (e) {
e.preventDefault();
var title = inputTitle.get('value'),
description = inputDescription.get('value'),
tag = inputTag.get('value');
hasFilledIn = true;
if (title) {
videoData.title = title.replace(/<|>/g, '');
}
if (description) {
videoData.description = description.replace(/<|>/g, '');
}
if (tag && tag != '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230') {
videoData.tag = tag.replace(/,/g, ',').replace(/<|>/g, '');
}
if (!isUploadComplete) {
videoForm.addClass('hidden');
videoInfo.removeClass('hidden');
videoInfo.one('#titleInfo').setContent(videoData.title);
videoInfo.one('#descriptionInfo').setContent(videoData.description);
videoInfo.one('#tagInfo').setContent(videoData.tag);
videoInfo.one('#typeInfo').setContent(videoType);
} else {
sendvideoForm();
}
});
//继续上传
Y.one('#uploadBtn').on('click', function (e) {
e.preventDefault();
window.location.reload();
});
//修改视频信息
Y.one('#modifyVideoInfo').on('click', function () {
hasFilledIn = false;
videoInfo.addClass('hidden');
videoForm.one('#cancelModify').removeClass('hidden');
videoForm.removeClass('hidden');
});
//取消修改
Y.one('#cancelModify').on('click', function (e) {
e.preventDefault();
hasFilledIn = true;
videoForm.addClass('hidden');
videoInfo.removeClass('hidden');
this.addClass('hidden');
});
/*表单开始*/
//关闭成功提示
Y.one('#closeTip').on('click', function () {
this.ancestor().addClass('hidden');
});
//标题
inputTitle.on('keyup', function () {
var valueStr=this.get('value'),
cutValueStr=cutString(valueStr,60);
if(valueStr!=cutValueStr){
this.set('value',cutValueStr);
}
});
//简介
inputDescription.on('focus', function () {
Y.one('#descriptionTips').setStyle('visibility', 'visible');
});
inputDescription.on('blur', function () {
Y.one('#descriptionTips').setStyle('visibility', 'hidden');
});
inputDescription.on('keyup', function () {
var str = this.get('value'),
strReg = /[^x00-xff]/g,
strLen = str.replace(strReg, 'aa').length,
newStr;
if (strLen > 500) {
for (i = 250; i < strLen; i++) {
newStr = str.substr(0, i).replace(strReg, "aa");
if (newStr.length >= 500) {
this.set('value', str.substr(0, i));
break;
}
};
strLen = 500;
}
wordCount.setContent(Math.floor((500 - strLen) / 2));
});
//标签
inputTag.on('focus', function () {
if (this.get('value') == '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230') {
Y.one('#tagTips').setStyle('visibility', 'visible');
this.removeClass('gray');
this.set('value', '');
}
});
inputTag.on('blur', function () {
if (this.get('value') == '') {
Y.one('#tagTips').setStyle('visibility', 'hidden');
this.addClass('gray');
this.set('value', '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230');
}
});
inputTag.on('keyup', function () {
var valueStr=this.get('value'),
cutValueStr=cutString(valueStr,60);
if(valueStr!=cutValueStr){
this.set('value',cutValueStr);
}
});
//类型
Y.on('click', function () {
videoData.type = this.get('value');
videoType = this.next().getContent();
}, '#inputType input');
/*表单结束*/
Y.on('domready', function () {
var cancelAlert = Y.one('#cancelAlert'),
errorAlert = Y.one('#errorAlert');
cancelUploadOl = new Y.Overlay({
srcNode : cancelAlert,
visible : false,
zIndex : 10000,
render : true
});
errorUploadOl = new Y.Overlay({
srcNode : errorAlert,
visible : false,
zIndex : 10000,
render : true
});
cancelAlert.removeClass('hidden');
errorAlert.removeClass('hidden');
});
inputTag.set('value', '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230');
inputDescription.set('value', '');
}
} else {
mask.setStyle('display', 'block');
loginOl.centered();
loginOl.show();
Y.on('click', function () {
if (Y.UA.ie) {
if (history.length == 0) {
window.location.href = "http://v.163.com";
} else {
window.history.back();
}
} else {
if (history.length == 1) {
window.location.href = "http://v.163.com";
} else {
window.history.back();
}
}
}, '.v-login-close');
}
}); | }).use('uploader', 'overlay', "json-parse",function (Y) {
if (logined) {
if (Y.Uploader.TYPE != "none") {
var selectFile = Y.one('#selectFile'), //浏览
| random_line_split |
upload.js | YUI({
combine : true,
comboBase : 'http://img4.cache.netease.com/service/combo?'
}).use('uploader', 'overlay', "json-parse",function (Y) {
if (logined) {
if (Y.Uploader.TYPE != "none") {
var selectFile = Y.one('#selectFile'), //浏览
selectFileBtn = Y.one('#selectFileBtn'), //选择按钮
fileInput = Y.one('#fileInput'),
uploading = Y.one("#uploading"), //上传进度box
uploadProgess = Y.one('#uploadProgess'), //进度条
//uploadRate = Y.one('#uploadRate'), //速度
timeRemaining = Y.one('#timeRemaining'), //剩余时间
uploadFinish = Y.one('#uploadFinish'), //上传完成
uploadSuccess = Y.one('#uploadSuccess'), //上传成功
videoInfo = Y.one('#videoInfo'), //视频信息
bytesLoaded = 0,
timer,
file, //文件
fileName,
fileSize,
videoForm = Y.one('#videoForm'), //视频信息box
cancelUploadOl, //弹窗
errorUploadOl,
videoId, //视频id
videoData = {
type : 60
},
videoInfoForm = videoForm.one('form'),
videoType = '原创', //存储视频类型用于显示
hasFilledIn = false, //是否提交信息
isUploadComplete = false, //是否上传完
inputTitle = Y.one('#inputTitle'),
inputDescription = Y.one('#inputDescription'),
inputTag = Y.one('#inputTag'),
inputType=Y.one('#inputType'),
wordCount = Y.one('#descriptionTips span'),
unloadEvent,
uploader = new Y.Uploader({
selectFilesButton : Y.one('#selectFilesButton'),
buttonClassNames : {
hover :
'v-upload-btn-hover',
active :
'',
disabled :
'',
focus :
''
},
swfURL : "http://swf.ws.126.net/v/flashuploader.swf?" + Math.random(),
uploadURL : "http://ugcv.ws.netease.com/video/transcode?watermark=1&enctype=1&ugc=1"
});
uploader.render('#selectFileBtn');
//发送视频信息
function sendvideoForm() {
hasFilledIn = false;
isUploadComplete = false;
document.domain = "163.com";
document.getElementById('c | contentWindow.Y.io('http://so.v.163.com/ugc/addvideoasync.do', {
method : 'POST',
data : videoData,
on : {
success : function (id,o) {
data = Y.JSON.parse(o.responseText);
if(data.info=='add success'){
uploading.addClass('hidden');
videoInfo.addClass('hidden');
uploadFinish.addClass('hidden');
videoForm.addClass('hidden');
uploadSuccess.removeClass('hidden');
uploadSuccess.one('#videoCover').setContent('<img src="' + videoData.imagepath + '" width="120" height="90" alt="' + videoData.title.replace(/"|'/g, '') + '">');
window.onbeforeunload=null;
videoCallbacked && videoCallbacked(data);
}
}
}
});
}
//秒算时间
function timeConver(scd) {
var h = 0,
m = 0,
s = 0,
resultString;
if (scd >= 3600) {
h = Math.floor(scd / 3600);
if (scd - h * 3600 > 0) {
m = Math.floor((scd - h * 3600) / 60);
if (scd - h * 3600 - m * 60 > 0) {
s = scd - h * 3600 - m * 60;
}
};
resultString = h + '\u5c0f\u65f6' + m + '\u5206' + s + '\u79d2';
} else if (scd >= 60) {
m = Math.floor(scd / 60);
if (scd - m * 60 > 0) {
s = scd - m * 60;
};
resultString = m + '\u5206' + s + '\u79d2';
} else {
s = scd;
resultString = s + '\u79d2';
};
return resultString;
}
//截字
function cutString(str,num){
var strReg = /[^x00-xff]/g,
strLen = str.replace(strReg, 'aa').length,
newStr,
fullWidthNum=num/2;
if (strLen > num) {
for (i = fullWidthNum; i < strLen; i++) {
newStr = str.substr(0, i).replace(strReg, "aa");
if (newStr.length >= num) {
return str.substr(0, i);
}
};
}else{
return str;
}
}
//选择文件
uploader.after("fileselect", function (event) {
var suffix;
file = event.fileList[0];
fileName=file.get('name').replace(/<|>/g, '');
suffix=fileName.substring(fileName.lastIndexOf('.'));
videoData.title = cutString(fileName.replace(suffix,''),60);
fileName=videoData.title+suffix;
fileSize = file.get('size');
selectFileBtn.setStyle('z-index', -1);
selectFile.addClass('hidden');
uploading.removeClass('hidden');
//videoForm.removeClass('hidden');
Y.all('.fileUploading').each(function (node) {
node.setContent(fileName);
});
Y.one('#inputTitle').set('value', videoData.title);
uploader.uploadAll();
unloadEvent=window.onbeforeunload = function (e) {
e = e || window.event;
e.returnValue = '\u89c6\u9891\u4e0a\u4f20\u64cd\u4f5c\u5c1a\u672a\u5b8c\u6210\uff0c\u60a8\u7684\u64cd\u4f5c\u4f1a\u5bfc\u81f4\u89c6\u9891\u4e0a\u4f20\u88ab\u53d6\u6d88\uff0c\u662f\u5426\u786e\u5b9a\u5173\u95ed\u7a97\u53e3\uff1f';
return '\u89c6\u9891\u4e0a\u4f20\u64cd\u4f5c\u5c1a\u672a\u5b8c\u6210\uff0c\u60a8\u7684\u64cd\u4f5c\u4f1a\u5bfc\u81f4\u89c6\u9891\u4e0a\u4f20\u88ab\u53d6\u6d88\uff0c\u662f\u5426\u786e\u5b9a\u5173\u95ed\u7a97\u53e3\uff1f';
}
});
//上传进度
uploader.on("uploadprogress", function (event) {
//uploadProgess.one('span').setContent(event.percentLoaded + '%');
uploadProgess.one(".bar").setStyle("width", event.percentLoaded + "%");
if (!timer) {
timer = Y.later(1000, event, function () {
var rate = (event.bytesLoaded - bytesLoaded) / 1000, //速度:byte/1000=kb
secondsRemain = Math.floor((event.bytesTotal - bytesLoaded) / 1000 / rate); //剩余秒数:kb/rate=s取整
//uploadRate.setContent(rate.toFixed(2) + 'KB/s');
bytesLoaded = event.bytesLoaded;
//timeRemaining.setContent(timeConver(secondsRemain));
timer = undefined;
});
}
if (event.percentLoaded == 100) {
//timeRemaining.setContent('0秒');
timer.cancel();
}
});
//上传结束
uploader.on("uploadcomplete", function (event) {
var msgData=Y.JSON.parse(event.data);
if (msgData.state == 'success') {
videoData.rvideoid=msgData.vid;
videoData.imagepath=msgData.snapshot[2]||msgData.snapshot[1]||msgData.snapshot[0]||'';
videoData.filepath=msgData.filepath;
videoData.soureip=msgData.soureip;
videoData.playlength=msgData.playlength;
videoData.filename=msgData.filename;
isUploadComplete = true;
if (hasFilledIn) {
sendvideoForm();
} else {
uploading.addClass('hidden');
uploadFinish.removeClass('hidden');
}
videoInfoForm.one(".v-upload-form-submit")._node.click();
} else {
Y.on('click', function (e) {
e.preventDefault();
window.onbeforeunload=null;
window.location.reload();
}, '.errorUploadY');
mask.setStyle('display', 'block');
errorUploadOl.centered();
errorUploadOl.show();
}
});
uploader.on('uploaderror',function(e){
Y.log(e);
});
//取消上传
Y.one('#cancelUpload').on('click', function (e) {
e.preventDefault();
var closeEvent = Y.on('click', function (e) {
e.preventDefault();
cancelUploadOl.hide();
mask.setStyle('display', 'none');
closeEvent.detach();
}, '.cancelUploadN'),
confirmEvent = Y.on('click', function (e) {
e.preventDefault();
window.onbeforeunload=null;
window.location.reload();
}, '#cancelUploadY');
mask.setStyle('display', 'block');
cancelUploadOl.centered();
cancelUploadOl.show();
});
//提交信息
videoInfoForm.on('submit', function (e) {
e.preventDefault();
var title = inputTitle.get('value'),
description = inputDescription.get('value'),
tag = inputTag.get('value');
hasFilledIn = true;
if (title) {
videoData.title = title.replace(/<|>/g, '');
}
if (description) {
videoData.description = description.replace(/<|>/g, '');
}
if (tag && tag != '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230') {
videoData.tag = tag.replace(/,/g, ',').replace(/<|>/g, '');
}
if (!isUploadComplete) {
videoForm.addClass('hidden');
videoInfo.removeClass('hidden');
videoInfo.one('#titleInfo').setContent(videoData.title);
videoInfo.one('#descriptionInfo').setContent(videoData.description);
videoInfo.one('#tagInfo').setContent(videoData.tag);
videoInfo.one('#typeInfo').setContent(videoType);
} else {
sendvideoForm();
}
});
//继续上传
Y.one('#uploadBtn').on('click', function (e) {
e.preventDefault();
window.location.reload();
});
//修改视频信息
Y.one('#modifyVideoInfo').on('click', function () {
hasFilledIn = false;
videoInfo.addClass('hidden');
videoForm.one('#cancelModify').removeClass('hidden');
videoForm.removeClass('hidden');
});
//取消修改
Y.one('#cancelModify').on('click', function (e) {
e.preventDefault();
hasFilledIn = true;
videoForm.addClass('hidden');
videoInfo.removeClass('hidden');
this.addClass('hidden');
});
/*表单开始*/
//关闭成功提示
Y.one('#closeTip').on('click', function () {
this.ancestor().addClass('hidden');
});
//标题
inputTitle.on('keyup', function () {
var valueStr=this.get('value'),
cutValueStr=cutString(valueStr,60);
if(valueStr!=cutValueStr){
this.set('value',cutValueStr);
}
});
//简介
inputDescription.on('focus', function () {
Y.one('#descriptionTips').setStyle('visibility', 'visible');
});
inputDescription.on('blur', function () {
Y.one('#descriptionTips').setStyle('visibility', 'hidden');
});
inputDescription.on('keyup', function () {
var str = this.get('value'),
strReg = /[^x00-xff]/g,
strLen = str.replace(strReg, 'aa').length,
newStr;
if (strLen > 500) {
for (i = 250; i < strLen; i++) {
newStr = str.substr(0, i).replace(strReg, "aa");
if (newStr.length >= 500) {
this.set('value', str.substr(0, i));
break;
}
};
strLen = 500;
}
wordCount.setContent(Math.floor((500 - strLen) / 2));
});
//标签
inputTag.on('focus', function () {
if (this.get('value') == '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230') {
Y.one('#tagTips').setStyle('visibility', 'visible');
this.removeClass('gray');
this.set('value', '');
}
});
inputTag.on('blur', function () {
if (this.get('value') == '') {
Y.one('#tagTips').setStyle('visibility', 'hidden');
this.addClass('gray');
this.set('value', '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230');
}
});
inputTag.on('keyup', function () {
var valueStr=this.get('value'),
cutValueStr=cutString(valueStr,60);
if(valueStr!=cutValueStr){
this.set('value',cutValueStr);
}
});
//类型
Y.on('click', function () {
videoData.type = this.get('value');
videoType = this.next().getContent();
}, '#inputType input');
/*表单结束*/
Y.on('domready', function () {
var cancelAlert = Y.one('#cancelAlert'),
errorAlert = Y.one('#errorAlert');
cancelUploadOl = new Y.Overlay({
srcNode : cancelAlert,
visible : false,
zIndex : 10000,
render : true
});
errorUploadOl = new Y.Overlay({
srcNode : errorAlert,
visible : false,
zIndex : 10000,
render : true
});
cancelAlert.removeClass('hidden');
errorAlert.removeClass('hidden');
});
inputTag.set('value', '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230');
inputDescription.set('value', '');
}
} else {
mask.setStyle('display', 'block');
loginOl.centered();
loginOl.show();
Y.on('click', function () {
if (Y.UA.ie) {
if (history.length == 0) {
window.location.href = "http://v.163.com";
} else {
window.history.back();
}
} else {
if (history.length == 1) {
window.location.href = "http://v.163.com";
} else {
window.history.back();
}
}
}, '.v-login-close');
}
});
| rossdomain'). | identifier_name |
upload.js | YUI({
combine : true,
comboBase : 'http://img4.cache.netease.com/service/combo?'
}).use('uploader', 'overlay', "json-parse",function (Y) {
if (logined) {
if (Y.Uploader.TYPE != "none") {
var selectFile = Y.one('#selectFile'), //浏览
selectFileBtn = Y.one('#selectFileBtn'), //选择按钮
fileInput = Y.one('#fileInput'),
uploading = Y.one("#uploading"), //上传进度box
uploadProgess = Y.one('#uploadProgess'), //进度条
//uploadRate = Y.one('#uploadRate'), //速度
timeRemaining = Y.one('#timeRemaining'), //剩余时间
uploadFinish = Y.one('#uploadFinish'), //上传完成
uploadSuccess = Y.one('#uploadSuccess'), //上传成功
videoInfo = Y.one('#videoInfo'), //视频信息
bytesLoaded = 0,
timer,
file, //文件
fileName,
fileSize,
videoForm = Y.one('#videoForm'), //视频信息box
cancelUploadOl, //弹窗
errorUploadOl,
videoId, //视频id
videoData = {
type : 60
},
videoInfoForm = videoForm.one('form'),
videoType = '原创', //存储视频类型用于显示
hasFilledIn = false, //是否提交信息
isUploadComplete = false, //是否上传完
inputTitle = Y.one('#inputTitle'),
inputDescription = Y.one('#inputDescription'),
inputTag = Y.one('#inputTag'),
inputType=Y.one('#inputType'),
wordCount = Y.one('#descriptionTips span'),
unloadEvent,
uploader = new Y.Uploader({
selectFilesButton : Y.one('#selectFilesButton'),
buttonClassNames : {
hover :
'v-upload-btn-hover',
active :
'',
disabled :
'',
focus :
''
},
swfURL : "http://swf.ws.126.net/v/flashuploader.swf?" + Math.random(),
uploadURL : "http://ugcv.ws.netease.com/video/transcode?watermark=1&enctype=1&ugc=1"
});
uploader.render('#selectFileBtn');
//发送视频信息
function sendvideoForm() {
hasFilledIn = false;
isUploadComplete = false;
document.domain = "163.com";
document.getElementById('crossdomain').contentWindow.Y.io('http://so.v.163.com/ugc/addvideoasync.do', {
method : 'POST',
data : videoData,
on : {
success : function (id,o) {
data = Y.JSON.parse(o.responseText);
if(data.info=='add success'){
uploading.addClass('hidden');
videoInfo.addClass('hidden');
uploadFinish.addClass('hidden');
videoForm.addClass('hidden');
uploadSuccess.removeClass('hidden');
uploadSuccess.one('#videoCover').setContent('<img src="' + videoData.imagepath + '" width="120" height="90" alt="' + videoData.title.replace(/"|'/g, '') + '">');
window.onbeforeunload=null;
videoCallbacked && videoCallbacked(data);
}
}
}
});
}
//秒算时间
function timeConver(scd) {
var h = 0,
m = 0,
s = 0,
resultString;
if (scd >= 3600) {
h = Math.floor(scd / 3600);
if (scd - h * 3600 > 0) | dthNum=num/2;
if (strLen > num) {
for (i = fullWidthNum; i < strLen; i++) {
newStr = str.substr(0, i).replace(strReg, "aa");
if (newStr.length >= num) {
return str.substr(0, i);
}
};
}else{
return str;
}
}
//选择文件
uploader.after("fileselect", function (event) {
var suffix;
file = event.fileList[0];
fileName=file.get('name').replace(/<|>/g, '');
suffix=fileName.substring(fileName.lastIndexOf('.'));
videoData.title = cutString(fileName.replace(suffix,''),60);
fileName=videoData.title+suffix;
fileSize = file.get('size');
selectFileBtn.setStyle('z-index', -1);
selectFile.addClass('hidden');
uploading.removeClass('hidden');
//videoForm.removeClass('hidden');
Y.all('.fileUploading').each(function (node) {
node.setContent(fileName);
});
Y.one('#inputTitle').set('value', videoData.title);
uploader.uploadAll();
unloadEvent=window.onbeforeunload = function (e) {
e = e || window.event;
e.returnValue = '\u89c6\u9891\u4e0a\u4f20\u64cd\u4f5c\u5c1a\u672a\u5b8c\u6210\uff0c\u60a8\u7684\u64cd\u4f5c\u4f1a\u5bfc\u81f4\u89c6\u9891\u4e0a\u4f20\u88ab\u53d6\u6d88\uff0c\u662f\u5426\u786e\u5b9a\u5173\u95ed\u7a97\u53e3\uff1f';
return '\u89c6\u9891\u4e0a\u4f20\u64cd\u4f5c\u5c1a\u672a\u5b8c\u6210\uff0c\u60a8\u7684\u64cd\u4f5c\u4f1a\u5bfc\u81f4\u89c6\u9891\u4e0a\u4f20\u88ab\u53d6\u6d88\uff0c\u662f\u5426\u786e\u5b9a\u5173\u95ed\u7a97\u53e3\uff1f';
}
});
//上传进度
uploader.on("uploadprogress", function (event) {
//uploadProgess.one('span').setContent(event.percentLoaded + '%');
uploadProgess.one(".bar").setStyle("width", event.percentLoaded + "%");
if (!timer) {
timer = Y.later(1000, event, function () {
var rate = (event.bytesLoaded - bytesLoaded) / 1000, //速度:byte/1000=kb
secondsRemain = Math.floor((event.bytesTotal - bytesLoaded) / 1000 / rate); //剩余秒数:kb/rate=s取整
//uploadRate.setContent(rate.toFixed(2) + 'KB/s');
bytesLoaded = event.bytesLoaded;
//timeRemaining.setContent(timeConver(secondsRemain));
timer = undefined;
});
}
if (event.percentLoaded == 100) {
//timeRemaining.setContent('0秒');
timer.cancel();
}
});
//上传结束
uploader.on("uploadcomplete", function (event) {
var msgData=Y.JSON.parse(event.data);
if (msgData.state == 'success') {
videoData.rvideoid=msgData.vid;
videoData.imagepath=msgData.snapshot[2]||msgData.snapshot[1]||msgData.snapshot[0]||'';
videoData.filepath=msgData.filepath;
videoData.soureip=msgData.soureip;
videoData.playlength=msgData.playlength;
videoData.filename=msgData.filename;
isUploadComplete = true;
if (hasFilledIn) {
sendvideoForm();
} else {
uploading.addClass('hidden');
uploadFinish.removeClass('hidden');
}
videoInfoForm.one(".v-upload-form-submit")._node.click();
} else {
Y.on('click', function (e) {
e.preventDefault();
window.onbeforeunload=null;
window.location.reload();
}, '.errorUploadY');
mask.setStyle('display', 'block');
errorUploadOl.centered();
errorUploadOl.show();
}
});
uploader.on('uploaderror',function(e){
Y.log(e);
});
//取消上传
Y.one('#cancelUpload').on('click', function (e) {
e.preventDefault();
var closeEvent = Y.on('click', function (e) {
e.preventDefault();
cancelUploadOl.hide();
mask.setStyle('display', 'none');
closeEvent.detach();
}, '.cancelUploadN'),
confirmEvent = Y.on('click', function (e) {
e.preventDefault();
window.onbeforeunload=null;
window.location.reload();
}, '#cancelUploadY');
mask.setStyle('display', 'block');
cancelUploadOl.centered();
cancelUploadOl.show();
});
//提交信息
videoInfoForm.on('submit', function (e) {
e.preventDefault();
var title = inputTitle.get('value'),
description = inputDescription.get('value'),
tag = inputTag.get('value');
hasFilledIn = true;
if (title) {
videoData.title = title.replace(/<|>/g, '');
}
if (description) {
videoData.description = description.replace(/<|>/g, '');
}
if (tag && tag != '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230') {
videoData.tag = tag.replace(/,/g, ',').replace(/<|>/g, '');
}
if (!isUploadComplete) {
videoForm.addClass('hidden');
videoInfo.removeClass('hidden');
videoInfo.one('#titleInfo').setContent(videoData.title);
videoInfo.one('#descriptionInfo').setContent(videoData.description);
videoInfo.one('#tagInfo').setContent(videoData.tag);
videoInfo.one('#typeInfo').setContent(videoType);
} else {
sendvideoForm();
}
});
//继续上传
Y.one('#uploadBtn').on('click', function (e) {
e.preventDefault();
window.location.reload();
});
//修改视频信息
Y.one('#modifyVideoInfo').on('click', function () {
hasFilledIn = false;
videoInfo.addClass('hidden');
videoForm.one('#cancelModify').removeClass('hidden');
videoForm.removeClass('hidden');
});
//取消修改
Y.one('#cancelModify').on('click', function (e) {
e.preventDefault();
hasFilledIn = true;
videoForm.addClass('hidden');
videoInfo.removeClass('hidden');
this.addClass('hidden');
});
/*表单开始*/
//关闭成功提示
Y.one('#closeTip').on('click', function () {
this.ancestor().addClass('hidden');
});
//标题
inputTitle.on('keyup', function () {
var valueStr=this.get('value'),
cutValueStr=cutString(valueStr,60);
if(valueStr!=cutValueStr){
this.set('value',cutValueStr);
}
});
//简介
inputDescription.on('focus', function () {
Y.one('#descriptionTips').setStyle('visibility', 'visible');
});
inputDescription.on('blur', function () {
Y.one('#descriptionTips').setStyle('visibility', 'hidden');
});
inputDescription.on('keyup', function () {
var str = this.get('value'),
strReg = /[^x00-xff]/g,
strLen = str.replace(strReg, 'aa').length,
newStr;
if (strLen > 500) {
for (i = 250; i < strLen; i++) {
newStr = str.substr(0, i).replace(strReg, "aa");
if (newStr.length >= 500) {
this.set('value', str.substr(0, i));
break;
}
};
strLen = 500;
}
wordCount.setContent(Math.floor((500 - strLen) / 2));
});
//标签
inputTag.on('focus', function () {
if (this.get('value') == '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230') {
Y.one('#tagTips').setStyle('visibility', 'visible');
this.removeClass('gray');
this.set('value', '');
}
});
inputTag.on('blur', function () {
if (this.get('value') == '') {
Y.one('#tagTips').setStyle('visibility', 'hidden');
this.addClass('gray');
this.set('value', '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230');
}
});
inputTag.on('keyup', function () {
var valueStr=this.get('value'),
cutValueStr=cutString(valueStr,60);
if(valueStr!=cutValueStr){
this.set('value',cutValueStr);
}
});
//类型
Y.on('click', function () {
videoData.type = this.get('value');
videoType = this.next().getContent();
}, '#inputType input');
/*表单结束*/
Y.on('domready', function () {
var cancelAlert = Y.one('#cancelAlert'),
errorAlert = Y.one('#errorAlert');
cancelUploadOl = new Y.Overlay({
srcNode : cancelAlert,
visible : false,
zIndex : 10000,
render : true
});
errorUploadOl = new Y.Overlay({
srcNode : errorAlert,
visible : false,
zIndex : 10000,
render : true
});
cancelAlert.removeClass('hidden');
errorAlert.removeClass('hidden');
});
inputTag.set('value', '\u586b\u5199\u8fd9\u4e2a\u89c6\u9891\u7684\u5173\u952e\u8bcd\uff0c\u4f7f\u4f60\u7684\u89c6\u9891\u66f4\u5bb9\u6613\u88ab\u522b\u4eba\u627e\u5230');
inputDescription.set('value', '');
}
} else {
mask.setStyle('display', 'block');
loginOl.centered();
loginOl.show();
Y.on('click', function () {
if (Y.UA.ie) {
if (history.length == 0) {
window.location.href = "http://v.163.com";
} else {
window.history.back();
}
} else {
if (history.length == 1) {
window.location.href = "http://v.163.com";
} else {
window.history.back();
}
}
}, '.v-login-close');
}
});
| {
m = Math.floor((scd - h * 3600) / 60);
if (scd - h * 3600 - m * 60 > 0) {
s = scd - h * 3600 - m * 60;
}
};
resultString = h + '\u5c0f\u65f6' + m + '\u5206' + s + '\u79d2';
} else if (scd >= 60) {
m = Math.floor(scd / 60);
if (scd - m * 60 > 0) {
s = scd - m * 60;
};
resultString = m + '\u5206' + s + '\u79d2';
} else {
s = scd;
resultString = s + '\u79d2';
};
return resultString;
}
//截字
function cutString(str,num){
var strReg = /[^x00-xff]/g,
strLen = str.replace(strReg, 'aa').length,
newStr,
fullWi | identifier_body |
salt.go | package saltstack
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"gin-web-demo/conf"
"gin-web-demo/dao"
"gin-web-demo/tools"
"io/ioutil"
"log"
"net/http"
"reflect"
"time"
)
//salt控制器
type SaltController struct {
msg conf.Activestates
}
//获取salt初始化信息
func (s *SaltController) GetToken() (saltinfo conf.Returninfo) {
/*
如果是带有HTTPs的则还需要传递TLS进Client中
*/
//配置请求信息
info := &conf.Info{
Username: conf.Config.Conf.Saltauth[0].Username,
Password: conf.Config.Conf.Saltauth[0].Password,
Eauth: conf.Config.Conf.Saltauth[0].Eauth,
}
//序列化
buf, err := json.Marshal(info)
if !tools.CheckERR(err, "Json Marshal is Failed") {
return saltinfo
}
//新建一个请求
re, err := http.NewRequest("POST", conf.Config.Conf.URL_LOGIN, bytes.NewBuffer(buf))
if !tools.CheckERR(err, "Creata New Request") {
return saltinfo
}
//设置请求格式
re.Header.Set("Accept", conf.Json_Accept)
re.Header.Set("Content-Type", conf.Json_Content_Type)
//新建一个请求
client := http.Client{
Timeout: 3 * time.Second,
}
//创建请求
respon, err := client.Do(re)
if !tools.CheckERR(err, "Create Client Request") {
return saltinfo
}
defer respon.Body.Close()
//读返回信息
body, err := ioutil.ReadAll(respon.Body)
if !tools.CheckERR(err, "ReadALL response Body Failed") {
return saltinfo
}
//反序列化
err = json.Unmarshal(body, &saltinfo)
if !tools.CheckERR(err, "Json Unmarshal Returninfo Failed") {
return saltinfo
}
//fmt.Println(saltinfo)
return saltinfo
}
//异步执行指定的模块
func (s *SaltController) PostModulJob(token string, cmd *conf.JobRunner) *conf.JobReturn {
var (
//临时使用
relist conf.JobReturn
)
//调用构造函数
response := pulicPost(token, cmd)
//读信息
infodata, _ := ioutil.ReadAll(response.Body)
json.Unmarshal(infodata, &relist)
//fmt.Println("infodata=", infodata)
return &relist
}
//同步执行模块
func (s *SaltController) PostRsyncModulJob(token string, cmd *conf.JobRunner) string {
var (
//临时使用
data conf.CheckActive
)
//调用构造函数
response := pulicPost(token, cmd)
//读信息
infodata, _ := ioutil.ReadAll(response.Body)
json.Unmarshal(infodata, &data)
fmt.Println("infodata=", infodata)
//反射出结果
obj := data.Return[0].(map[string]interface{})
//返回对象
result := obj[cmd.Tgt].(string)
fmt.Println("infodata=", result)
return result
}
//公共的POST整理
func pulicPost(token string, para *conf.JobRunner) (response *http.Response) {
//构建json参数
cmd := &conf.JobRunner{
Client: para.Client,
Tgt: para.Tgt,
Fun: para.Fun,
Arg: para.Arg,
Expr_form: para.Expr_form,
}
//Object序列化
data, err := json.Marshal(cmd)
if !tools.CheckERR(err, "PostModulJob Object json marshal Is Failed") {
return response
}
log.Printf("cmd=%s,序列化后=%s\n", cmd, string(data))
//新建请求
re, err := http.NewRequest("POST", conf.Config.Conf.URL, bytes.NewBuffer(data))
if !tools.CheckERR(err, "Create PostModulJob Request Failed") {
return response
}
defer re.Body.Close()
//设置请求头
re.Header.Set("Accept", conf.Json_Accept)
re.Header.Set("X-Auth-Token", token)
re.Header.Set("Content-Type", conf.Json_Content_Type)
log.Printf("re.body=%s\n", re.Body)
//fmt.Println(re,"conf.Config.Conf.URL=",conf.Config.Conf.URL)
//新建Client
client := http.Client{}
//请求对端
response, err = client.Do(re)
if !tools.CheckERR(err, "PostModulJob Client Request is Failed") {
return
}
return response
}
//执行Job任务查询
func (s *SaltController) QueryJob(jobid string, token string) conf.JobInfo {
var (
buf []byte
result conf.JobInfo
)
//新建请求
re, err := http.NewRequest("GET", conf.Config.Conf.URL_JOBS+"/"+jobid, bytes.NewBuffer(buf))
if !tools.CheckERR(err, "Create PostModulJob Request Failed") {
return result
}
defer re.Body.Close()
//设置请求头
re.Header.Set("Accept", conf.Json_Accept)
re.Header.Set("X-Auth-Token", token)
//re.Header.Set("Content-Type", conf.Json_Content_Type)
//fmt.Println(re)
//新建Client
client := http.Client{}
//请求对端
response, err := client.Do(re)
if !tools.CheckERR(err, "PostModulJob Client Request is Failed") {
return result
}
//读信息
infodata, _ := ioutil.ReadAll(response.Body)
//反序列化
json.Unmarshal(infodata, &result)
if !tools.CheckERR(err, "JobResult Unmarshal is Failed") {
return result
}
//fmt.Println("序列化后的数据", infodata)
return result
}
//返回任务的最终执行结果
func (s *SaltController) ReturnResult(jid string) string {
//获取数据源
data := reddao.GetDate(jid)
return data
}
//获取CMDB的认证Token
func (s *SaltController) GetCMDBAUTH() error {
var obj conf.TokenCmdb
//构建对象
auth := &conf.AuthCmdb{
UserName: conf.Config.Conf.Ldap_user,
PassWord: tools.GetLdapPasswd(conf.Config.Conf.Ldap_passwd),
}
//序列化
au, err := json.Marshal(auth)
if !tools.CheckERR(err, "") {
return err
}
//构建连接
req, err := http.NewRequest("POST", conf.Config.Conf.CMDB_api, bytes.NewBuffer(au))
tools.CheckERR(err, "New CMDB Request URL IS Failed")
//设置request
req.Header.Set("Content-Type", "application/json")
//请求连接等待返回
client := http.Client{}
repon, err := client.Do(req)
//读信息
infodata, _ := ioutil.ReadAll(repon.Body)
tools.CheckERR(err, "Request CMDB IS Failed")
//反序列化
err = json.Unmarshal(infodata, &obj)
tools.CheckERR(err, "json Unmarshal CMDB IS Failed")
log.Printf("AuthToken获取回来的消息为=%s\n", time.Now().Format("2006-01-02 15:04:05"), string(infodata))
//存数据库
err = dao.RedisHandle{}.InsertTTLData("AuthToken", obj.Token, "EX", "18000")
tools.CheckERR(err, "json Unmarshal CMDB IS Failed")
return err
}
type Ip struct {
IP string `json:"ip"`
}
//查询CMDB的接口
func (s *SaltController) GetCMDBInfo(ips string) (string, error) {
var (
retruninfo conf.Retuencmdb
token string
)
//取token信息
if token = reddao.GetDate("AuthToken"); len(token) < 0 {
return "", errors.New("Get CMDB AuthToken is Failed,Please check !")
}
//构建参数
ip := &Ip{IP: ips}
buf, err := json.Marshal(&ip)
if !tools.CheckERR(err, "New CMDB Request URL IS Failed") {
return "", errors.New("ip参数序列化失败请检查")
}
//构建连接
req, err := http.NewRequest("POST", conf.Config.Conf.Cmdb_infoapi, bytes.NewBuffer(buf))
tools.CheckERR(err, "New CMDB Request URL IS Failed")
//设置request
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "JWT"+" "+token)
//请求连接等待返回
client := http.Client{}
repon, err := client.Do(req)
tools.CheckERR(err, "Request CMDB IS Failed")
info, _ := ioutil.ReadAll(repon.Body)
json.Unmarshal(info, &retruninfo)
if r | DB minion Address,Please check request!")
}
//log.Printf("JWT=%s,retruninfo=%s,error=%s\n",token,retruninfo,err.Error())
return retruninfo.Data.IPgroup, err
}
//salt-minion存活检测
func (s *SaltController) ActiveSalt(address string) (bool, string) {
//获取token信息
token, err := s.Check()
fmt.Println("请求进来了", address, "", token, err)
if !tools.CheckERR(err, "获取token失败") {
return false, fmt.Sprintf("内部获取token失败,ERROR=%s", err)
}
fmt.Println("请求进来了", token)
//构建json参数
cmd := &conf.JobRunner{
Client: "local",
Tgt: address,
Fun: "test.ping",
}
fmt.Printf("token=%s,cmd=%s\n", token, cmd)
//请求对端
obj := pulicPost(token, cmd)
data, err := ioutil.ReadAll(obj.Body)
if !tools.CheckERR(err, "read checkactive is Failed") {
return false, fmt.Sprintf("读取ioutil失败,ERROR=%s", err)
}
check := &conf.CheckActive{}
err = json.Unmarshal(data, check)
tools.CheckERR(err, "ActiveCheck json unmarshal is failed!")
log.Printf("ActiveSalt返回信息为=%s\n", check)
//防止越界
//if reflect.ValueOf(check.Return).IsNil()||reflect.ValueOf(check.Return).IsValid(){
// return false,errors.New("发生未知错误,数组越界")
//}
//断言类型转换换
checks, ok := check.Return[0].(map[string]interface{})
//if !ok {
// return false,errors.New("发生未知错误,数组越界")
//}
//if len(checks) < 1 {
// fmt.Println("checks len is =", len(checks))
// return false, errors.New("该salt-minion不存在!")
//}
//if !checks[address].(bool) {
// //是否存活
// conf.WriteLog(fmt.Sprintf("%s[salt-check]存活检测失败状态为=%s\n", tools.GetTimeNow(), check))
// return false, errors.New("salt-minion死亡状态!请检查")
//}
//(只要满足以上3种情况其一)均为无效值
switch {
case len(checks) < 1:
fmt.Println("checks len is =", len(checks))
return false, "该salt-minion不存在!"
case !checks[address].(bool):
//是否存活
log.Printf("存活检测失败状态为=%s\n", tools.GetTimeNow(), check)
return false, "salt-minion死亡状态!请检查"
case !ok:
return false, "发生未知错误,数组越界"
case reflect.ValueOf(check.Return).IsNil() || reflect.ValueOf(check.Return).IsValid():
return false, "发生未知错误,数组越界"
}
log.Printf("判断结果的错误信息为Err=%s\n", err)
return true, "salt-minion存活ping通畅!"
}
//salt-minion存活检测
func (s *SaltController) ActiveSalttest(ctx context.Context, address string, database *conf.AllMessage) {
//获取token信息
token, err := s.Check()
if !tools.CheckERR(err, "获取token失败") {
fmt.Sprintf("内部获取token失败,ERROR=%s", err)
}
//构建json参数
cmd := &conf.JobRunner{
Client: "local",
Tgt: address,
Fun: "test.ping",
}
log.Printf("token=%s,cmd=%s\n", token, cmd)
//请求对端
obj := pulicPost(token, cmd)
data, err := ioutil.ReadAll(obj.Body)
if !tools.CheckERR(err, "read checkactive is Failed") {
fmt.Sprintf("读取ioutil失败,ERROR=%s", err)
}
check := &conf.CheckActive{}
err = json.Unmarshal(data, check)
tools.CheckERR(err, "ActiveCheck json unmarshal is failed!")
checks, ok := check.Return[0].(map[string]interface{})
//(只要满足以上3种情况其一)均为无效值
switch {
case len(checks) < 1:
s.msg.Msg = conf.Error_notActive
s.msg.States = false
case !checks[address].(bool):
//是否存活
log.Printf("salt-check存活检测失败状态为=%s\n", check)
s.msg.Msg = conf.Error_Delth
s.msg.States = false
case !ok:
s.msg.Msg = conf.Error_breakarry
s.msg.States = false
case reflect.ValueOf(check.Return).IsNil() || reflect.ValueOf(check.Return).IsValid():
s.msg.Msg = conf.Error_Active
s.msg.States = true
}
s.msg.Address = address
//conf.WriteLog(fmt.Sprintf("%s[Return]判断结果的错误信息为Err=%s\n", time.Now().Format("2006-01-02 15:04:05"), s.msg))
log.Printf("最终判断结果为=%s\n", s.msg)
database.Activechan <- s.msg
}
//salt-Token调用检测
func (s *SaltController) Check() (tokens string, err error) {
//获取Token信息
if tokens = reddao.GetDate("token"); tokens == "" {
fmt.Println("请求执行到获取token了")
tokens = s.GetToken().Return[0].Token
err = reddao.InsertTTLData("token", tokens, "EX", "3600")
if !tools.CheckERR(err, "Inserter Token Failed") {
return
}
log.Printf("获取Token信息=%s\n", tokens)
}
//fmt.Println("请求返回获取token了")
return
}
| etruninfo.Code != 00000 {
return "", errors.New("Dont's Get CM | conditional_block |
salt.go | package saltstack
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"gin-web-demo/conf"
"gin-web-demo/dao"
"gin-web-demo/tools"
"io/ioutil"
"log"
"net/http"
"reflect"
"time"
)
//salt控制器
type SaltController struct {
msg conf.Activestates
}
//获取salt初始化信息
func (s *SaltController) GetToken() (saltinfo conf.Returninfo) {
/*
如果是带有HTTPs的则还需要传递TLS进Client中
*/
//配置请求信息
info := &conf.Info{
Username: conf.Config.Conf.Saltauth[0].Username,
Password: conf.Config.Conf.Saltauth[0].Password,
Eauth: conf.Config.Conf.Saltauth[0].Eauth,
}
//序列化
buf, err := json.Marshal(info)
if !tools.CheckERR(err, "Json Marshal is Failed") {
return saltinfo
}
//新建一个请求
re, err := http.NewRequest("POST", conf.Config.Conf.URL_LOGIN, bytes.NewBuffer(buf))
if !tools.CheckERR(err, "Creata New Request") {
return saltinfo
}
//设置请求格式
re.Header.Set("Accept", conf.Json_Accept)
re.Header.Set("Content-Type", conf.Json_Content_Type)
//新建一个请求
client := http.Client{
Timeout: 3 * time.Second,
}
//创建请求
respon, err := client.Do(re)
if !tools.CheckERR(err, "Create Client Request") {
return saltinfo
}
defer respon.Body.Close()
//读返回信息
body, err := ioutil.ReadAll(respon.Body)
if !tools.CheckERR(err, "ReadALL response Body Failed") {
return saltinfo
}
//反序列化
err = json.Unmarshal(body, &saltinfo)
if !tools.CheckERR(err, "Json Unmarshal Returninfo Failed") {
return saltinfo
}
//fmt.Println(saltinfo)
return saltinfo
}
//异步执行指定的模块
func (s *SaltController) PostModulJob(token string, cmd *conf.JobRunner) *conf.JobReturn {
var (
//临时使用
relist conf.JobReturn
)
//调用构造函数
response := pulicPost(token, cmd)
//读信息
infodata, _ := ioutil.ReadAll(response.Body)
json.Unmarshal(infodata, &relist)
//fmt.Println("infodata=", infodata)
return &relist
}
//同步执行模块
func (s *SaltController) PostRsyncModulJob(token string, cmd *conf.JobRunner) string {
var (
//临时使用
data conf.CheckActive
)
//调用构造函数
response := pulicPost(token, cmd)
//读信息
infodata, _ := ioutil.ReadAll(response.Body)
json.Unmarshal(infodata, &data)
fmt.Println("infodata=", infodata)
//反射出结果
obj := data.Return[0].(map[string]interface{})
//返回对象
result := obj[cmd.Tgt].(string)
fmt.Println("infodata=", result)
return result
}
//公共的POST整理
func pulicPost(token string, para *conf.JobRunner) (response *http.Response) {
//构建json参数
cmd := &conf.JobRunner{
Client: para.Client,
Tgt: para.Tgt,
Fun: para.Fun,
Arg: para.Arg,
Expr_form: para.Expr_form,
}
//Object序列化
data, err := json.Marshal(cmd)
if !tools.CheckERR(err, "PostModulJob Object json marshal Is Failed") {
return response
}
log.Printf("cmd=%s,序列化后=%s\n", cmd, string(data))
//新建请求
re, err := http.NewRequest("POST", conf.Config.Conf.URL, bytes.NewBuffer(data))
if !tools.CheckERR(err, "Create PostModulJob Request Failed") {
return response
}
defer re.Body.Close()
//设置请求头
re.Header.Set("Accept", conf.Json_Accept)
re.Header.Set("X-Auth-Token", token)
re.Header.Set("Content-Type", conf.Json_Content_Type)
log.Printf("re.body=%s\n", re.Body)
//fmt.Println(re,"conf.Config.Conf.URL=",conf.Config.Conf.URL)
//新建Client
client := http.Client{}
//请求对端
response, err = client.Do(re)
if !tools.CheckERR(err, "PostModulJob Client Request is Failed") {
return
}
return response
}
//执行Job任务查询
func (s *SaltController) QueryJob(jobid string, token string) conf.JobInfo {
var (
buf []byte
result conf.JobInfo
)
//新建请求
re, err := http.NewRequest("GET", conf.Config.Conf.URL_JOBS+"/"+jobid, bytes.NewBuffer(buf))
if !tools.CheckERR(err, "Create PostModulJob Request Failed") {
return result
}
defer re.Body.Close()
//设置请求头
re.Header.Set("Accept", conf.Json_Accept)
re.Header.Set("X-Auth-Token", token)
//re.Header.Set("Content-Type", conf.Json_Content_Type)
//fmt.Println(re)
//新建Client
client := http.Client{}
//请求对端
response, err := client.Do(re)
if !tools.CheckERR(err, "PostModulJob Client Request is Failed") {
return result
}
//读信息
infodata, _ := ioutil.ReadAll(response.Body)
//反序列化
json.Unmarshal(infodata, &result)
if !tools.CheckERR(err, "JobResult Unmarshal is Failed") {
return result
}
//fmt.Println("序列化后的数据", infodata)
return result
}
//返回任务的最终执行结果
func (s *SaltController) ReturnResult(jid string) string {
//获取数据源
data := reddao.GetDate(jid)
return data
}
//获取CMDB的认证Token
func (s *SaltController) GetCMDBAUTH() error {
var obj conf.TokenCmdb
//构建对象
auth := &conf.AuthCmdb{
UserName: conf.Config.Conf.Ldap_user,
PassWord: tools.GetLdapPasswd(conf.Config.Conf.Ldap_passwd),
}
//序列化
au, err := json.Marshal(auth)
if !tools.CheckERR(err, "") {
return err
}
//构建连接
req, err := http.NewRequest("POST", conf.Config.Conf.CMDB_api, bytes.NewBuffer(au))
tools.CheckERR(err, "New CMDB Request URL IS Failed")
//设置request
req.Header.Set("Content-Type", "application/json")
//请求连接等待返回
client := http.Client{}
repon, err := client.Do(req)
//读信息
infodata, _ := ioutil.ReadAll(repon.Body)
tools.CheckERR(err, "Request CMDB IS Failed")
//反序列化
err = json.Unmarshal(infodata, &obj)
tools.CheckERR(err, "json Unmarshal CMDB IS Failed")
log.Printf("AuthToken获取回来的消息为=%s\n", time.Now().Format("2006-01-02 15:04:05"), string(infodata))
//存数据库
err = dao.RedisHandle{}.InsertTTLData("AuthToken", obj.Token, "EX", "18000")
tools.CheckERR(err, "json Unmarshal CMDB IS Failed")
return err
}
type Ip struct {
IP string `json:"ip"`
}
//查询CMDB的接口
func (s *SaltController) GetCMDBInfo(ips string) (string, error) {
var (
retruninfo conf.Retuencmdb
token string
)
//取token信息
if token = reddao.GetDate("AuthToken"); len(token) < 0 {
return "", errors.New("Get CMDB AuthToken is Failed,Please check !")
}
//构建参数
ip := &Ip{IP: ips}
buf, err := json.Marshal(&ip)
if !tools.CheckERR(err, "New CMDB Request URL IS Failed") {
return "", errors.New("ip参数序列化失败请检查")
}
//构建连接
req, err := http.NewRequest("POST", conf.Config.Conf.Cmdb_infoapi, bytes.NewBuffer(buf))
tools.CheckERR(err, "New CMDB Request URL IS Failed")
//设置request
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "JWT"+" "+token)
//请求连接等待返回
client := http.Client{}
repon, err := client.Do(req)
tools.CheckERR(err, "Request CMDB IS Failed")
info, _ := ioutil.ReadAll(repon.Body)
json.Unmarshal(info, &retruninfo)
if retruninfo.Code != 00000 {
return "", errors.New("Dont's Get CMDB minion Address,Please check request!")
}
//log.Printf("JWT=%s,retruninfo=%s,error=%s\n",token,retruninfo,err.Error())
return retruninfo.Data.IPgroup, err
}
//salt-minion存活检测
func (s *SaltController) ActiveSalt(address string) (bool, string) {
//获取token信息
token, err := s.Check()
fmt.Println("请求进来了", address, "", token, err)
if !tools.CheckERR(err, "获取token失败") {
return false, fmt.Sprintf("内部获取token失败,ERROR=%s", err)
}
fmt.Println("请求进来了", token)
//构建json参数
cmd := &conf.JobRunner{
Client: "local",
Tgt: address,
Fun: "test.ping",
}
fmt.Printf("token=%s,cmd=%s\n", token, cmd)
//请求对端
obj := pulicPost(token, cmd)
data, err := ioutil.ReadAll(obj.Body)
if !tools.CheckERR(err, "read checkactive is Failed") {
return false, fmt.Sprintf("读取ioutil失败,ERROR=%s", err)
}
check := &conf.CheckActive{}
err = json.Unmarshal(data, check)
tools.CheckERR(err, "ActiveCheck json unmarshal is failed!")
log.Printf("ActiveSalt返回信息为=%s\n", check)
//防止越界
//if reflect.ValueOf(check.Return).IsNil()||reflect.ValueOf(check.Return).IsValid(){
// return false,errors.New("发生未知错误,数组越界")
//}
//断言类型转换换
checks, ok := check.Return[0].(map[string]interface{})
//if !ok {
// return false,errors.New("发生未知错误,数组越界")
//}
//if len(checks) < 1 {
// fmt.Println("checks len is =", len(checks))
// return false, errors.New("该salt-minion不存在!")
//}
//if !checks[address].(bool) {
// //是否存活
// conf.WriteLog(fmt.Sprintf("%s[salt-check]存活检测失败状态为=%s\n", tools.GetTimeNow(), check))
// return false, errors.New("salt-minion死亡状态!请检查")
//}
//(只要满足以上3种情况其一)均为无效值
switch {
case len(checks) < 1:
fmt.Println("checks len is =", len(checks))
return false, "该salt-minion不存在!"
case !checks[address].(bool):
//是否存活
log.Printf("存活检测失败状态为=%s\n", tools.GetTimeNow(), check)
return false, "salt-minion死亡状态!请检查"
case !ok:
return false, "发生未知错误,数组越界"
case reflect.ValueOf(check.Return).IsNil() || reflect.ValueOf(check.Return).IsValid():
return false, "发生未知错误,数组越界"
}
log.Printf("判断结果的错误信息为Err=%s\n", err)
return true, "salt-minion存活ping通畅!"
}
//salt-minion存活检测
func (s *SaltController) ActiveSalttest(ctx context.Context, address string, database *conf.AllMessage) {
//获取token信息
token, err := s.Check()
if !tools.CheckERR(err, "获取token失败") {
fmt.Sprintf("内部获取token失败,ERROR=%s", err)
} | Tgt: address,
Fun: "test.ping",
}
log.Printf("token=%s,cmd=%s\n", token, cmd)
//请求对端
obj := pulicPost(token, cmd)
data, err := ioutil.ReadAll(obj.Body)
if !tools.CheckERR(err, "read checkactive is Failed") {
fmt.Sprintf("读取ioutil失败,ERROR=%s", err)
}
check := &conf.CheckActive{}
err = json.Unmarshal(data, check)
tools.CheckERR(err, "ActiveCheck json unmarshal is failed!")
checks, ok := check.Return[0].(map[string]interface{})
//(只要满足以上3种情况其一)均为无效值
switch {
case len(checks) < 1:
s.msg.Msg = conf.Error_notActive
s.msg.States = false
case !checks[address].(bool):
//是否存活
log.Printf("salt-check存活检测失败状态为=%s\n", check)
s.msg.Msg = conf.Error_Delth
s.msg.States = false
case !ok:
s.msg.Msg = conf.Error_breakarry
s.msg.States = false
case reflect.ValueOf(check.Return).IsNil() || reflect.ValueOf(check.Return).IsValid():
s.msg.Msg = conf.Error_Active
s.msg.States = true
}
s.msg.Address = address
//conf.WriteLog(fmt.Sprintf("%s[Return]判断结果的错误信息为Err=%s\n", time.Now().Format("2006-01-02 15:04:05"), s.msg))
log.Printf("最终判断结果为=%s\n", s.msg)
database.Activechan <- s.msg
}
//salt-Token调用检测
func (s *SaltController) Check() (tokens string, err error) {
//获取Token信息
if tokens = reddao.GetDate("token"); tokens == "" {
fmt.Println("请求执行到获取token了")
tokens = s.GetToken().Return[0].Token
err = reddao.InsertTTLData("token", tokens, "EX", "3600")
if !tools.CheckERR(err, "Inserter Token Failed") {
return
}
log.Printf("获取Token信息=%s\n", tokens)
}
//fmt.Println("请求返回获取token了")
return
} | //构建json参数
cmd := &conf.JobRunner{
Client: "local", | random_line_split |
salt.go | package saltstack
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"gin-web-demo/conf"
"gin-web-demo/dao"
"gin-web-demo/tools"
"io/ioutil"
"log"
"net/http"
"reflect"
"time"
)
//salt控制器
type SaltController struct {
msg conf.Activestates
}
//获取salt初始化信息
func (s *SaltController) GetToken() (saltinfo conf.Returninfo) {
/*
如果是带有HTTPs的则还需要传递TLS进Client中
*/
//配置请求信息
info := &conf.Info{
Username: conf.Config.Conf.Saltauth[0].Username,
Password: conf.Config.Conf.Saltauth[0].Password,
Eauth: conf.Config.Conf.Saltauth[0].Eauth,
}
//序列化
buf, err := json.Marshal(info)
if !tools.CheckERR(err, "Json Marshal is Failed") {
return saltinfo
}
//新建一个请求
re, err := http.NewRequest("POST", conf.Config.Conf.URL_LOGIN, bytes.NewBuffer(buf))
if !tools.CheckERR(err, "Creata New Request") {
return saltinfo
}
//设置请求格式
re.Header.Set("Accept", conf.Json_Accept)
re.Header.Set("Content-Type", conf.Json_Content_Type)
//新建一个请求
client := http.Client{
Timeout: 3 * time.Second,
}
//创建请求
respon, err := client.Do(re)
if !tools.CheckERR(err, "Create Client Request") {
return saltinfo
}
defer respon.Body.Close()
//读返回信息
body, err := ioutil.ReadAll(respon.Body)
if !tools.CheckERR(err, "ReadALL response Body Failed") {
return saltinfo
}
//反序列化
err = json.Unmarshal(body, &saltinfo)
if !tools.CheckERR(err, "Json Unmarshal Returninfo Failed") {
return saltinfo
}
//fmt.Println(saltinfo)
return saltinfo
}
//异步执行指定的模块
func (s *SaltController) PostModulJob(token string, cmd *conf.JobRunner) *conf.JobReturn {
var (
//临时使用
relist conf.JobReturn
)
//调用构造函数
response := pulicPost(token, cmd)
//读信息
infodata, _ := ioutil.ReadAll(response.Body)
json.Unmarshal(infodata, &relist)
//fmt.Println("infodata=", infodata)
return &relist
}
//同步执行模块
func (s *SaltController) PostRsyncModulJob(token string, cmd *conf.JobRunner) string {
var (
//临时使用
data conf.CheckActive
)
//调用构造函数
response := pulicPost(token, cmd)
//读信息
infodata, _ := ioutil.ReadAll(response.Body)
json.Unmarshal(infodata, &data)
fmt.Println("infodata=", infodata)
//反射出结果
obj := data.Return[0].(map[string]interface{})
//返回对象
result := obj[cmd.Tgt].(string)
fmt.Println("infodata=", result)
return result
}
//公共的POST整理
func pulicPost(token string, para *conf.JobRunner) (response *http.Response) {
//构建json参数
cmd := &conf.JobRunner{
Client: para.Client,
Tgt: para.Tgt,
Fun: para.Fun,
Arg: para.Arg,
Expr_form: para.Expr_form,
}
//Object序列化
data, err := json.Marshal(cmd)
if !tools.CheckERR(err, "PostModulJob Object json marshal Is Failed") {
return response
}
log.Printf("cmd=%s,序列化后=%s\n", cmd, string(data))
//新建请求
re, err := http.NewRequest("POST", conf.Config.Conf.URL, bytes.NewBuffer(data))
if !tools.CheckERR(err, "Create PostModulJob Request Failed") {
return response
}
defer re.Body.Close()
//设置请求头
re.Header.Set("Accept", conf.Json_Accept)
re.Header.Set("X-Auth-Token", token)
re.Header.Set("Content-Type", conf.Json_Content_Type)
log.Printf("re.body=%s\n", re.Body)
//fmt.Println(re,"conf.Config.Conf.URL=",conf.Config.Conf.URL)
//新建Client
client := http.Client{}
//请求对端
response, err = client.Do(re)
if !tools.CheckERR(err, "PostModulJob Client Request is Failed") {
return
}
return response
}
//执行Job任务查询
func (s *SaltController) QueryJob(jobid string, token string) conf.JobInfo {
var (
buf []byte
result conf.JobInfo
)
//新建请求
re, err := http.NewRequest("GET", conf.Config.Conf.URL_JOBS+"/"+jobid, bytes.NewBuffer(buf))
if !tools.CheckERR(err, "Create PostModulJob Request Failed") {
return result
}
defer re.Body.Close()
//设置请求头
re.Header.Set("Accept", conf.Json_Accept)
re.Header.Set("X-Auth-Token", token)
//re.Header.Set("Content-Type", conf.Json_Content_Type)
//fmt.Println(re)
//新建Client
client := http.Client{}
//请求对端
response, err := client.Do(re)
if !tools.CheckERR(err, "PostModulJob Client Request is Failed") {
return result
}
//读信息
infodata, _ := ioutil.ReadAll(response.Body)
//反序列化
json.Unmarshal(infodata, &result)
if !tools.CheckERR(err, "JobResult Unmarshal is Failed") {
return result
}
//fmt.Println("序列化后的数据", infodata)
return result
}
//返回任务的最终执行结果
func (s *SaltController) ReturnResult(jid string) string {
//获取数据源
data := reddao.GetDate(jid)
return data
}
//获取CMDB的认证Token
func (s *SaltController) GetCMDBAUTH() error {
var obj conf.TokenCmdb
//构建对象
auth := &conf.AuthCmdb{
UserName: conf.Config.Conf.Ldap_user,
PassWord: tools.GetLdapPasswd(conf.Config.Conf.Ldap_passwd),
}
//序列化
au, err := json.Marshal(auth)
if !tools.CheckERR(err, "") {
return err
}
//构建连接
req, err := http.NewRequest("POST", conf.Config.Conf.CMDB_api, bytes.NewBuffer(au))
tools.CheckERR(err, "New CMDB Request URL IS Failed")
//设置request
req.Header.Set("Content-Type", "application/json")
//请求连接等待返回
client := http.Client{}
repon, err := client.Do(req)
//读信息
infodata, _ := ioutil.ReadAll(repon.Body)
tools.CheckERR(err, "Request CMDB IS Failed")
//反序列化
err = json.Unmarshal(infodata, &obj)
tools.CheckERR(err, "json Unmarshal CMDB IS Failed")
log.Printf("AuthToken获取回来的消息为=%s\n", time.Now().Format("2006-01-02 15:04:05"), string(infodata))
//存数据库
err = dao.RedisHandle{}.InsertTTLData("AuthToken", obj.Token, "EX", "18000")
tools.CheckERR(err, "json Unmarshal CMDB IS Failed")
return err
}
type Ip struct {
IP string `json:"ip"`
}
//查询CMDB的接口
func (s *SaltController) GetCMDBInfo(ips string) (string, error) {
var (
retruninfo conf.Retuencmdb
token string
)
//取token信息
if token = reddao.GetDate("AuthToken"); len(token) < 0 {
return "", errors.New("Get CMDB AuthToken is Failed,Please check !")
}
//构建参数
ip := &Ip{IP: ips}
buf, err := json.Marshal(&ip)
if !tools.CheckERR(err, "New CMDB Request URL IS Failed") {
return "", errors.New("ip参数序列化失败请检查")
}
//构建连接
req, err := http.NewRequest("POST", conf.Config.Conf.Cmdb_infoapi, bytes.NewBuffer(buf))
tools.CheckERR(err, "New | ad checkactive is Failed") {
return false, fmt.Sprintf("读取ioutil失败,ERROR=%s", err)
}
check := &conf.CheckActive{}
err = json.Unmarshal(data, check)
tools.CheckERR(err, "ActiveCheck json unmarshal is failed!")
log.Printf("ActiveSalt返回信息为=%s\n", check)
//防止越界
//if reflect.ValueOf(check.Return).IsNil()||reflect.ValueOf(check.Return).IsValid(){
// return false,errors.New("发生未知错误,数组越界")
//}
//断言类型转换换
checks, ok := check.Return[0].(map[string]interface{})
//if !ok {
// return false,errors.New("发生未知错误,数组越界")
//}
//if len(checks) < 1 {
// fmt.Println("checks len is =", len(checks))
// return false, errors.New("该salt-minion不存在!")
//}
//if !checks[address].(bool) {
// //是否存活
// conf.WriteLog(fmt.Sprintf("%s[salt-check]存活检测失败状态为=%s\n", tools.GetTimeNow(), check))
// return false, errors.New("salt-minion死亡状态!请检查")
//}
//(只要满足以上3种情况其一)均为无效值
switch {
case len(checks) < 1:
fmt.Println("checks len is =", len(checks))
return false, "该salt-minion不存在!"
case !checks[address].(bool):
//是否存活
log.Printf("存活检测失败状态为=%s\n", tools.GetTimeNow(), check)
return false, "salt-minion死亡状态!请检查"
case !ok:
return false, "发生未知错误,数组越界"
case reflect.ValueOf(check.Return).IsNil() || reflect.ValueOf(check.Return).IsValid():
return false, "发生未知错误,数组越界"
}
log.Printf("判断结果的错误信息为Err=%s\n", err)
return true, "salt-minion存活ping通畅!"
}
//salt-minion存活检测
func (s *SaltController) ActiveSalttest(ctx context.Context, address string, database *conf.AllMessage) {
//获取token信息
token, err := s.Check()
if !tools.CheckERR(err, "获取token失败") {
fmt.Sprintf("内部获取token失败,ERROR=%s", err)
}
//构建json参数
cmd := &conf.JobRunner{
Client: "local",
Tgt: address,
Fun: "test.ping",
}
log.Printf("token=%s,cmd=%s\n", token, cmd)
//请求对端
obj := pulicPost(token, cmd)
data, err := ioutil.ReadAll(obj.Body)
if !tools.CheckERR(err, "read checkactive is Failed") {
fmt.Sprintf("读取ioutil失败,ERROR=%s", err)
}
check := &conf.CheckActive{}
err = json.Unmarshal(data, check)
tools.CheckERR(err, "ActiveCheck json unmarshal is failed!")
checks, ok := check.Return[0].(map[string]interface{})
//(只要满足以上3种情况其一)均为无效值
switch {
case len(checks) < 1:
s.msg.Msg = conf.Error_notActive
s.msg.States = false
case !checks[address].(bool):
//是否存活
log.Printf("salt-check存活检测失败状态为=%s\n", check)
s.msg.Msg = conf.Error_Delth
s.msg.States = false
case !ok:
s.msg.Msg = conf.Error_breakarry
s.msg.States = false
case reflect.ValueOf(check.Return).IsNil() || reflect.ValueOf(check.Return).IsValid():
s.msg.Msg = conf.Error_Active
s.msg.States = true
}
s.msg.Address = address
//conf.WriteLog(fmt.Sprintf("%s[Return]判断结果的错误信息为Err=%s\n", time.Now().Format("2006-01-02 15:04:05"), s.msg))
log.Printf("最终判断结果为=%s\n", s.msg)
database.Activechan <- s.msg
}
//salt-Token调用检测
func (s *SaltController) Check() (tokens string, err error) {
//获取Token信息
if tokens = reddao.GetDate("token"); tokens == "" {
fmt.Println("请求执行到获取token了")
tokens = s.GetToken().Return[0].Token
err = reddao.InsertTTLData("token", tokens, "EX", "3600")
if !tools.CheckERR(err, "Inserter Token Failed") {
return
}
log.Printf("获取Token信息=%s\n", tokens)
}
//fmt.Println("请求返回获取token了")
return
}
| CMDB Request URL IS Failed")
//设置request
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "JWT"+" "+token)
//请求连接等待返回
client := http.Client{}
repon, err := client.Do(req)
tools.CheckERR(err, "Request CMDB IS Failed")
info, _ := ioutil.ReadAll(repon.Body)
json.Unmarshal(info, &retruninfo)
if retruninfo.Code != 00000 {
return "", errors.New("Dont's Get CMDB minion Address,Please check request!")
}
//log.Printf("JWT=%s,retruninfo=%s,error=%s\n",token,retruninfo,err.Error())
return retruninfo.Data.IPgroup, err
}
//salt-minion存活检测
func (s *SaltController) ActiveSalt(address string) (bool, string) {
//获取token信息
token, err := s.Check()
fmt.Println("请求进来了", address, "", token, err)
if !tools.CheckERR(err, "获取token失败") {
return false, fmt.Sprintf("内部获取token失败,ERROR=%s", err)
}
fmt.Println("请求进来了", token)
//构建json参数
cmd := &conf.JobRunner{
Client: "local",
Tgt: address,
Fun: "test.ping",
}
fmt.Printf("token=%s,cmd=%s\n", token, cmd)
//请求对端
obj := pulicPost(token, cmd)
data, err := ioutil.ReadAll(obj.Body)
if !tools.CheckERR(err, "re | identifier_body |
salt.go | package saltstack
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"gin-web-demo/conf"
"gin-web-demo/dao"
"gin-web-demo/tools"
"io/ioutil"
"log"
"net/http"
"reflect"
"time"
)
//salt控制器
type SaltController struct {
msg conf.Activestates
}
//获取salt初始化信息
func (s *SaltController) GetToken() (saltinfo | turninfo) {
/*
如果是带有HTTPs的则还需要传递TLS进Client中
*/
//配置请求信息
info := &conf.Info{
Username: conf.Config.Conf.Saltauth[0].Username,
Password: conf.Config.Conf.Saltauth[0].Password,
Eauth: conf.Config.Conf.Saltauth[0].Eauth,
}
//序列化
buf, err := json.Marshal(info)
if !tools.CheckERR(err, "Json Marshal is Failed") {
return saltinfo
}
//新建一个请求
re, err := http.NewRequest("POST", conf.Config.Conf.URL_LOGIN, bytes.NewBuffer(buf))
if !tools.CheckERR(err, "Creata New Request") {
return saltinfo
}
//设置请求格式
re.Header.Set("Accept", conf.Json_Accept)
re.Header.Set("Content-Type", conf.Json_Content_Type)
//新建一个请求
client := http.Client{
Timeout: 3 * time.Second,
}
//创建请求
respon, err := client.Do(re)
if !tools.CheckERR(err, "Create Client Request") {
return saltinfo
}
defer respon.Body.Close()
//读返回信息
body, err := ioutil.ReadAll(respon.Body)
if !tools.CheckERR(err, "ReadALL response Body Failed") {
return saltinfo
}
//反序列化
err = json.Unmarshal(body, &saltinfo)
if !tools.CheckERR(err, "Json Unmarshal Returninfo Failed") {
return saltinfo
}
//fmt.Println(saltinfo)
return saltinfo
}
//异步执行指定的模块
func (s *SaltController) PostModulJob(token string, cmd *conf.JobRunner) *conf.JobReturn {
var (
//临时使用
relist conf.JobReturn
)
//调用构造函数
response := pulicPost(token, cmd)
//读信息
infodata, _ := ioutil.ReadAll(response.Body)
json.Unmarshal(infodata, &relist)
//fmt.Println("infodata=", infodata)
return &relist
}
//同步执行模块
func (s *SaltController) PostRsyncModulJob(token string, cmd *conf.JobRunner) string {
var (
//临时使用
data conf.CheckActive
)
//调用构造函数
response := pulicPost(token, cmd)
//读信息
infodata, _ := ioutil.ReadAll(response.Body)
json.Unmarshal(infodata, &data)
fmt.Println("infodata=", infodata)
//反射出结果
obj := data.Return[0].(map[string]interface{})
//返回对象
result := obj[cmd.Tgt].(string)
fmt.Println("infodata=", result)
return result
}
//公共的POST整理
func pulicPost(token string, para *conf.JobRunner) (response *http.Response) {
//构建json参数
cmd := &conf.JobRunner{
Client: para.Client,
Tgt: para.Tgt,
Fun: para.Fun,
Arg: para.Arg,
Expr_form: para.Expr_form,
}
//Object序列化
data, err := json.Marshal(cmd)
if !tools.CheckERR(err, "PostModulJob Object json marshal Is Failed") {
return response
}
log.Printf("cmd=%s,序列化后=%s\n", cmd, string(data))
//新建请求
re, err := http.NewRequest("POST", conf.Config.Conf.URL, bytes.NewBuffer(data))
if !tools.CheckERR(err, "Create PostModulJob Request Failed") {
return response
}
defer re.Body.Close()
//设置请求头
re.Header.Set("Accept", conf.Json_Accept)
re.Header.Set("X-Auth-Token", token)
re.Header.Set("Content-Type", conf.Json_Content_Type)
log.Printf("re.body=%s\n", re.Body)
//fmt.Println(re,"conf.Config.Conf.URL=",conf.Config.Conf.URL)
//新建Client
client := http.Client{}
//请求对端
response, err = client.Do(re)
if !tools.CheckERR(err, "PostModulJob Client Request is Failed") {
return
}
return response
}
//执行Job任务查询
func (s *SaltController) QueryJob(jobid string, token string) conf.JobInfo {
var (
buf []byte
result conf.JobInfo
)
//新建请求
re, err := http.NewRequest("GET", conf.Config.Conf.URL_JOBS+"/"+jobid, bytes.NewBuffer(buf))
if !tools.CheckERR(err, "Create PostModulJob Request Failed") {
return result
}
defer re.Body.Close()
//设置请求头
re.Header.Set("Accept", conf.Json_Accept)
re.Header.Set("X-Auth-Token", token)
//re.Header.Set("Content-Type", conf.Json_Content_Type)
//fmt.Println(re)
//新建Client
client := http.Client{}
//请求对端
response, err := client.Do(re)
if !tools.CheckERR(err, "PostModulJob Client Request is Failed") {
return result
}
//读信息
infodata, _ := ioutil.ReadAll(response.Body)
//反序列化
json.Unmarshal(infodata, &result)
if !tools.CheckERR(err, "JobResult Unmarshal is Failed") {
return result
}
//fmt.Println("序列化后的数据", infodata)
return result
}
//返回任务的最终执行结果
func (s *SaltController) ReturnResult(jid string) string {
//获取数据源
data := reddao.GetDate(jid)
return data
}
//获取CMDB的认证Token
func (s *SaltController) GetCMDBAUTH() error {
var obj conf.TokenCmdb
//构建对象
auth := &conf.AuthCmdb{
UserName: conf.Config.Conf.Ldap_user,
PassWord: tools.GetLdapPasswd(conf.Config.Conf.Ldap_passwd),
}
//序列化
au, err := json.Marshal(auth)
if !tools.CheckERR(err, "") {
return err
}
//构建连接
req, err := http.NewRequest("POST", conf.Config.Conf.CMDB_api, bytes.NewBuffer(au))
tools.CheckERR(err, "New CMDB Request URL IS Failed")
//设置request
req.Header.Set("Content-Type", "application/json")
//请求连接等待返回
client := http.Client{}
repon, err := client.Do(req)
//读信息
infodata, _ := ioutil.ReadAll(repon.Body)
tools.CheckERR(err, "Request CMDB IS Failed")
//反序列化
err = json.Unmarshal(infodata, &obj)
tools.CheckERR(err, "json Unmarshal CMDB IS Failed")
log.Printf("AuthToken获取回来的消息为=%s\n", time.Now().Format("2006-01-02 15:04:05"), string(infodata))
//存数据库
err = dao.RedisHandle{}.InsertTTLData("AuthToken", obj.Token, "EX", "18000")
tools.CheckERR(err, "json Unmarshal CMDB IS Failed")
return err
}
type Ip struct {
IP string `json:"ip"`
}
//查询CMDB的接口
func (s *SaltController) GetCMDBInfo(ips string) (string, error) {
var (
retruninfo conf.Retuencmdb
token string
)
//取token信息
if token = reddao.GetDate("AuthToken"); len(token) < 0 {
return "", errors.New("Get CMDB AuthToken is Failed,Please check !")
}
//构建参数
ip := &Ip{IP: ips}
buf, err := json.Marshal(&ip)
if !tools.CheckERR(err, "New CMDB Request URL IS Failed") {
return "", errors.New("ip参数序列化失败请检查")
}
//构建连接
req, err := http.NewRequest("POST", conf.Config.Conf.Cmdb_infoapi, bytes.NewBuffer(buf))
tools.CheckERR(err, "New CMDB Request URL IS Failed")
//设置request
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "JWT"+" "+token)
//请求连接等待返回
client := http.Client{}
repon, err := client.Do(req)
tools.CheckERR(err, "Request CMDB IS Failed")
info, _ := ioutil.ReadAll(repon.Body)
json.Unmarshal(info, &retruninfo)
if retruninfo.Code != 00000 {
return "", errors.New("Dont's Get CMDB minion Address,Please check request!")
}
//log.Printf("JWT=%s,retruninfo=%s,error=%s\n",token,retruninfo,err.Error())
return retruninfo.Data.IPgroup, err
}
//salt-minion存活检测
func (s *SaltController) ActiveSalt(address string) (bool, string) {
//获取token信息
token, err := s.Check()
fmt.Println("请求进来了", address, "", token, err)
if !tools.CheckERR(err, "获取token失败") {
return false, fmt.Sprintf("内部获取token失败,ERROR=%s", err)
}
fmt.Println("请求进来了", token)
//构建json参数
cmd := &conf.JobRunner{
Client: "local",
Tgt: address,
Fun: "test.ping",
}
fmt.Printf("token=%s,cmd=%s\n", token, cmd)
//请求对端
obj := pulicPost(token, cmd)
data, err := ioutil.ReadAll(obj.Body)
if !tools.CheckERR(err, "read checkactive is Failed") {
return false, fmt.Sprintf("读取ioutil失败,ERROR=%s", err)
}
check := &conf.CheckActive{}
err = json.Unmarshal(data, check)
tools.CheckERR(err, "ActiveCheck json unmarshal is failed!")
log.Printf("ActiveSalt返回信息为=%s\n", check)
//防止越界
//if reflect.ValueOf(check.Return).IsNil()||reflect.ValueOf(check.Return).IsValid(){
// return false,errors.New("发生未知错误,数组越界")
//}
//断言类型转换换
checks, ok := check.Return[0].(map[string]interface{})
//if !ok {
// return false,errors.New("发生未知错误,数组越界")
//}
//if len(checks) < 1 {
// fmt.Println("checks len is =", len(checks))
// return false, errors.New("该salt-minion不存在!")
//}
//if !checks[address].(bool) {
// //是否存活
// conf.WriteLog(fmt.Sprintf("%s[salt-check]存活检测失败状态为=%s\n", tools.GetTimeNow(), check))
// return false, errors.New("salt-minion死亡状态!请检查")
//}
//(只要满足以上3种情况其一)均为无效值
switch {
case len(checks) < 1:
fmt.Println("checks len is =", len(checks))
return false, "该salt-minion不存在!"
case !checks[address].(bool):
//是否存活
log.Printf("存活检测失败状态为=%s\n", tools.GetTimeNow(), check)
return false, "salt-minion死亡状态!请检查"
case !ok:
return false, "发生未知错误,数组越界"
case reflect.ValueOf(check.Return).IsNil() || reflect.ValueOf(check.Return).IsValid():
return false, "发生未知错误,数组越界"
}
log.Printf("判断结果的错误信息为Err=%s\n", err)
return true, "salt-minion存活ping通畅!"
}
//salt-minion存活检测
func (s *SaltController) ActiveSalttest(ctx context.Context, address string, database *conf.AllMessage) {
//获取token信息
token, err := s.Check()
if !tools.CheckERR(err, "获取token失败") {
fmt.Sprintf("内部获取token失败,ERROR=%s", err)
}
//构建json参数
cmd := &conf.JobRunner{
Client: "local",
Tgt: address,
Fun: "test.ping",
}
log.Printf("token=%s,cmd=%s\n", token, cmd)
//请求对端
obj := pulicPost(token, cmd)
data, err := ioutil.ReadAll(obj.Body)
if !tools.CheckERR(err, "read checkactive is Failed") {
fmt.Sprintf("读取ioutil失败,ERROR=%s", err)
}
check := &conf.CheckActive{}
err = json.Unmarshal(data, check)
tools.CheckERR(err, "ActiveCheck json unmarshal is failed!")
checks, ok := check.Return[0].(map[string]interface{})
//(只要满足以上3种情况其一)均为无效值
switch {
case len(checks) < 1:
s.msg.Msg = conf.Error_notActive
s.msg.States = false
case !checks[address].(bool):
//是否存活
log.Printf("salt-check存活检测失败状态为=%s\n", check)
s.msg.Msg = conf.Error_Delth
s.msg.States = false
case !ok:
s.msg.Msg = conf.Error_breakarry
s.msg.States = false
case reflect.ValueOf(check.Return).IsNil() || reflect.ValueOf(check.Return).IsValid():
s.msg.Msg = conf.Error_Active
s.msg.States = true
}
s.msg.Address = address
//conf.WriteLog(fmt.Sprintf("%s[Return]判断结果的错误信息为Err=%s\n", time.Now().Format("2006-01-02 15:04:05"), s.msg))
log.Printf("最终判断结果为=%s\n", s.msg)
database.Activechan <- s.msg
}
//salt-Token调用检测
func (s *SaltController) Check() (tokens string, err error) {
//获取Token信息
if tokens = reddao.GetDate("token"); tokens == "" {
fmt.Println("请求执行到获取token了")
tokens = s.GetToken().Return[0].Token
err = reddao.InsertTTLData("token", tokens, "EX", "3600")
if !tools.CheckERR(err, "Inserter Token Failed") {
return
}
log.Printf("获取Token信息=%s\n", tokens)
}
//fmt.Println("请求返回获取token了")
return
}
| conf.Re | identifier_name |
dbStateManager.go | // Copyright 2015 FactomProject Authors. All rights reserved.
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package state
import (
"encoding/hex"
"fmt"
"github.com/FactomProject/factomd/common/interfaces"
"github.com/FactomProject/factomd/common/messages"
"github.com/FactomProject/factomd/log"
"time"
)
var _ = hex.EncodeToString
var _ = fmt.Print
var _ = time.Now()
var _ = log.Print
type DBState struct {
isNew bool
DBHash interfaces.IHash
ABHash interfaces.IHash
FBHash interfaces.IHash
ECHash interfaces.IHash
DirectoryBlock interfaces.IDirectoryBlock
AdminBlock interfaces.IAdminBlock
FactoidBlock interfaces.IFBlock
EntryCreditBlock interfaces.IEntryCreditBlock
Saved bool
}
type DBStateList struct {
LastTime interfaces.Timestamp
SecondsBetweenTests int
Lastreq int
State *State
Base uint32
Complete uint32
DBStates []*DBState
}
const SecondsBetweenTests = 3 // Default
func (list *DBStateList) String() string {
str := "\nDBStates\n"
str = fmt.Sprintf("%s Base = %d\n", str, list.Base)
str = fmt.Sprintf("%s timestamp = %s\n", str, list.LastTime.String())
str = fmt.Sprintf("%s Complete = %d\n", str, list.Complete)
rec := "M"
last := ""
for i, ds := range list.DBStates {
rec = "M"
if ds != nil && ds.DirectoryBlock != nil {
list.State.DBMutex.Lock()
dblk, _ := list.State.DB.FetchDBlockByHash(ds.DirectoryBlock.GetKeyMR())
list.State.DBMutex.Unlock()
if dblk != nil {
rec = "R"
}
if ds.Saved {
rec = "S"
}
}
if last != "" {
str = last
}
str = fmt.Sprintf("%s %1s-DState\n DState Height: %d\n%s", str, rec, list.Base+uint32(i), ds.String())
if rec == "M" && last == "" {
last = str
}
}
return str
}
func (ds *DBState) String() string {
str := ""
if ds == nil {
str = " DBState = <nil>\n"
} else if ds.DirectoryBlock == nil {
str = " Directory Block = <nil>\n"
} else {
str = fmt.Sprintf("%s DBlk Height = %v\n", str, ds.DirectoryBlock.GetHeader().GetDBHeight())
str = fmt.Sprintf("%s DBlock = %x \n", str, ds.DirectoryBlock.GetHash().Bytes()[:5])
str = fmt.Sprintf("%s ABlock = %x \n", str, ds.AdminBlock.GetHash().Bytes()[:5])
str = fmt.Sprintf("%s FBlock = %x \n", str, ds.FactoidBlock.GetHash().Bytes()[:5])
str = fmt.Sprintf("%s ECBlock = %x \n", str, ds.EntryCreditBlock.GetHash().Bytes()[:5])
}
return str
}
func (list *DBStateList) GetHighestRecordedBlock() uint32 {
ht := uint32(0)
for i, dbstate := range list.DBStates {
if dbstate != nil && dbstate.Saved {
ht = list.Base + uint32(i)
}
}
return ht
}
// Once a second at most, we check to see if we need to pull down some blocks to catch up.
func (list *DBStateList) Catchup() {
now := list.State.GetTimestamp()
dbsHeight := list.GetHighestRecordedBlock()
// We only check if we need updates once every so often.
if int(now)/1000-int(list.LastTime)/1000 < SecondsBetweenTests {
return
}
list.LastTime = now
begin := -1
end := -1
// Find the first range of blocks that we don't have.
for i, v := range list.DBStates {
if (v == nil || v.DirectoryBlock == nil) && begin < 0 {
begin = i
}
if v == nil {
end = i
}
}
if begin > 0 {
begin += int(list.Base)
end += int(list.Base)
} else {
plHeight := list.State.GetHighestKnownBlock()
// Don't worry about the block initialization case.
if plHeight < 1 {
return
}
if plHeight > dbsHeight && plHeight-dbsHeight > 1 {
list.State.ProcessLists.Reset(dbsHeight)
begin = int(dbsHeight + 1)
end = int(plHeight - 1)
} else {
return
}
}
list.Lastreq = begin
end2 := begin + 400
if end < end2 {
end2 = end
}
msg := messages.NewDBStateMissing(list.State, uint32(begin), uint32(end2))
if msg != nil {
list.State.NetworkOutMsgQueue() <- msg
list.State.stallQueue = make(chan interfaces.IMsg, 10000)
list.State.NewMinute()
}
}
func (list *DBStateList) UpdateState() (progress bool) {
list.Catchup()
for i, d := range list.DBStates {
// Must process blocks in sequence. Missing a block says we must stop.
if d == nil {
return
}
if d.Saved {
continue
}
// Make sure the directory block is properly synced up with the prior block, if there
// is one.
list.State.DBMutex.Lock()
dblk, _ := list.State.DB.FetchDBlockByKeyMR(d.DirectoryBlock.GetKeyMR())
list.State.DBMutex.Unlock()
if dblk == nil {
if i > 0 {
p := list.DBStates[i-1]
if !p.Saved {
continue
}
}
list.State.DBMutex.Lock()
list.State.DB.StartMultiBatch()
list.State.DBMutex.Unlock()
//fmt.Println("Saving DBHeight ", d.DirectoryBlock.GetHeader().GetDBHeight(), " on ", list.State.GetFactomNodeName())
// If we have previous blocks, update blocks that this follower potentially constructed. We can optimize and skip
// this step if we got the block from a peer. TODO we must however check the sigantures on the
// block before we write it to disk.
if i > 0 {
p := list.DBStates[i-1]
hash, err := p.AdminBlock.FullHash()
if err != nil {
return
}
hash, err = p.EntryCreditBlock.HeaderHash()
if err != nil {
return
}
d.EntryCreditBlock.GetHeader().SetPrevHeaderHash(hash)
hash, err = p.EntryCreditBlock.Hash()
if err != nil {
return
}
d.EntryCreditBlock.GetHeader().SetPrevFullHash(hash)
d.AdminBlock.GetHeader().SetPrevFullHash(hash)
p.FactoidBlock.SetDBHeight(p.DirectoryBlock.GetHeader().GetDBHeight())
d.FactoidBlock.SetDBHeight(d.DirectoryBlock.GetHeader().GetDBHeight())
d.FactoidBlock.SetPrevKeyMR(p.FactoidBlock.GetKeyMR().Bytes())
d.FactoidBlock.SetPrevFullHash(p.FactoidBlock.GetPrevFullHash().Bytes())
d.DirectoryBlock.GetHeader().SetPrevFullHash(p.DirectoryBlock.GetHeader().GetFullHash())
d.DirectoryBlock.GetHeader().SetPrevKeyMR(p.DirectoryBlock.GetKeyMR())
d.DirectoryBlock.GetHeader().SetTimestamp(0)
d.DirectoryBlock.GetDBEntries()[0].SetKeyMR(d.AdminBlock.GetHash())
d.DirectoryBlock.GetDBEntries()[1].SetKeyMR(d.EntryCreditBlock.GetHash())
d.DirectoryBlock.GetDBEntries()[2].SetKeyMR(d.FactoidBlock.GetHash())
pl := list.State.ProcessLists.Get(d.DirectoryBlock.GetHeader().GetDBHeight())
for _, eb := range pl.NewEBlocks {
key, err := eb.KeyMR()
if err != nil {
panic(err.Error())
}
d.DirectoryBlock.AddEntry(eb.GetChainID(), key)
}
d.DirectoryBlock.GetKeyMR()
_, err = d.DirectoryBlock.BuildBodyMR()
if err != nil {
panic(err.Error())
}
}
list.State.DBMutex.Lock()
if err := list.State.DB.ProcessDBlockMultiBatch(d.DirectoryBlock); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
if err := list.State.DB.ProcessABlockMultiBatch(d.AdminBlock); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
if err := list.State.DB.ProcessFBlockMultiBatch(d.FactoidBlock); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
if err := list.State.DB.ProcessECBlockMultiBatch(d.EntryCreditBlock, false); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
pl := list.State.ProcessLists.Get(d.DirectoryBlock.GetHeader().GetDBHeight())
for _, eb := range pl.NewEBlocks {
if err := list.State.DB.ProcessEBlockMultiBatch(eb, false); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
for _, e := range eb.GetBody().GetEBEntries() {
if err := list.State.DB.InsertEntry(pl.NewEntries[e.Fixed()]); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
}
}
if err := list.State.DB.ExecuteMultiBatch(); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
list.State.DBMutex.Unlock()
}
list.State.DBMutex.Lock()
dblk2, _ := list.State.DB.FetchDBlockByKeyMR(d.DirectoryBlock.GetKeyMR())
list.State.DBMutex.Unlock()
if dblk2 == nil {
fmt.Printf("Failed to save the Directory Block %d %x\n",
d.DirectoryBlock.GetHeader().GetDBHeight(),
d.DirectoryBlock.GetKeyMR().Bytes()[:3])
}
list.LastTime = list.State.GetTimestamp() // If I saved or processed stuff, I'm good for a while
d.Saved = true // Only after all is done will I admit this state has been saved.
// Any updates required to the state as established by the AdminBlock are applied here.
d.AdminBlock.UpdateState(list.State)
// Process the Factoid End of Block
fs := list.State.GetFactoidState()
fs.AddTransactionBlock(d.FactoidBlock)
fs.AddECBlock(d.EntryCreditBlock)
fs.ProcessEndOfBlock(list.State)
// Step my counter of Complete blocks
if uint32(i) > list.Complete {
list.Complete = uint32(i)
}
progress = true
}
return
}
func (list *DBStateList) Last() *DBState {
last := (*DBState)(nil)
for _, ds := range list.DBStates {
if ds == nil || ds.DirectoryBlock == nil {
return last
}
last = ds
}
return last
}
func (list *DBStateList) Highest() uint32 {
high := list.Base + uint32(len(list.DBStates)) - 1
if high == 0 && len(list.DBStates) == 1 {
return 1
}
return high
}
func (list *DBStateList) Put(dbState *DBState) {
// Hold off on any requests if I'm actually processing...
list.LastTime = list.State.GetTimestamp()
dblk := dbState.DirectoryBlock
dbheight := dblk.GetHeader().GetDBHeight()
// Count completed states, starting from the beginning (since base starts at
// zero.
cnt := 0
for i, v := range list.DBStates {
if v == nil || v.DirectoryBlock == nil || !v.Saved {
if v != nil && v.DirectoryBlock == nil { | list.DBStates[i] = nil
}
break
}
cnt++
}
keep := uint32(2) // How many states to keep around; debugging helps with more.
if uint32(cnt) > keep {
var dbstates []*DBState
dbstates = append(dbstates, list.DBStates[cnt-int(keep):]...)
list.DBStates = dbstates
list.Base = list.Base + uint32(cnt) - keep
list.Complete = list.Complete - uint32(cnt) + keep
}
index := int(dbheight) - int(list.Base)
// If we have already processed this State, ignore it.
if index < int(list.Complete) {
if list.State.GetOut() {
list.State.Println("Ignoring! Block vs Base: ", dbheight, "/", list.Base)
}
return
}
// make room for this entry.
for len(list.DBStates) <= index {
list.DBStates = append(list.DBStates, nil)
}
if list.DBStates[index] == nil {
list.DBStates[index] = dbState
}
hash, err := dbState.AdminBlock.GetKeyMR()
if err != nil {
panic(err)
}
dbState.DirectoryBlock.GetDBEntries()[0].SetKeyMR(hash)
hash, err = dbState.EntryCreditBlock.Hash()
if err != nil {
panic(err)
}
dbState.DirectoryBlock.GetDBEntries()[1].SetKeyMR(hash)
hash = dbState.FactoidBlock.GetHash()
dbState.DirectoryBlock.GetDBEntries()[2].SetKeyMR(hash)
}
func (list *DBStateList) Get(height uint32) *DBState {
i := int(height) - int(list.Base)
if i < 0 || i >= len(list.DBStates) {
return nil
}
return list.DBStates[i]
}
func (list *DBStateList) NewDBState(isNew bool,
directoryBlock interfaces.IDirectoryBlock,
adminBlock interfaces.IAdminBlock,
factoidBlock interfaces.IFBlock,
entryCreditBlock interfaces.IEntryCreditBlock) *DBState {
dbState := new(DBState)
dbState.DBHash = directoryBlock.GetHash()
dbState.ABHash = adminBlock.GetHash()
dbState.FBHash = factoidBlock.GetHash()
dbState.ECHash = entryCreditBlock.GetHash()
dbState.isNew = isNew
dbState.DirectoryBlock = directoryBlock
dbState.AdminBlock = adminBlock
dbState.FactoidBlock = factoidBlock
dbState.EntryCreditBlock = entryCreditBlock
list.Put(dbState)
return dbState
} | random_line_split | |
dbStateManager.go | // Copyright 2015 FactomProject Authors. All rights reserved.
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package state
import (
"encoding/hex"
"fmt"
"github.com/FactomProject/factomd/common/interfaces"
"github.com/FactomProject/factomd/common/messages"
"github.com/FactomProject/factomd/log"
"time"
)
var _ = hex.EncodeToString
var _ = fmt.Print
var _ = time.Now()
var _ = log.Print
type DBState struct {
isNew bool
DBHash interfaces.IHash
ABHash interfaces.IHash
FBHash interfaces.IHash
ECHash interfaces.IHash
DirectoryBlock interfaces.IDirectoryBlock
AdminBlock interfaces.IAdminBlock
FactoidBlock interfaces.IFBlock
EntryCreditBlock interfaces.IEntryCreditBlock
Saved bool
}
type DBStateList struct {
LastTime interfaces.Timestamp
SecondsBetweenTests int
Lastreq int
State *State
Base uint32
Complete uint32
DBStates []*DBState
}
const SecondsBetweenTests = 3 // Default
func (list *DBStateList) String() string {
str := "\nDBStates\n"
str = fmt.Sprintf("%s Base = %d\n", str, list.Base)
str = fmt.Sprintf("%s timestamp = %s\n", str, list.LastTime.String())
str = fmt.Sprintf("%s Complete = %d\n", str, list.Complete)
rec := "M"
last := ""
for i, ds := range list.DBStates {
rec = "M"
if ds != nil && ds.DirectoryBlock != nil {
list.State.DBMutex.Lock()
dblk, _ := list.State.DB.FetchDBlockByHash(ds.DirectoryBlock.GetKeyMR())
list.State.DBMutex.Unlock()
if dblk != nil {
rec = "R"
}
if ds.Saved {
rec = "S"
}
}
if last != "" {
str = last
}
str = fmt.Sprintf("%s %1s-DState\n DState Height: %d\n%s", str, rec, list.Base+uint32(i), ds.String())
if rec == "M" && last == "" {
last = str
}
}
return str
}
func (ds *DBState) String() string {
str := ""
if ds == nil {
str = " DBState = <nil>\n"
} else if ds.DirectoryBlock == nil {
str = " Directory Block = <nil>\n"
} else {
str = fmt.Sprintf("%s DBlk Height = %v\n", str, ds.DirectoryBlock.GetHeader().GetDBHeight())
str = fmt.Sprintf("%s DBlock = %x \n", str, ds.DirectoryBlock.GetHash().Bytes()[:5])
str = fmt.Sprintf("%s ABlock = %x \n", str, ds.AdminBlock.GetHash().Bytes()[:5])
str = fmt.Sprintf("%s FBlock = %x \n", str, ds.FactoidBlock.GetHash().Bytes()[:5])
str = fmt.Sprintf("%s ECBlock = %x \n", str, ds.EntryCreditBlock.GetHash().Bytes()[:5])
}
return str
}
func (list *DBStateList) GetHighestRecordedBlock() uint32 {
ht := uint32(0)
for i, dbstate := range list.DBStates {
if dbstate != nil && dbstate.Saved {
ht = list.Base + uint32(i)
}
}
return ht
}
// Once a second at most, we check to see if we need to pull down some blocks to catch up.
func (list *DBStateList) Catchup() {
now := list.State.GetTimestamp()
dbsHeight := list.GetHighestRecordedBlock()
// We only check if we need updates once every so often.
if int(now)/1000-int(list.LastTime)/1000 < SecondsBetweenTests {
return
}
list.LastTime = now
begin := -1
end := -1
// Find the first range of blocks that we don't have.
for i, v := range list.DBStates {
if (v == nil || v.DirectoryBlock == nil) && begin < 0 {
begin = i
}
if v == nil {
end = i
}
}
if begin > 0 {
begin += int(list.Base)
end += int(list.Base)
} else {
plHeight := list.State.GetHighestKnownBlock()
// Don't worry about the block initialization case.
if plHeight < 1 {
return
}
if plHeight > dbsHeight && plHeight-dbsHeight > 1 {
list.State.ProcessLists.Reset(dbsHeight)
begin = int(dbsHeight + 1)
end = int(plHeight - 1)
} else {
return
}
}
list.Lastreq = begin
end2 := begin + 400
if end < end2 {
end2 = end
}
msg := messages.NewDBStateMissing(list.State, uint32(begin), uint32(end2))
if msg != nil {
list.State.NetworkOutMsgQueue() <- msg
list.State.stallQueue = make(chan interfaces.IMsg, 10000)
list.State.NewMinute()
}
}
func (list *DBStateList) UpdateState() (progress bool) {
list.Catchup()
for i, d := range list.DBStates {
// Must process blocks in sequence. Missing a block says we must stop.
if d == nil {
return
}
if d.Saved {
continue
}
// Make sure the directory block is properly synced up with the prior block, if there
// is one.
list.State.DBMutex.Lock()
dblk, _ := list.State.DB.FetchDBlockByKeyMR(d.DirectoryBlock.GetKeyMR())
list.State.DBMutex.Unlock()
if dblk == nil {
if i > 0 {
p := list.DBStates[i-1]
if !p.Saved {
continue
}
}
list.State.DBMutex.Lock()
list.State.DB.StartMultiBatch()
list.State.DBMutex.Unlock()
//fmt.Println("Saving DBHeight ", d.DirectoryBlock.GetHeader().GetDBHeight(), " on ", list.State.GetFactomNodeName())
// If we have previous blocks, update blocks that this follower potentially constructed. We can optimize and skip
// this step if we got the block from a peer. TODO we must however check the sigantures on the
// block before we write it to disk.
if i > 0 {
p := list.DBStates[i-1]
hash, err := p.AdminBlock.FullHash()
if err != nil {
return
}
hash, err = p.EntryCreditBlock.HeaderHash()
if err != nil {
return
}
d.EntryCreditBlock.GetHeader().SetPrevHeaderHash(hash)
hash, err = p.EntryCreditBlock.Hash()
if err != nil {
return
}
d.EntryCreditBlock.GetHeader().SetPrevFullHash(hash)
d.AdminBlock.GetHeader().SetPrevFullHash(hash)
p.FactoidBlock.SetDBHeight(p.DirectoryBlock.GetHeader().GetDBHeight())
d.FactoidBlock.SetDBHeight(d.DirectoryBlock.GetHeader().GetDBHeight())
d.FactoidBlock.SetPrevKeyMR(p.FactoidBlock.GetKeyMR().Bytes())
d.FactoidBlock.SetPrevFullHash(p.FactoidBlock.GetPrevFullHash().Bytes())
d.DirectoryBlock.GetHeader().SetPrevFullHash(p.DirectoryBlock.GetHeader().GetFullHash())
d.DirectoryBlock.GetHeader().SetPrevKeyMR(p.DirectoryBlock.GetKeyMR())
d.DirectoryBlock.GetHeader().SetTimestamp(0)
d.DirectoryBlock.GetDBEntries()[0].SetKeyMR(d.AdminBlock.GetHash())
d.DirectoryBlock.GetDBEntries()[1].SetKeyMR(d.EntryCreditBlock.GetHash())
d.DirectoryBlock.GetDBEntries()[2].SetKeyMR(d.FactoidBlock.GetHash())
pl := list.State.ProcessLists.Get(d.DirectoryBlock.GetHeader().GetDBHeight())
for _, eb := range pl.NewEBlocks {
key, err := eb.KeyMR()
if err != nil {
panic(err.Error())
}
d.DirectoryBlock.AddEntry(eb.GetChainID(), key)
}
d.DirectoryBlock.GetKeyMR()
_, err = d.DirectoryBlock.BuildBodyMR()
if err != nil {
panic(err.Error())
}
}
list.State.DBMutex.Lock()
if err := list.State.DB.ProcessDBlockMultiBatch(d.DirectoryBlock); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
if err := list.State.DB.ProcessABlockMultiBatch(d.AdminBlock); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
if err := list.State.DB.ProcessFBlockMultiBatch(d.FactoidBlock); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
if err := list.State.DB.ProcessECBlockMultiBatch(d.EntryCreditBlock, false); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
pl := list.State.ProcessLists.Get(d.DirectoryBlock.GetHeader().GetDBHeight())
for _, eb := range pl.NewEBlocks {
if err := list.State.DB.ProcessEBlockMultiBatch(eb, false); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
for _, e := range eb.GetBody().GetEBEntries() {
if err := list.State.DB.InsertEntry(pl.NewEntries[e.Fixed()]); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
}
}
if err := list.State.DB.ExecuteMultiBatch(); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
list.State.DBMutex.Unlock()
}
list.State.DBMutex.Lock()
dblk2, _ := list.State.DB.FetchDBlockByKeyMR(d.DirectoryBlock.GetKeyMR())
list.State.DBMutex.Unlock()
if dblk2 == nil {
fmt.Printf("Failed to save the Directory Block %d %x\n",
d.DirectoryBlock.GetHeader().GetDBHeight(),
d.DirectoryBlock.GetKeyMR().Bytes()[:3])
}
list.LastTime = list.State.GetTimestamp() // If I saved or processed stuff, I'm good for a while
d.Saved = true // Only after all is done will I admit this state has been saved.
// Any updates required to the state as established by the AdminBlock are applied here.
d.AdminBlock.UpdateState(list.State)
// Process the Factoid End of Block
fs := list.State.GetFactoidState()
fs.AddTransactionBlock(d.FactoidBlock)
fs.AddECBlock(d.EntryCreditBlock)
fs.ProcessEndOfBlock(list.State)
// Step my counter of Complete blocks
if uint32(i) > list.Complete {
list.Complete = uint32(i)
}
progress = true
}
return
}
func (list *DBStateList) Last() *DBState {
last := (*DBState)(nil)
for _, ds := range list.DBStates {
if ds == nil || ds.DirectoryBlock == nil {
return last
}
last = ds
}
return last
}
func (list *DBStateList) Highest() uint32 {
high := list.Base + uint32(len(list.DBStates)) - 1
if high == 0 && len(list.DBStates) == 1 |
return high
}
func (list *DBStateList) Put(dbState *DBState) {
// Hold off on any requests if I'm actually processing...
list.LastTime = list.State.GetTimestamp()
dblk := dbState.DirectoryBlock
dbheight := dblk.GetHeader().GetDBHeight()
// Count completed states, starting from the beginning (since base starts at
// zero.
cnt := 0
for i, v := range list.DBStates {
if v == nil || v.DirectoryBlock == nil || !v.Saved {
if v != nil && v.DirectoryBlock == nil {
list.DBStates[i] = nil
}
break
}
cnt++
}
keep := uint32(2) // How many states to keep around; debugging helps with more.
if uint32(cnt) > keep {
var dbstates []*DBState
dbstates = append(dbstates, list.DBStates[cnt-int(keep):]...)
list.DBStates = dbstates
list.Base = list.Base + uint32(cnt) - keep
list.Complete = list.Complete - uint32(cnt) + keep
}
index := int(dbheight) - int(list.Base)
// If we have already processed this State, ignore it.
if index < int(list.Complete) {
if list.State.GetOut() {
list.State.Println("Ignoring! Block vs Base: ", dbheight, "/", list.Base)
}
return
}
// make room for this entry.
for len(list.DBStates) <= index {
list.DBStates = append(list.DBStates, nil)
}
if list.DBStates[index] == nil {
list.DBStates[index] = dbState
}
hash, err := dbState.AdminBlock.GetKeyMR()
if err != nil {
panic(err)
}
dbState.DirectoryBlock.GetDBEntries()[0].SetKeyMR(hash)
hash, err = dbState.EntryCreditBlock.Hash()
if err != nil {
panic(err)
}
dbState.DirectoryBlock.GetDBEntries()[1].SetKeyMR(hash)
hash = dbState.FactoidBlock.GetHash()
dbState.DirectoryBlock.GetDBEntries()[2].SetKeyMR(hash)
}
func (list *DBStateList) Get(height uint32) *DBState {
i := int(height) - int(list.Base)
if i < 0 || i >= len(list.DBStates) {
return nil
}
return list.DBStates[i]
}
func (list *DBStateList) NewDBState(isNew bool,
directoryBlock interfaces.IDirectoryBlock,
adminBlock interfaces.IAdminBlock,
factoidBlock interfaces.IFBlock,
entryCreditBlock interfaces.IEntryCreditBlock) *DBState {
dbState := new(DBState)
dbState.DBHash = directoryBlock.GetHash()
dbState.ABHash = adminBlock.GetHash()
dbState.FBHash = factoidBlock.GetHash()
dbState.ECHash = entryCreditBlock.GetHash()
dbState.isNew = isNew
dbState.DirectoryBlock = directoryBlock
dbState.AdminBlock = adminBlock
dbState.FactoidBlock = factoidBlock
dbState.EntryCreditBlock = entryCreditBlock
list.Put(dbState)
return dbState
}
| {
return 1
} | conditional_block |
dbStateManager.go | // Copyright 2015 FactomProject Authors. All rights reserved.
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package state
import (
"encoding/hex"
"fmt"
"github.com/FactomProject/factomd/common/interfaces"
"github.com/FactomProject/factomd/common/messages"
"github.com/FactomProject/factomd/log"
"time"
)
var _ = hex.EncodeToString
var _ = fmt.Print
var _ = time.Now()
var _ = log.Print
type DBState struct {
isNew bool
DBHash interfaces.IHash
ABHash interfaces.IHash
FBHash interfaces.IHash
ECHash interfaces.IHash
DirectoryBlock interfaces.IDirectoryBlock
AdminBlock interfaces.IAdminBlock
FactoidBlock interfaces.IFBlock
EntryCreditBlock interfaces.IEntryCreditBlock
Saved bool
}
type DBStateList struct {
LastTime interfaces.Timestamp
SecondsBetweenTests int
Lastreq int
State *State
Base uint32
Complete uint32
DBStates []*DBState
}
const SecondsBetweenTests = 3 // Default
func (list *DBStateList) String() string {
str := "\nDBStates\n"
str = fmt.Sprintf("%s Base = %d\n", str, list.Base)
str = fmt.Sprintf("%s timestamp = %s\n", str, list.LastTime.String())
str = fmt.Sprintf("%s Complete = %d\n", str, list.Complete)
rec := "M"
last := ""
for i, ds := range list.DBStates {
rec = "M"
if ds != nil && ds.DirectoryBlock != nil {
list.State.DBMutex.Lock()
dblk, _ := list.State.DB.FetchDBlockByHash(ds.DirectoryBlock.GetKeyMR())
list.State.DBMutex.Unlock()
if dblk != nil {
rec = "R"
}
if ds.Saved {
rec = "S"
}
}
if last != "" {
str = last
}
str = fmt.Sprintf("%s %1s-DState\n DState Height: %d\n%s", str, rec, list.Base+uint32(i), ds.String())
if rec == "M" && last == "" {
last = str
}
}
return str
}
func (ds *DBState) String() string {
str := ""
if ds == nil {
str = " DBState = <nil>\n"
} else if ds.DirectoryBlock == nil {
str = " Directory Block = <nil>\n"
} else {
str = fmt.Sprintf("%s DBlk Height = %v\n", str, ds.DirectoryBlock.GetHeader().GetDBHeight())
str = fmt.Sprintf("%s DBlock = %x \n", str, ds.DirectoryBlock.GetHash().Bytes()[:5])
str = fmt.Sprintf("%s ABlock = %x \n", str, ds.AdminBlock.GetHash().Bytes()[:5])
str = fmt.Sprintf("%s FBlock = %x \n", str, ds.FactoidBlock.GetHash().Bytes()[:5])
str = fmt.Sprintf("%s ECBlock = %x \n", str, ds.EntryCreditBlock.GetHash().Bytes()[:5])
}
return str
}
func (list *DBStateList) GetHighestRecordedBlock() uint32 {
ht := uint32(0)
for i, dbstate := range list.DBStates {
if dbstate != nil && dbstate.Saved {
ht = list.Base + uint32(i)
}
}
return ht
}
// Once a second at most, we check to see if we need to pull down some blocks to catch up.
func (list *DBStateList) Catchup() {
now := list.State.GetTimestamp()
dbsHeight := list.GetHighestRecordedBlock()
// We only check if we need updates once every so often.
if int(now)/1000-int(list.LastTime)/1000 < SecondsBetweenTests {
return
}
list.LastTime = now
begin := -1
end := -1
// Find the first range of blocks that we don't have.
for i, v := range list.DBStates {
if (v == nil || v.DirectoryBlock == nil) && begin < 0 {
begin = i
}
if v == nil {
end = i
}
}
if begin > 0 {
begin += int(list.Base)
end += int(list.Base)
} else {
plHeight := list.State.GetHighestKnownBlock()
// Don't worry about the block initialization case.
if plHeight < 1 {
return
}
if plHeight > dbsHeight && plHeight-dbsHeight > 1 {
list.State.ProcessLists.Reset(dbsHeight)
begin = int(dbsHeight + 1)
end = int(plHeight - 1)
} else {
return
}
}
list.Lastreq = begin
end2 := begin + 400
if end < end2 {
end2 = end
}
msg := messages.NewDBStateMissing(list.State, uint32(begin), uint32(end2))
if msg != nil {
list.State.NetworkOutMsgQueue() <- msg
list.State.stallQueue = make(chan interfaces.IMsg, 10000)
list.State.NewMinute()
}
}
func (list *DBStateList) UpdateState() (progress bool) {
list.Catchup()
for i, d := range list.DBStates {
// Must process blocks in sequence. Missing a block says we must stop.
if d == nil {
return
}
if d.Saved {
continue
}
// Make sure the directory block is properly synced up with the prior block, if there
// is one.
list.State.DBMutex.Lock()
dblk, _ := list.State.DB.FetchDBlockByKeyMR(d.DirectoryBlock.GetKeyMR())
list.State.DBMutex.Unlock()
if dblk == nil {
if i > 0 {
p := list.DBStates[i-1]
if !p.Saved {
continue
}
}
list.State.DBMutex.Lock()
list.State.DB.StartMultiBatch()
list.State.DBMutex.Unlock()
//fmt.Println("Saving DBHeight ", d.DirectoryBlock.GetHeader().GetDBHeight(), " on ", list.State.GetFactomNodeName())
// If we have previous blocks, update blocks that this follower potentially constructed. We can optimize and skip
// this step if we got the block from a peer. TODO we must however check the sigantures on the
// block before we write it to disk.
if i > 0 {
p := list.DBStates[i-1]
hash, err := p.AdminBlock.FullHash()
if err != nil {
return
}
hash, err = p.EntryCreditBlock.HeaderHash()
if err != nil {
return
}
d.EntryCreditBlock.GetHeader().SetPrevHeaderHash(hash)
hash, err = p.EntryCreditBlock.Hash()
if err != nil {
return
}
d.EntryCreditBlock.GetHeader().SetPrevFullHash(hash)
d.AdminBlock.GetHeader().SetPrevFullHash(hash)
p.FactoidBlock.SetDBHeight(p.DirectoryBlock.GetHeader().GetDBHeight())
d.FactoidBlock.SetDBHeight(d.DirectoryBlock.GetHeader().GetDBHeight())
d.FactoidBlock.SetPrevKeyMR(p.FactoidBlock.GetKeyMR().Bytes())
d.FactoidBlock.SetPrevFullHash(p.FactoidBlock.GetPrevFullHash().Bytes())
d.DirectoryBlock.GetHeader().SetPrevFullHash(p.DirectoryBlock.GetHeader().GetFullHash())
d.DirectoryBlock.GetHeader().SetPrevKeyMR(p.DirectoryBlock.GetKeyMR())
d.DirectoryBlock.GetHeader().SetTimestamp(0)
d.DirectoryBlock.GetDBEntries()[0].SetKeyMR(d.AdminBlock.GetHash())
d.DirectoryBlock.GetDBEntries()[1].SetKeyMR(d.EntryCreditBlock.GetHash())
d.DirectoryBlock.GetDBEntries()[2].SetKeyMR(d.FactoidBlock.GetHash())
pl := list.State.ProcessLists.Get(d.DirectoryBlock.GetHeader().GetDBHeight())
for _, eb := range pl.NewEBlocks {
key, err := eb.KeyMR()
if err != nil {
panic(err.Error())
}
d.DirectoryBlock.AddEntry(eb.GetChainID(), key)
}
d.DirectoryBlock.GetKeyMR()
_, err = d.DirectoryBlock.BuildBodyMR()
if err != nil {
panic(err.Error())
}
}
list.State.DBMutex.Lock()
if err := list.State.DB.ProcessDBlockMultiBatch(d.DirectoryBlock); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
if err := list.State.DB.ProcessABlockMultiBatch(d.AdminBlock); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
if err := list.State.DB.ProcessFBlockMultiBatch(d.FactoidBlock); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
if err := list.State.DB.ProcessECBlockMultiBatch(d.EntryCreditBlock, false); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
pl := list.State.ProcessLists.Get(d.DirectoryBlock.GetHeader().GetDBHeight())
for _, eb := range pl.NewEBlocks {
if err := list.State.DB.ProcessEBlockMultiBatch(eb, false); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
for _, e := range eb.GetBody().GetEBEntries() {
if err := list.State.DB.InsertEntry(pl.NewEntries[e.Fixed()]); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
}
}
if err := list.State.DB.ExecuteMultiBatch(); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
list.State.DBMutex.Unlock()
}
list.State.DBMutex.Lock()
dblk2, _ := list.State.DB.FetchDBlockByKeyMR(d.DirectoryBlock.GetKeyMR())
list.State.DBMutex.Unlock()
if dblk2 == nil {
fmt.Printf("Failed to save the Directory Block %d %x\n",
d.DirectoryBlock.GetHeader().GetDBHeight(),
d.DirectoryBlock.GetKeyMR().Bytes()[:3])
}
list.LastTime = list.State.GetTimestamp() // If I saved or processed stuff, I'm good for a while
d.Saved = true // Only after all is done will I admit this state has been saved.
// Any updates required to the state as established by the AdminBlock are applied here.
d.AdminBlock.UpdateState(list.State)
// Process the Factoid End of Block
fs := list.State.GetFactoidState()
fs.AddTransactionBlock(d.FactoidBlock)
fs.AddECBlock(d.EntryCreditBlock)
fs.ProcessEndOfBlock(list.State)
// Step my counter of Complete blocks
if uint32(i) > list.Complete {
list.Complete = uint32(i)
}
progress = true
}
return
}
func (list *DBStateList) Last() *DBState {
last := (*DBState)(nil)
for _, ds := range list.DBStates {
if ds == nil || ds.DirectoryBlock == nil {
return last
}
last = ds
}
return last
}
func (list *DBStateList) Highest() uint32 {
high := list.Base + uint32(len(list.DBStates)) - 1
if high == 0 && len(list.DBStates) == 1 {
return 1
}
return high
}
func (list *DBStateList) Put(dbState *DBState) {
// Hold off on any requests if I'm actually processing...
list.LastTime = list.State.GetTimestamp()
dblk := dbState.DirectoryBlock
dbheight := dblk.GetHeader().GetDBHeight()
// Count completed states, starting from the beginning (since base starts at
// zero.
cnt := 0
for i, v := range list.DBStates {
if v == nil || v.DirectoryBlock == nil || !v.Saved {
if v != nil && v.DirectoryBlock == nil {
list.DBStates[i] = nil
}
break
}
cnt++
}
keep := uint32(2) // How many states to keep around; debugging helps with more.
if uint32(cnt) > keep {
var dbstates []*DBState
dbstates = append(dbstates, list.DBStates[cnt-int(keep):]...)
list.DBStates = dbstates
list.Base = list.Base + uint32(cnt) - keep
list.Complete = list.Complete - uint32(cnt) + keep
}
index := int(dbheight) - int(list.Base)
// If we have already processed this State, ignore it.
if index < int(list.Complete) {
if list.State.GetOut() {
list.State.Println("Ignoring! Block vs Base: ", dbheight, "/", list.Base)
}
return
}
// make room for this entry.
for len(list.DBStates) <= index {
list.DBStates = append(list.DBStates, nil)
}
if list.DBStates[index] == nil {
list.DBStates[index] = dbState
}
hash, err := dbState.AdminBlock.GetKeyMR()
if err != nil {
panic(err)
}
dbState.DirectoryBlock.GetDBEntries()[0].SetKeyMR(hash)
hash, err = dbState.EntryCreditBlock.Hash()
if err != nil {
panic(err)
}
dbState.DirectoryBlock.GetDBEntries()[1].SetKeyMR(hash)
hash = dbState.FactoidBlock.GetHash()
dbState.DirectoryBlock.GetDBEntries()[2].SetKeyMR(hash)
}
func (list *DBStateList) Get(height uint32) *DBState |
func (list *DBStateList) NewDBState(isNew bool,
directoryBlock interfaces.IDirectoryBlock,
adminBlock interfaces.IAdminBlock,
factoidBlock interfaces.IFBlock,
entryCreditBlock interfaces.IEntryCreditBlock) *DBState {
dbState := new(DBState)
dbState.DBHash = directoryBlock.GetHash()
dbState.ABHash = adminBlock.GetHash()
dbState.FBHash = factoidBlock.GetHash()
dbState.ECHash = entryCreditBlock.GetHash()
dbState.isNew = isNew
dbState.DirectoryBlock = directoryBlock
dbState.AdminBlock = adminBlock
dbState.FactoidBlock = factoidBlock
dbState.EntryCreditBlock = entryCreditBlock
list.Put(dbState)
return dbState
}
| {
i := int(height) - int(list.Base)
if i < 0 || i >= len(list.DBStates) {
return nil
}
return list.DBStates[i]
} | identifier_body |
dbStateManager.go | // Copyright 2015 FactomProject Authors. All rights reserved.
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package state
import (
"encoding/hex"
"fmt"
"github.com/FactomProject/factomd/common/interfaces"
"github.com/FactomProject/factomd/common/messages"
"github.com/FactomProject/factomd/log"
"time"
)
var _ = hex.EncodeToString
var _ = fmt.Print
var _ = time.Now()
var _ = log.Print
type DBState struct {
isNew bool
DBHash interfaces.IHash
ABHash interfaces.IHash
FBHash interfaces.IHash
ECHash interfaces.IHash
DirectoryBlock interfaces.IDirectoryBlock
AdminBlock interfaces.IAdminBlock
FactoidBlock interfaces.IFBlock
EntryCreditBlock interfaces.IEntryCreditBlock
Saved bool
}
type DBStateList struct {
LastTime interfaces.Timestamp
SecondsBetweenTests int
Lastreq int
State *State
Base uint32
Complete uint32
DBStates []*DBState
}
const SecondsBetweenTests = 3 // Default
func (list *DBStateList) String() string {
str := "\nDBStates\n"
str = fmt.Sprintf("%s Base = %d\n", str, list.Base)
str = fmt.Sprintf("%s timestamp = %s\n", str, list.LastTime.String())
str = fmt.Sprintf("%s Complete = %d\n", str, list.Complete)
rec := "M"
last := ""
for i, ds := range list.DBStates {
rec = "M"
if ds != nil && ds.DirectoryBlock != nil {
list.State.DBMutex.Lock()
dblk, _ := list.State.DB.FetchDBlockByHash(ds.DirectoryBlock.GetKeyMR())
list.State.DBMutex.Unlock()
if dblk != nil {
rec = "R"
}
if ds.Saved {
rec = "S"
}
}
if last != "" {
str = last
}
str = fmt.Sprintf("%s %1s-DState\n DState Height: %d\n%s", str, rec, list.Base+uint32(i), ds.String())
if rec == "M" && last == "" {
last = str
}
}
return str
}
func (ds *DBState) String() string {
str := ""
if ds == nil {
str = " DBState = <nil>\n"
} else if ds.DirectoryBlock == nil {
str = " Directory Block = <nil>\n"
} else {
str = fmt.Sprintf("%s DBlk Height = %v\n", str, ds.DirectoryBlock.GetHeader().GetDBHeight())
str = fmt.Sprintf("%s DBlock = %x \n", str, ds.DirectoryBlock.GetHash().Bytes()[:5])
str = fmt.Sprintf("%s ABlock = %x \n", str, ds.AdminBlock.GetHash().Bytes()[:5])
str = fmt.Sprintf("%s FBlock = %x \n", str, ds.FactoidBlock.GetHash().Bytes()[:5])
str = fmt.Sprintf("%s ECBlock = %x \n", str, ds.EntryCreditBlock.GetHash().Bytes()[:5])
}
return str
}
func (list *DBStateList) GetHighestRecordedBlock() uint32 {
ht := uint32(0)
for i, dbstate := range list.DBStates {
if dbstate != nil && dbstate.Saved {
ht = list.Base + uint32(i)
}
}
return ht
}
// Once a second at most, we check to see if we need to pull down some blocks to catch up.
func (list *DBStateList) Catchup() {
now := list.State.GetTimestamp()
dbsHeight := list.GetHighestRecordedBlock()
// We only check if we need updates once every so often.
if int(now)/1000-int(list.LastTime)/1000 < SecondsBetweenTests {
return
}
list.LastTime = now
begin := -1
end := -1
// Find the first range of blocks that we don't have.
for i, v := range list.DBStates {
if (v == nil || v.DirectoryBlock == nil) && begin < 0 {
begin = i
}
if v == nil {
end = i
}
}
if begin > 0 {
begin += int(list.Base)
end += int(list.Base)
} else {
plHeight := list.State.GetHighestKnownBlock()
// Don't worry about the block initialization case.
if plHeight < 1 {
return
}
if plHeight > dbsHeight && plHeight-dbsHeight > 1 {
list.State.ProcessLists.Reset(dbsHeight)
begin = int(dbsHeight + 1)
end = int(plHeight - 1)
} else {
return
}
}
list.Lastreq = begin
end2 := begin + 400
if end < end2 {
end2 = end
}
msg := messages.NewDBStateMissing(list.State, uint32(begin), uint32(end2))
if msg != nil {
list.State.NetworkOutMsgQueue() <- msg
list.State.stallQueue = make(chan interfaces.IMsg, 10000)
list.State.NewMinute()
}
}
func (list *DBStateList) | () (progress bool) {
list.Catchup()
for i, d := range list.DBStates {
// Must process blocks in sequence. Missing a block says we must stop.
if d == nil {
return
}
if d.Saved {
continue
}
// Make sure the directory block is properly synced up with the prior block, if there
// is one.
list.State.DBMutex.Lock()
dblk, _ := list.State.DB.FetchDBlockByKeyMR(d.DirectoryBlock.GetKeyMR())
list.State.DBMutex.Unlock()
if dblk == nil {
if i > 0 {
p := list.DBStates[i-1]
if !p.Saved {
continue
}
}
list.State.DBMutex.Lock()
list.State.DB.StartMultiBatch()
list.State.DBMutex.Unlock()
//fmt.Println("Saving DBHeight ", d.DirectoryBlock.GetHeader().GetDBHeight(), " on ", list.State.GetFactomNodeName())
// If we have previous blocks, update blocks that this follower potentially constructed. We can optimize and skip
// this step if we got the block from a peer. TODO we must however check the sigantures on the
// block before we write it to disk.
if i > 0 {
p := list.DBStates[i-1]
hash, err := p.AdminBlock.FullHash()
if err != nil {
return
}
hash, err = p.EntryCreditBlock.HeaderHash()
if err != nil {
return
}
d.EntryCreditBlock.GetHeader().SetPrevHeaderHash(hash)
hash, err = p.EntryCreditBlock.Hash()
if err != nil {
return
}
d.EntryCreditBlock.GetHeader().SetPrevFullHash(hash)
d.AdminBlock.GetHeader().SetPrevFullHash(hash)
p.FactoidBlock.SetDBHeight(p.DirectoryBlock.GetHeader().GetDBHeight())
d.FactoidBlock.SetDBHeight(d.DirectoryBlock.GetHeader().GetDBHeight())
d.FactoidBlock.SetPrevKeyMR(p.FactoidBlock.GetKeyMR().Bytes())
d.FactoidBlock.SetPrevFullHash(p.FactoidBlock.GetPrevFullHash().Bytes())
d.DirectoryBlock.GetHeader().SetPrevFullHash(p.DirectoryBlock.GetHeader().GetFullHash())
d.DirectoryBlock.GetHeader().SetPrevKeyMR(p.DirectoryBlock.GetKeyMR())
d.DirectoryBlock.GetHeader().SetTimestamp(0)
d.DirectoryBlock.GetDBEntries()[0].SetKeyMR(d.AdminBlock.GetHash())
d.DirectoryBlock.GetDBEntries()[1].SetKeyMR(d.EntryCreditBlock.GetHash())
d.DirectoryBlock.GetDBEntries()[2].SetKeyMR(d.FactoidBlock.GetHash())
pl := list.State.ProcessLists.Get(d.DirectoryBlock.GetHeader().GetDBHeight())
for _, eb := range pl.NewEBlocks {
key, err := eb.KeyMR()
if err != nil {
panic(err.Error())
}
d.DirectoryBlock.AddEntry(eb.GetChainID(), key)
}
d.DirectoryBlock.GetKeyMR()
_, err = d.DirectoryBlock.BuildBodyMR()
if err != nil {
panic(err.Error())
}
}
list.State.DBMutex.Lock()
if err := list.State.DB.ProcessDBlockMultiBatch(d.DirectoryBlock); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
if err := list.State.DB.ProcessABlockMultiBatch(d.AdminBlock); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
if err := list.State.DB.ProcessFBlockMultiBatch(d.FactoidBlock); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
if err := list.State.DB.ProcessECBlockMultiBatch(d.EntryCreditBlock, false); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
pl := list.State.ProcessLists.Get(d.DirectoryBlock.GetHeader().GetDBHeight())
for _, eb := range pl.NewEBlocks {
if err := list.State.DB.ProcessEBlockMultiBatch(eb, false); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
for _, e := range eb.GetBody().GetEBEntries() {
if err := list.State.DB.InsertEntry(pl.NewEntries[e.Fixed()]); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
}
}
if err := list.State.DB.ExecuteMultiBatch(); err != nil {
list.State.DBMutex.Unlock()
panic(err.Error())
}
list.State.DBMutex.Unlock()
}
list.State.DBMutex.Lock()
dblk2, _ := list.State.DB.FetchDBlockByKeyMR(d.DirectoryBlock.GetKeyMR())
list.State.DBMutex.Unlock()
if dblk2 == nil {
fmt.Printf("Failed to save the Directory Block %d %x\n",
d.DirectoryBlock.GetHeader().GetDBHeight(),
d.DirectoryBlock.GetKeyMR().Bytes()[:3])
}
list.LastTime = list.State.GetTimestamp() // If I saved or processed stuff, I'm good for a while
d.Saved = true // Only after all is done will I admit this state has been saved.
// Any updates required to the state as established by the AdminBlock are applied here.
d.AdminBlock.UpdateState(list.State)
// Process the Factoid End of Block
fs := list.State.GetFactoidState()
fs.AddTransactionBlock(d.FactoidBlock)
fs.AddECBlock(d.EntryCreditBlock)
fs.ProcessEndOfBlock(list.State)
// Step my counter of Complete blocks
if uint32(i) > list.Complete {
list.Complete = uint32(i)
}
progress = true
}
return
}
func (list *DBStateList) Last() *DBState {
last := (*DBState)(nil)
for _, ds := range list.DBStates {
if ds == nil || ds.DirectoryBlock == nil {
return last
}
last = ds
}
return last
}
func (list *DBStateList) Highest() uint32 {
high := list.Base + uint32(len(list.DBStates)) - 1
if high == 0 && len(list.DBStates) == 1 {
return 1
}
return high
}
func (list *DBStateList) Put(dbState *DBState) {
// Hold off on any requests if I'm actually processing...
list.LastTime = list.State.GetTimestamp()
dblk := dbState.DirectoryBlock
dbheight := dblk.GetHeader().GetDBHeight()
// Count completed states, starting from the beginning (since base starts at
// zero.
cnt := 0
for i, v := range list.DBStates {
if v == nil || v.DirectoryBlock == nil || !v.Saved {
if v != nil && v.DirectoryBlock == nil {
list.DBStates[i] = nil
}
break
}
cnt++
}
keep := uint32(2) // How many states to keep around; debugging helps with more.
if uint32(cnt) > keep {
var dbstates []*DBState
dbstates = append(dbstates, list.DBStates[cnt-int(keep):]...)
list.DBStates = dbstates
list.Base = list.Base + uint32(cnt) - keep
list.Complete = list.Complete - uint32(cnt) + keep
}
index := int(dbheight) - int(list.Base)
// If we have already processed this State, ignore it.
if index < int(list.Complete) {
if list.State.GetOut() {
list.State.Println("Ignoring! Block vs Base: ", dbheight, "/", list.Base)
}
return
}
// make room for this entry.
for len(list.DBStates) <= index {
list.DBStates = append(list.DBStates, nil)
}
if list.DBStates[index] == nil {
list.DBStates[index] = dbState
}
hash, err := dbState.AdminBlock.GetKeyMR()
if err != nil {
panic(err)
}
dbState.DirectoryBlock.GetDBEntries()[0].SetKeyMR(hash)
hash, err = dbState.EntryCreditBlock.Hash()
if err != nil {
panic(err)
}
dbState.DirectoryBlock.GetDBEntries()[1].SetKeyMR(hash)
hash = dbState.FactoidBlock.GetHash()
dbState.DirectoryBlock.GetDBEntries()[2].SetKeyMR(hash)
}
func (list *DBStateList) Get(height uint32) *DBState {
i := int(height) - int(list.Base)
if i < 0 || i >= len(list.DBStates) {
return nil
}
return list.DBStates[i]
}
func (list *DBStateList) NewDBState(isNew bool,
directoryBlock interfaces.IDirectoryBlock,
adminBlock interfaces.IAdminBlock,
factoidBlock interfaces.IFBlock,
entryCreditBlock interfaces.IEntryCreditBlock) *DBState {
dbState := new(DBState)
dbState.DBHash = directoryBlock.GetHash()
dbState.ABHash = adminBlock.GetHash()
dbState.FBHash = factoidBlock.GetHash()
dbState.ECHash = entryCreditBlock.GetHash()
dbState.isNew = isNew
dbState.DirectoryBlock = directoryBlock
dbState.AdminBlock = adminBlock
dbState.FactoidBlock = factoidBlock
dbState.EntryCreditBlock = entryCreditBlock
list.Put(dbState)
return dbState
}
| UpdateState | identifier_name |
table.go | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"strings"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
func (p *planner) getVirtualTabler() VirtualTabler {
return p.extendedEvalCtx.VirtualSchemas
}
// createDropDatabaseJob queues a job for dropping a database.
func (p *planner) createDropDatabaseJob(
ctx context.Context,
databaseID descpb.ID,
schemasToDrop []descpb.ID,
tableDropDetails []jobspb.DroppedTableDetails,
typesToDrop []*typedesc.Mutable,
jobDesc string,
) error {
// TODO (lucy): This should probably be deleting the queued jobs for all the
// tables being dropped, so that we don't have duplicate schema changers.
tableIDs := make([]descpb.ID, 0, len(tableDropDetails))
for _, d := range tableDropDetails {
tableIDs = append(tableIDs, d.ID)
}
typeIDs := make([]descpb.ID, 0, len(typesToDrop))
for _, t := range typesToDrop {
typeIDs = append(typeIDs, t.ID)
}
jobRecord := jobs.Record{
Description: jobDesc,
Username: p.User(),
DescriptorIDs: tableIDs,
Details: jobspb.SchemaChangeDetails{
DroppedSchemas: schemasToDrop,
DroppedTables: tableDropDetails,
DroppedTypes: typeIDs,
DroppedDatabaseID: databaseID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
newJob, err := p.extendedEvalCtx.QueueJob(ctx, jobRecord)
if err != nil {
return err
}
log.Infof(ctx, "queued new drop database job %d for database %d", newJob.ID(), databaseID)
return nil
}
// CreateNonDropDatabaseChangeJob covers all database descriptor updates other
// than dropping the database.
// TODO (lucy): This should ideally look into the set of queued jobs so that we
// don't queue multiple jobs for the same database.
func (p *planner) createNonDropDatabaseChangeJob(
ctx context.Context, databaseID descpb.ID, jobDesc string,
) error {
jobRecord := jobs.Record{
Description: jobDesc,
Username: p.User(),
Details: jobspb.SchemaChangeDetails{
DescID: databaseID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
newJob, err := p.extendedEvalCtx.QueueJob(ctx, jobRecord)
if err != nil {
return err
}
log.Infof(ctx, "queued new database schema change job %d for database %d", newJob.ID(), databaseID)
return nil
}
// createOrUpdateSchemaChangeJob queues a new job for the schema change if there
// is no existing schema change job for the table, or updates the existing job
// if there is one.
func (p *planner) createOrUpdateSchemaChangeJob(
ctx context.Context, tableDesc *tabledesc.Mutable, jobDesc string, mutationID descpb.MutationID,
) error {
var job *jobs.Job
if cachedJob, ok := p.extendedEvalCtx.SchemaChangeJobCache[tableDesc.ID]; ok {
job = cachedJob
}
if p.extendedEvalCtx.ExecCfg.TestingKnobs.RunAfterSCJobsCacheLookup != nil {
p.extendedEvalCtx.ExecCfg.TestingKnobs.RunAfterSCJobsCacheLookup(job)
}
var spanList []jobspb.ResumeSpanList
jobExists := job != nil
if jobExists {
spanList = job.Details().(jobspb.SchemaChangeDetails).ResumeSpanList
}
span := tableDesc.PrimaryIndexSpan(p.ExecCfg().Codec)
for i := len(tableDesc.ClusterVersion.Mutations) + len(spanList); i < len(tableDesc.Mutations); i++ {
spanList = append(spanList,
jobspb.ResumeSpanList{
ResumeSpans: []roachpb.Span{span},
},
)
}
if !jobExists {
// Queue a new job.
jobRecord := jobs.Record{
Description: jobDesc,
Username: p.User(),
DescriptorIDs: descpb.IDs{tableDesc.GetID()},
Details: jobspb.SchemaChangeDetails{
DescID: tableDesc.ID,
TableMutationID: mutationID,
ResumeSpanList: spanList,
// The version distinction for database jobs doesn't matter for jobs on
// tables.
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
}
newJob, err := p.extendedEvalCtx.QueueJob(ctx, jobRecord)
if err != nil {
return err
}
p.extendedEvalCtx.SchemaChangeJobCache[tableDesc.ID] = newJob
// Only add a MutationJob if there's an associated mutation.
// TODO (lucy): get rid of this when we get rid of MutationJobs.
if mutationID != descpb.InvalidMutationID {
tableDesc.MutationJobs = append(tableDesc.MutationJobs, descpb.TableDescriptor_MutationJob{
MutationID: mutationID, JobID: int64(newJob.ID())})
}
log.Infof(ctx, "queued new schema change job %d for table %d, mutation %d",
newJob.ID(), tableDesc.ID, mutationID)
} else {
// Update the existing job.
oldDetails := job.Details().(jobspb.SchemaChangeDetails)
newDetails := jobspb.SchemaChangeDetails{
DescID: tableDesc.ID,
TableMutationID: oldDetails.TableMutationID,
ResumeSpanList: spanList,
// The version distinction for database jobs doesn't matter for jobs on
// tables.
FormatVersion: jobspb.DatabaseJobFormatVersion,
}
if oldDetails.TableMutationID != descpb.InvalidMutationID {
// The previous queued schema change job was associated with a mutation,
// which must have the same mutation ID as this schema change, so just
// check for consistency.
if mutationID != descpb.InvalidMutationID && mutationID != oldDetails.TableMutationID {
return errors.AssertionFailedf(
"attempted to update job for mutation %d, but job already exists with mutation %d",
mutationID, oldDetails.TableMutationID)
}
} else {
// The previous queued schema change job didn't have a mutation.
if mutationID != descpb.InvalidMutationID {
newDetails.TableMutationID = mutationID
// Also add a MutationJob on the table descriptor.
// TODO (lucy): get rid of this when we get rid of MutationJobs.
tableDesc.MutationJobs = append(tableDesc.MutationJobs, descpb.TableDescriptor_MutationJob{
MutationID: mutationID, JobID: int64(job.ID())})
}
}
if err := job.SetDetails(ctx, p.txn, newDetails); err != nil {
return err
}
if jobDesc != "" {
if err := job.SetDescription(
ctx, p.txn,
func(ctx context.Context, description string) (string, error) {
return strings.Join([]string{description, jobDesc}, ";"), nil
},
); err != nil {
return err
}
}
log.Infof(ctx, "job %d: updated with schema change for table %d, mutation %d",
job.ID(), tableDesc.ID, mutationID)
}
return nil
}
// writeSchemaChange effectively writes a table descriptor to the
// database within the current planner transaction, and queues up
// a schema changer for future processing.
// TODO (lucy): The way job descriptions are handled needs improvement.
// Currently, whenever we update a job, the provided job description string, if
// non-empty, is appended to the end of the existing description, regardless of
// whether the particular schema change written in this method call came from a
// separate statement in the same transaction, or from updating a dependent
// table descriptor during a schema change to another table, or from a step in a
// larger schema change to the same table.
func (p *planner) writeSchemaChange(
ctx context.Context, tableDesc *tabledesc.Mutable, mutationID descpb.MutationID, jobDesc string,
) error {
if !p.EvalContext().TxnImplicit {
telemetry.Inc(sqltelemetry.SchemaChangeInExplicitTxnCounter)
}
if tableDesc.Dropped() {
// We don't allow schema changes on a dropped table.
return errors.Errorf("no schema changes allowed on table %q as it is being dropped",
tableDesc.Name)
}
if !tableDesc.IsNew() {
if err := p.createOrUpdateSchemaChangeJob(ctx, tableDesc, jobDesc, mutationID); err != nil {
return err
}
}
return p.writeTableDesc(ctx, tableDesc)
}
func (p *planner) writeSchemaChangeToBatch(
ctx context.Context, tableDesc *tabledesc.Mutable, b *kv.Batch,
) error {
if !p.EvalContext().TxnImplicit {
telemetry.Inc(sqltelemetry.SchemaChangeInExplicitTxnCounter)
}
if tableDesc.Dropped() {
// We don't allow schema changes on a dropped table.
return errors.Errorf("no schema changes allowed on table %q as it is being dropped",
tableDesc.Name)
}
return p.writeTableDescToBatch(ctx, tableDesc, b)
}
func (p *planner) writeDropTable(
ctx context.Context, tableDesc *tabledesc.Mutable, queueJob bool, jobDesc string,
) error {
if queueJob {
if err := p.createOrUpdateSchemaChangeJob(ctx, tableDesc, jobDesc, descpb.InvalidMutationID); err != nil {
return err
}
}
return p.writeTableDesc(ctx, tableDesc)
}
func (p *planner) writeTableDesc(ctx context.Context, tableDesc *tabledesc.Mutable) error {
b := p.txn.NewBatch()
if err := p.writeTableDescToBatch(ctx, tableDesc, b); err != nil {
return err
}
return p.txn.Run(ctx, b)
}
func (p *planner) | (
ctx context.Context, tableDesc *tabledesc.Mutable, b *kv.Batch,
) error {
if tableDesc.IsVirtualTable() {
return errors.AssertionFailedf("virtual descriptors cannot be stored, found: %v", tableDesc)
}
if tableDesc.IsNew() {
if err := runSchemaChangesInTxn(
ctx, p, tableDesc, p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
); err != nil {
return err
}
}
if err := catalog.ValidateSelf(tableDesc); err != nil {
return errors.AssertionFailedf("table descriptor is not valid: %s\n%v", err, tableDesc)
}
return p.Descriptors().WriteDescToBatch(
ctx, p.extendedEvalCtx.Tracing.KVTracingEnabled(), tableDesc, b,
)
}
| writeTableDescToBatch | identifier_name |
table.go | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"strings"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
func (p *planner) getVirtualTabler() VirtualTabler {
return p.extendedEvalCtx.VirtualSchemas
}
// createDropDatabaseJob queues a job for dropping a database.
func (p *planner) createDropDatabaseJob(
ctx context.Context,
databaseID descpb.ID,
schemasToDrop []descpb.ID,
tableDropDetails []jobspb.DroppedTableDetails,
typesToDrop []*typedesc.Mutable,
jobDesc string,
) error {
// TODO (lucy): This should probably be deleting the queued jobs for all the
// tables being dropped, so that we don't have duplicate schema changers.
tableIDs := make([]descpb.ID, 0, len(tableDropDetails))
for _, d := range tableDropDetails {
tableIDs = append(tableIDs, d.ID)
}
typeIDs := make([]descpb.ID, 0, len(typesToDrop))
for _, t := range typesToDrop {
typeIDs = append(typeIDs, t.ID)
}
jobRecord := jobs.Record{
Description: jobDesc,
Username: p.User(),
DescriptorIDs: tableIDs,
Details: jobspb.SchemaChangeDetails{
DroppedSchemas: schemasToDrop,
DroppedTables: tableDropDetails,
DroppedTypes: typeIDs,
DroppedDatabaseID: databaseID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
newJob, err := p.extendedEvalCtx.QueueJob(ctx, jobRecord)
if err != nil {
return err
}
log.Infof(ctx, "queued new drop database job %d for database %d", newJob.ID(), databaseID)
return nil
}
// CreateNonDropDatabaseChangeJob covers all database descriptor updates other
// than dropping the database.
// TODO (lucy): This should ideally look into the set of queued jobs so that we
// don't queue multiple jobs for the same database.
func (p *planner) createNonDropDatabaseChangeJob(
ctx context.Context, databaseID descpb.ID, jobDesc string,
) error {
jobRecord := jobs.Record{
Description: jobDesc,
Username: p.User(),
Details: jobspb.SchemaChangeDetails{
DescID: databaseID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
newJob, err := p.extendedEvalCtx.QueueJob(ctx, jobRecord)
if err != nil {
return err
}
log.Infof(ctx, "queued new database schema change job %d for database %d", newJob.ID(), databaseID)
return nil
}
// createOrUpdateSchemaChangeJob queues a new job for the schema change if there
// is no existing schema change job for the table, or updates the existing job
// if there is one.
func (p *planner) createOrUpdateSchemaChangeJob(
ctx context.Context, tableDesc *tabledesc.Mutable, jobDesc string, mutationID descpb.MutationID,
) error {
var job *jobs.Job
if cachedJob, ok := p.extendedEvalCtx.SchemaChangeJobCache[tableDesc.ID]; ok |
if p.extendedEvalCtx.ExecCfg.TestingKnobs.RunAfterSCJobsCacheLookup != nil {
p.extendedEvalCtx.ExecCfg.TestingKnobs.RunAfterSCJobsCacheLookup(job)
}
var spanList []jobspb.ResumeSpanList
jobExists := job != nil
if jobExists {
spanList = job.Details().(jobspb.SchemaChangeDetails).ResumeSpanList
}
span := tableDesc.PrimaryIndexSpan(p.ExecCfg().Codec)
for i := len(tableDesc.ClusterVersion.Mutations) + len(spanList); i < len(tableDesc.Mutations); i++ {
spanList = append(spanList,
jobspb.ResumeSpanList{
ResumeSpans: []roachpb.Span{span},
},
)
}
if !jobExists {
// Queue a new job.
jobRecord := jobs.Record{
Description: jobDesc,
Username: p.User(),
DescriptorIDs: descpb.IDs{tableDesc.GetID()},
Details: jobspb.SchemaChangeDetails{
DescID: tableDesc.ID,
TableMutationID: mutationID,
ResumeSpanList: spanList,
// The version distinction for database jobs doesn't matter for jobs on
// tables.
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
}
newJob, err := p.extendedEvalCtx.QueueJob(ctx, jobRecord)
if err != nil {
return err
}
p.extendedEvalCtx.SchemaChangeJobCache[tableDesc.ID] = newJob
// Only add a MutationJob if there's an associated mutation.
// TODO (lucy): get rid of this when we get rid of MutationJobs.
if mutationID != descpb.InvalidMutationID {
tableDesc.MutationJobs = append(tableDesc.MutationJobs, descpb.TableDescriptor_MutationJob{
MutationID: mutationID, JobID: int64(newJob.ID())})
}
log.Infof(ctx, "queued new schema change job %d for table %d, mutation %d",
newJob.ID(), tableDesc.ID, mutationID)
} else {
// Update the existing job.
oldDetails := job.Details().(jobspb.SchemaChangeDetails)
newDetails := jobspb.SchemaChangeDetails{
DescID: tableDesc.ID,
TableMutationID: oldDetails.TableMutationID,
ResumeSpanList: spanList,
// The version distinction for database jobs doesn't matter for jobs on
// tables.
FormatVersion: jobspb.DatabaseJobFormatVersion,
}
if oldDetails.TableMutationID != descpb.InvalidMutationID {
// The previous queued schema change job was associated with a mutation,
// which must have the same mutation ID as this schema change, so just
// check for consistency.
if mutationID != descpb.InvalidMutationID && mutationID != oldDetails.TableMutationID {
return errors.AssertionFailedf(
"attempted to update job for mutation %d, but job already exists with mutation %d",
mutationID, oldDetails.TableMutationID)
}
} else {
// The previous queued schema change job didn't have a mutation.
if mutationID != descpb.InvalidMutationID {
newDetails.TableMutationID = mutationID
// Also add a MutationJob on the table descriptor.
// TODO (lucy): get rid of this when we get rid of MutationJobs.
tableDesc.MutationJobs = append(tableDesc.MutationJobs, descpb.TableDescriptor_MutationJob{
MutationID: mutationID, JobID: int64(job.ID())})
}
}
if err := job.SetDetails(ctx, p.txn, newDetails); err != nil {
return err
}
if jobDesc != "" {
if err := job.SetDescription(
ctx, p.txn,
func(ctx context.Context, description string) (string, error) {
return strings.Join([]string{description, jobDesc}, ";"), nil
},
); err != nil {
return err
}
}
log.Infof(ctx, "job %d: updated with schema change for table %d, mutation %d",
job.ID(), tableDesc.ID, mutationID)
}
return nil
}
// writeSchemaChange effectively writes a table descriptor to the
// database within the current planner transaction, and queues up
// a schema changer for future processing.
// TODO (lucy): The way job descriptions are handled needs improvement.
// Currently, whenever we update a job, the provided job description string, if
// non-empty, is appended to the end of the existing description, regardless of
// whether the particular schema change written in this method call came from a
// separate statement in the same transaction, or from updating a dependent
// table descriptor during a schema change to another table, or from a step in a
// larger schema change to the same table.
func (p *planner) writeSchemaChange(
ctx context.Context, tableDesc *tabledesc.Mutable, mutationID descpb.MutationID, jobDesc string,
) error {
if !p.EvalContext().TxnImplicit {
telemetry.Inc(sqltelemetry.SchemaChangeInExplicitTxnCounter)
}
if tableDesc.Dropped() {
// We don't allow schema changes on a dropped table.
return errors.Errorf("no schema changes allowed on table %q as it is being dropped",
tableDesc.Name)
}
if !tableDesc.IsNew() {
if err := p.createOrUpdateSchemaChangeJob(ctx, tableDesc, jobDesc, mutationID); err != nil {
return err
}
}
return p.writeTableDesc(ctx, tableDesc)
}
func (p *planner) writeSchemaChangeToBatch(
ctx context.Context, tableDesc *tabledesc.Mutable, b *kv.Batch,
) error {
if !p.EvalContext().TxnImplicit {
telemetry.Inc(sqltelemetry.SchemaChangeInExplicitTxnCounter)
}
if tableDesc.Dropped() {
// We don't allow schema changes on a dropped table.
return errors.Errorf("no schema changes allowed on table %q as it is being dropped",
tableDesc.Name)
}
return p.writeTableDescToBatch(ctx, tableDesc, b)
}
func (p *planner) writeDropTable(
ctx context.Context, tableDesc *tabledesc.Mutable, queueJob bool, jobDesc string,
) error {
if queueJob {
if err := p.createOrUpdateSchemaChangeJob(ctx, tableDesc, jobDesc, descpb.InvalidMutationID); err != nil {
return err
}
}
return p.writeTableDesc(ctx, tableDesc)
}
func (p *planner) writeTableDesc(ctx context.Context, tableDesc *tabledesc.Mutable) error {
b := p.txn.NewBatch()
if err := p.writeTableDescToBatch(ctx, tableDesc, b); err != nil {
return err
}
return p.txn.Run(ctx, b)
}
func (p *planner) writeTableDescToBatch(
ctx context.Context, tableDesc *tabledesc.Mutable, b *kv.Batch,
) error {
if tableDesc.IsVirtualTable() {
return errors.AssertionFailedf("virtual descriptors cannot be stored, found: %v", tableDesc)
}
if tableDesc.IsNew() {
if err := runSchemaChangesInTxn(
ctx, p, tableDesc, p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
); err != nil {
return err
}
}
if err := catalog.ValidateSelf(tableDesc); err != nil {
return errors.AssertionFailedf("table descriptor is not valid: %s\n%v", err, tableDesc)
}
return p.Descriptors().WriteDescToBatch(
ctx, p.extendedEvalCtx.Tracing.KVTracingEnabled(), tableDesc, b,
)
}
| {
job = cachedJob
} | conditional_block |
table.go | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"strings"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
func (p *planner) getVirtualTabler() VirtualTabler {
return p.extendedEvalCtx.VirtualSchemas
}
// createDropDatabaseJob queues a job for dropping a database.
func (p *planner) createDropDatabaseJob(
ctx context.Context,
databaseID descpb.ID,
schemasToDrop []descpb.ID,
tableDropDetails []jobspb.DroppedTableDetails,
typesToDrop []*typedesc.Mutable,
jobDesc string,
) error {
// TODO (lucy): This should probably be deleting the queued jobs for all the
// tables being dropped, so that we don't have duplicate schema changers.
tableIDs := make([]descpb.ID, 0, len(tableDropDetails))
for _, d := range tableDropDetails {
tableIDs = append(tableIDs, d.ID)
}
typeIDs := make([]descpb.ID, 0, len(typesToDrop))
for _, t := range typesToDrop {
typeIDs = append(typeIDs, t.ID)
}
jobRecord := jobs.Record{
Description: jobDesc,
Username: p.User(),
DescriptorIDs: tableIDs,
Details: jobspb.SchemaChangeDetails{
DroppedSchemas: schemasToDrop,
DroppedTables: tableDropDetails,
DroppedTypes: typeIDs,
DroppedDatabaseID: databaseID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
newJob, err := p.extendedEvalCtx.QueueJob(ctx, jobRecord)
if err != nil {
return err
}
log.Infof(ctx, "queued new drop database job %d for database %d", newJob.ID(), databaseID)
return nil
}
// CreateNonDropDatabaseChangeJob covers all database descriptor updates other
// than dropping the database.
// TODO (lucy): This should ideally look into the set of queued jobs so that we | ctx context.Context, databaseID descpb.ID, jobDesc string,
) error {
jobRecord := jobs.Record{
Description: jobDesc,
Username: p.User(),
Details: jobspb.SchemaChangeDetails{
DescID: databaseID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
newJob, err := p.extendedEvalCtx.QueueJob(ctx, jobRecord)
if err != nil {
return err
}
log.Infof(ctx, "queued new database schema change job %d for database %d", newJob.ID(), databaseID)
return nil
}
// createOrUpdateSchemaChangeJob queues a new job for the schema change if there
// is no existing schema change job for the table, or updates the existing job
// if there is one.
func (p *planner) createOrUpdateSchemaChangeJob(
ctx context.Context, tableDesc *tabledesc.Mutable, jobDesc string, mutationID descpb.MutationID,
) error {
var job *jobs.Job
if cachedJob, ok := p.extendedEvalCtx.SchemaChangeJobCache[tableDesc.ID]; ok {
job = cachedJob
}
if p.extendedEvalCtx.ExecCfg.TestingKnobs.RunAfterSCJobsCacheLookup != nil {
p.extendedEvalCtx.ExecCfg.TestingKnobs.RunAfterSCJobsCacheLookup(job)
}
var spanList []jobspb.ResumeSpanList
jobExists := job != nil
if jobExists {
spanList = job.Details().(jobspb.SchemaChangeDetails).ResumeSpanList
}
span := tableDesc.PrimaryIndexSpan(p.ExecCfg().Codec)
for i := len(tableDesc.ClusterVersion.Mutations) + len(spanList); i < len(tableDesc.Mutations); i++ {
spanList = append(spanList,
jobspb.ResumeSpanList{
ResumeSpans: []roachpb.Span{span},
},
)
}
if !jobExists {
// Queue a new job.
jobRecord := jobs.Record{
Description: jobDesc,
Username: p.User(),
DescriptorIDs: descpb.IDs{tableDesc.GetID()},
Details: jobspb.SchemaChangeDetails{
DescID: tableDesc.ID,
TableMutationID: mutationID,
ResumeSpanList: spanList,
// The version distinction for database jobs doesn't matter for jobs on
// tables.
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
}
newJob, err := p.extendedEvalCtx.QueueJob(ctx, jobRecord)
if err != nil {
return err
}
p.extendedEvalCtx.SchemaChangeJobCache[tableDesc.ID] = newJob
// Only add a MutationJob if there's an associated mutation.
// TODO (lucy): get rid of this when we get rid of MutationJobs.
if mutationID != descpb.InvalidMutationID {
tableDesc.MutationJobs = append(tableDesc.MutationJobs, descpb.TableDescriptor_MutationJob{
MutationID: mutationID, JobID: int64(newJob.ID())})
}
log.Infof(ctx, "queued new schema change job %d for table %d, mutation %d",
newJob.ID(), tableDesc.ID, mutationID)
} else {
// Update the existing job.
oldDetails := job.Details().(jobspb.SchemaChangeDetails)
newDetails := jobspb.SchemaChangeDetails{
DescID: tableDesc.ID,
TableMutationID: oldDetails.TableMutationID,
ResumeSpanList: spanList,
// The version distinction for database jobs doesn't matter for jobs on
// tables.
FormatVersion: jobspb.DatabaseJobFormatVersion,
}
if oldDetails.TableMutationID != descpb.InvalidMutationID {
// The previous queued schema change job was associated with a mutation,
// which must have the same mutation ID as this schema change, so just
// check for consistency.
if mutationID != descpb.InvalidMutationID && mutationID != oldDetails.TableMutationID {
return errors.AssertionFailedf(
"attempted to update job for mutation %d, but job already exists with mutation %d",
mutationID, oldDetails.TableMutationID)
}
} else {
// The previous queued schema change job didn't have a mutation.
if mutationID != descpb.InvalidMutationID {
newDetails.TableMutationID = mutationID
// Also add a MutationJob on the table descriptor.
// TODO (lucy): get rid of this when we get rid of MutationJobs.
tableDesc.MutationJobs = append(tableDesc.MutationJobs, descpb.TableDescriptor_MutationJob{
MutationID: mutationID, JobID: int64(job.ID())})
}
}
if err := job.SetDetails(ctx, p.txn, newDetails); err != nil {
return err
}
if jobDesc != "" {
if err := job.SetDescription(
ctx, p.txn,
func(ctx context.Context, description string) (string, error) {
return strings.Join([]string{description, jobDesc}, ";"), nil
},
); err != nil {
return err
}
}
log.Infof(ctx, "job %d: updated with schema change for table %d, mutation %d",
job.ID(), tableDesc.ID, mutationID)
}
return nil
}
// writeSchemaChange effectively writes a table descriptor to the
// database within the current planner transaction, and queues up
// a schema changer for future processing.
// TODO (lucy): The way job descriptions are handled needs improvement.
// Currently, whenever we update a job, the provided job description string, if
// non-empty, is appended to the end of the existing description, regardless of
// whether the particular schema change written in this method call came from a
// separate statement in the same transaction, or from updating a dependent
// table descriptor during a schema change to another table, or from a step in a
// larger schema change to the same table.
func (p *planner) writeSchemaChange(
ctx context.Context, tableDesc *tabledesc.Mutable, mutationID descpb.MutationID, jobDesc string,
) error {
if !p.EvalContext().TxnImplicit {
telemetry.Inc(sqltelemetry.SchemaChangeInExplicitTxnCounter)
}
if tableDesc.Dropped() {
// We don't allow schema changes on a dropped table.
return errors.Errorf("no schema changes allowed on table %q as it is being dropped",
tableDesc.Name)
}
if !tableDesc.IsNew() {
if err := p.createOrUpdateSchemaChangeJob(ctx, tableDesc, jobDesc, mutationID); err != nil {
return err
}
}
return p.writeTableDesc(ctx, tableDesc)
}
func (p *planner) writeSchemaChangeToBatch(
ctx context.Context, tableDesc *tabledesc.Mutable, b *kv.Batch,
) error {
if !p.EvalContext().TxnImplicit {
telemetry.Inc(sqltelemetry.SchemaChangeInExplicitTxnCounter)
}
if tableDesc.Dropped() {
// We don't allow schema changes on a dropped table.
return errors.Errorf("no schema changes allowed on table %q as it is being dropped",
tableDesc.Name)
}
return p.writeTableDescToBatch(ctx, tableDesc, b)
}
func (p *planner) writeDropTable(
ctx context.Context, tableDesc *tabledesc.Mutable, queueJob bool, jobDesc string,
) error {
if queueJob {
if err := p.createOrUpdateSchemaChangeJob(ctx, tableDesc, jobDesc, descpb.InvalidMutationID); err != nil {
return err
}
}
return p.writeTableDesc(ctx, tableDesc)
}
func (p *planner) writeTableDesc(ctx context.Context, tableDesc *tabledesc.Mutable) error {
b := p.txn.NewBatch()
if err := p.writeTableDescToBatch(ctx, tableDesc, b); err != nil {
return err
}
return p.txn.Run(ctx, b)
}
func (p *planner) writeTableDescToBatch(
ctx context.Context, tableDesc *tabledesc.Mutable, b *kv.Batch,
) error {
if tableDesc.IsVirtualTable() {
return errors.AssertionFailedf("virtual descriptors cannot be stored, found: %v", tableDesc)
}
if tableDesc.IsNew() {
if err := runSchemaChangesInTxn(
ctx, p, tableDesc, p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
); err != nil {
return err
}
}
if err := catalog.ValidateSelf(tableDesc); err != nil {
return errors.AssertionFailedf("table descriptor is not valid: %s\n%v", err, tableDesc)
}
return p.Descriptors().WriteDescToBatch(
ctx, p.extendedEvalCtx.Tracing.KVTracingEnabled(), tableDesc, b,
)
} | // don't queue multiple jobs for the same database.
func (p *planner) createNonDropDatabaseChangeJob( | random_line_split |
table.go | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"strings"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
func (p *planner) getVirtualTabler() VirtualTabler {
return p.extendedEvalCtx.VirtualSchemas
}
// createDropDatabaseJob queues a job for dropping a database.
func (p *planner) createDropDatabaseJob(
ctx context.Context,
databaseID descpb.ID,
schemasToDrop []descpb.ID,
tableDropDetails []jobspb.DroppedTableDetails,
typesToDrop []*typedesc.Mutable,
jobDesc string,
) error |
// CreateNonDropDatabaseChangeJob covers all database descriptor updates other
// than dropping the database.
// TODO (lucy): This should ideally look into the set of queued jobs so that we
// don't queue multiple jobs for the same database.
func (p *planner) createNonDropDatabaseChangeJob(
ctx context.Context, databaseID descpb.ID, jobDesc string,
) error {
jobRecord := jobs.Record{
Description: jobDesc,
Username: p.User(),
Details: jobspb.SchemaChangeDetails{
DescID: databaseID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
newJob, err := p.extendedEvalCtx.QueueJob(ctx, jobRecord)
if err != nil {
return err
}
log.Infof(ctx, "queued new database schema change job %d for database %d", newJob.ID(), databaseID)
return nil
}
// createOrUpdateSchemaChangeJob queues a new job for the schema change if there
// is no existing schema change job for the table, or updates the existing job
// if there is one.
func (p *planner) createOrUpdateSchemaChangeJob(
ctx context.Context, tableDesc *tabledesc.Mutable, jobDesc string, mutationID descpb.MutationID,
) error {
var job *jobs.Job
if cachedJob, ok := p.extendedEvalCtx.SchemaChangeJobCache[tableDesc.ID]; ok {
job = cachedJob
}
if p.extendedEvalCtx.ExecCfg.TestingKnobs.RunAfterSCJobsCacheLookup != nil {
p.extendedEvalCtx.ExecCfg.TestingKnobs.RunAfterSCJobsCacheLookup(job)
}
var spanList []jobspb.ResumeSpanList
jobExists := job != nil
if jobExists {
spanList = job.Details().(jobspb.SchemaChangeDetails).ResumeSpanList
}
span := tableDesc.PrimaryIndexSpan(p.ExecCfg().Codec)
for i := len(tableDesc.ClusterVersion.Mutations) + len(spanList); i < len(tableDesc.Mutations); i++ {
spanList = append(spanList,
jobspb.ResumeSpanList{
ResumeSpans: []roachpb.Span{span},
},
)
}
if !jobExists {
// Queue a new job.
jobRecord := jobs.Record{
Description: jobDesc,
Username: p.User(),
DescriptorIDs: descpb.IDs{tableDesc.GetID()},
Details: jobspb.SchemaChangeDetails{
DescID: tableDesc.ID,
TableMutationID: mutationID,
ResumeSpanList: spanList,
// The version distinction for database jobs doesn't matter for jobs on
// tables.
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
}
newJob, err := p.extendedEvalCtx.QueueJob(ctx, jobRecord)
if err != nil {
return err
}
p.extendedEvalCtx.SchemaChangeJobCache[tableDesc.ID] = newJob
// Only add a MutationJob if there's an associated mutation.
// TODO (lucy): get rid of this when we get rid of MutationJobs.
if mutationID != descpb.InvalidMutationID {
tableDesc.MutationJobs = append(tableDesc.MutationJobs, descpb.TableDescriptor_MutationJob{
MutationID: mutationID, JobID: int64(newJob.ID())})
}
log.Infof(ctx, "queued new schema change job %d for table %d, mutation %d",
newJob.ID(), tableDesc.ID, mutationID)
} else {
// Update the existing job.
oldDetails := job.Details().(jobspb.SchemaChangeDetails)
newDetails := jobspb.SchemaChangeDetails{
DescID: tableDesc.ID,
TableMutationID: oldDetails.TableMutationID,
ResumeSpanList: spanList,
// The version distinction for database jobs doesn't matter for jobs on
// tables.
FormatVersion: jobspb.DatabaseJobFormatVersion,
}
if oldDetails.TableMutationID != descpb.InvalidMutationID {
// The previous queued schema change job was associated with a mutation,
// which must have the same mutation ID as this schema change, so just
// check for consistency.
if mutationID != descpb.InvalidMutationID && mutationID != oldDetails.TableMutationID {
return errors.AssertionFailedf(
"attempted to update job for mutation %d, but job already exists with mutation %d",
mutationID, oldDetails.TableMutationID)
}
} else {
// The previous queued schema change job didn't have a mutation.
if mutationID != descpb.InvalidMutationID {
newDetails.TableMutationID = mutationID
// Also add a MutationJob on the table descriptor.
// TODO (lucy): get rid of this when we get rid of MutationJobs.
tableDesc.MutationJobs = append(tableDesc.MutationJobs, descpb.TableDescriptor_MutationJob{
MutationID: mutationID, JobID: int64(job.ID())})
}
}
if err := job.SetDetails(ctx, p.txn, newDetails); err != nil {
return err
}
if jobDesc != "" {
if err := job.SetDescription(
ctx, p.txn,
func(ctx context.Context, description string) (string, error) {
return strings.Join([]string{description, jobDesc}, ";"), nil
},
); err != nil {
return err
}
}
log.Infof(ctx, "job %d: updated with schema change for table %d, mutation %d",
job.ID(), tableDesc.ID, mutationID)
}
return nil
}
// writeSchemaChange effectively writes a table descriptor to the
// database within the current planner transaction, and queues up
// a schema changer for future processing.
// TODO (lucy): The way job descriptions are handled needs improvement.
// Currently, whenever we update a job, the provided job description string, if
// non-empty, is appended to the end of the existing description, regardless of
// whether the particular schema change written in this method call came from a
// separate statement in the same transaction, or from updating a dependent
// table descriptor during a schema change to another table, or from a step in a
// larger schema change to the same table.
func (p *planner) writeSchemaChange(
ctx context.Context, tableDesc *tabledesc.Mutable, mutationID descpb.MutationID, jobDesc string,
) error {
if !p.EvalContext().TxnImplicit {
telemetry.Inc(sqltelemetry.SchemaChangeInExplicitTxnCounter)
}
if tableDesc.Dropped() {
// We don't allow schema changes on a dropped table.
return errors.Errorf("no schema changes allowed on table %q as it is being dropped",
tableDesc.Name)
}
if !tableDesc.IsNew() {
if err := p.createOrUpdateSchemaChangeJob(ctx, tableDesc, jobDesc, mutationID); err != nil {
return err
}
}
return p.writeTableDesc(ctx, tableDesc)
}
func (p *planner) writeSchemaChangeToBatch(
ctx context.Context, tableDesc *tabledesc.Mutable, b *kv.Batch,
) error {
if !p.EvalContext().TxnImplicit {
telemetry.Inc(sqltelemetry.SchemaChangeInExplicitTxnCounter)
}
if tableDesc.Dropped() {
// We don't allow schema changes on a dropped table.
return errors.Errorf("no schema changes allowed on table %q as it is being dropped",
tableDesc.Name)
}
return p.writeTableDescToBatch(ctx, tableDesc, b)
}
func (p *planner) writeDropTable(
ctx context.Context, tableDesc *tabledesc.Mutable, queueJob bool, jobDesc string,
) error {
if queueJob {
if err := p.createOrUpdateSchemaChangeJob(ctx, tableDesc, jobDesc, descpb.InvalidMutationID); err != nil {
return err
}
}
return p.writeTableDesc(ctx, tableDesc)
}
func (p *planner) writeTableDesc(ctx context.Context, tableDesc *tabledesc.Mutable) error {
b := p.txn.NewBatch()
if err := p.writeTableDescToBatch(ctx, tableDesc, b); err != nil {
return err
}
return p.txn.Run(ctx, b)
}
func (p *planner) writeTableDescToBatch(
ctx context.Context, tableDesc *tabledesc.Mutable, b *kv.Batch,
) error {
if tableDesc.IsVirtualTable() {
return errors.AssertionFailedf("virtual descriptors cannot be stored, found: %v", tableDesc)
}
if tableDesc.IsNew() {
if err := runSchemaChangesInTxn(
ctx, p, tableDesc, p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
); err != nil {
return err
}
}
if err := catalog.ValidateSelf(tableDesc); err != nil {
return errors.AssertionFailedf("table descriptor is not valid: %s\n%v", err, tableDesc)
}
return p.Descriptors().WriteDescToBatch(
ctx, p.extendedEvalCtx.Tracing.KVTracingEnabled(), tableDesc, b,
)
}
| {
// TODO (lucy): This should probably be deleting the queued jobs for all the
// tables being dropped, so that we don't have duplicate schema changers.
tableIDs := make([]descpb.ID, 0, len(tableDropDetails))
for _, d := range tableDropDetails {
tableIDs = append(tableIDs, d.ID)
}
typeIDs := make([]descpb.ID, 0, len(typesToDrop))
for _, t := range typesToDrop {
typeIDs = append(typeIDs, t.ID)
}
jobRecord := jobs.Record{
Description: jobDesc,
Username: p.User(),
DescriptorIDs: tableIDs,
Details: jobspb.SchemaChangeDetails{
DroppedSchemas: schemasToDrop,
DroppedTables: tableDropDetails,
DroppedTypes: typeIDs,
DroppedDatabaseID: databaseID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
newJob, err := p.extendedEvalCtx.QueueJob(ctx, jobRecord)
if err != nil {
return err
}
log.Infof(ctx, "queued new drop database job %d for database %d", newJob.ID(), databaseID)
return nil
} | identifier_body |
Script.js | var forceExecute = false;
// разбор аргументов командной строки
function getArgs() {
var args = new Object();
var query = location.search.substring(1).toLowerCase();
var pairs = query.split("&");
for (var i = 0; i < pairs.length; i++) {
var pos = pairs[i].indexOf('=');
if (-1 == pos) continue;
var argname = pairs[i].substring(0, pos);
var value = pairs[i].substring(pos + 1);
args[argname] = unescape(value);
}
return args;
}
var v_data_copy = {};
function InitDocuments() {
//Локализация
LocalizeHtmlTitles();
LoadXslt("Data_" + getCurrentPageLanguage() + ".xsl");
v_data[3] = 'REF DESC';//order
var Params = unescape(location.search.substring(1, location.search.length));
var args = getArgs();
//default filter
v_data[1] = "";
v_data[10] = null;
//-- просмотр документов отд/полз
v_data[12] = args.type;
if (args.par.charAt(1) == '1') {
document.getElementById("lb_DocTitles").innerText = LocalizedString('Message1');
if (args.type == '0') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_OUT";
}
if (args.type == '1') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message3');
v_data[11] = "V_DOCS_USER_OUT";
}
if (args.type == '2') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message9');
v_data[11] = "V_DOCS_SALDO";
}
if (args.type == '3') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_OUT_NOTVIP";
}
}
if (args.par.charAt(1) == '2') {
document.getElementById("lb_DocTitles").innerText = LocalizedString('Message4');
if (args.type == '0') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_IN";
}
if (args.type == '1') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message3');
v_data[11] = "V_DOCS_USER_IN";
}
if (args.type == '2') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message9');
v_data[11] = "V_DOCS_SALDO";
}
if (args.type == '3') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_OUT_NOTVIP";
}
}
if (args.par.charAt(0) == '1') document.getElementById("lb_DocTitles").innerText += LocalizedString('Message5');
if (args.par.charAt(0) == '2') {
if (null == args.date && null == args.dateb && null == args.datef) {
alert(LocalizedString('Message6'));
return;
}
if (args.dateb && args.datef) {
v_data[9] = args.dateb;
v_data[10] = args.datef;
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message7') + v_data[9] + " по " + v_data[10] + " :";
}
else {
v_data[9] = "";
v_data[10] = args.date;
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message7') + v_data[10] + " : ";
}
}
var obj = new Object();
obj.v_serviceObjName = 'webService';
obj.v_serviceName = 'Service.asmx';
obj.v_serviceMethod = 'GetData';
obj.v_serviceFuncAfter = "LoadDocuments";
var menu = new Array();
menu[LocalizedString('Message8')] = "OpenDoc()";
menu[LocalizedString('Message10')] = "CreateSameDocument()";
menu["Редагувати дод. реквізити"] = "EditProps()";
obj.v_menuItems = menu;
obj.v_filterInMenu = false;
obj.v_enableViewState = true; //включаем ViewState
obj.v_alignPager = "left";//пейджинг слева
obj.v_showFilterOnStart = true;
obj.v_filterTable = "oper";
//------функции-----
obj.v_serviceFuncAfter = 'AfterLoadFunction';
fn_InitVariables(obj);
InitGrid();
/*для друку документів*/
var port = (location.port != "") ? (":" + location.port) : ("");
document.all.webService.useService(location.protocol + "//" + location.hostname + port + "/barsroot/docinput/DocService.asmx?wsdl", "Doc");
var printTrnModel = getCookie("prnModel");
if (printTrnModel) {
document.getElementById("cbPrintTrnModel").checked = (printTrnModel == 1) ? (true) : (false);
}
v_data_copy.data = v_data;
}
function AfterLoadFunction() {
// кол-во и сумма документов
document.getElementById("lb_DocCount").innerText = '(к-ть: ' + returnServiceValue[2].text + '; сума: ' + returnServiceValue[3].text + ' грн.)';
insertXslRowSelectionTooltip();
}
function getCookie(par) {
var pageCookie = document.cookie;
var pos = pageCookie.indexOf(par + '=');
if (pos != -1) {
var start = pos + par.length + 1;
var end = pageCookie.indexOf(';', start);
if (end == -1) end = pageCookie.length;
var value = pageCookie.substring(start, end);
value = unescape(value);
return value;
}
}
/*******************************/
var arrayForPrint = new Array();//масив з референсами відмічених документів
var selectedSumsArray = new Array();
function editSelectedSumsArray(elem) {
var amount = +$(elem).attr('data-sum');
var sum = 0;
if ($(elem).prop('checked')) {
selectedSumsArray.push(amount);
for (var i = 0; i < selectedSumsArray.length; i++) {
sum += selectedSumsArray[i];
}
} else {
var num = -1;
for (var i = 0; i < selectedSumsArray.length; i++) {
if (selectedSumsArray[i] === amount) {
num = i;
}
sum += selectedSumsArray[i];
}
if (num !== -1) {
sum -= selectedSumsArray[num];
selectedSumsArray.splice(num, 1);
}
}
if (selectedSumsArray.length > 0) {
document.getElementById('lb_DocCountSelected').innerText =
" Виділено: " + selectedSumsArray.length +
"; сума: " + sum.toFixed(2).toString().replace(/\B(?=(\d{3})+(?!\d))/g, ' ');
$("#lb_DocCountSelected").css("color", "red");
$("#lb_DocCountSelected").css("display", "inline");
}
else {
$("#lb_DocCountSelected").css("display", "none");
}
}
function addCheckbox() {
arrayForPrint.splice(0, arrayForPrint.length);
$('#printPanel').hide();
}
function editArrayForPrint(elem, ref) {
ref = $(elem).attr('data-ref');
if ($(elem).prop('checked')) {
arrayForPrint.push(ref);
}
else {
$('#mainChBox').removeAttr('checked');
var num = -1;
for (var i = 0; i < arrayForPrint.length; i++) {
if (arrayForPrint[i] == ref) {
num = i;
}
}
if (num != -1) {
arrayForPrint.splice(num, 1)
}
}
if (arrayForPrint.length > 0) {
$('#printPanel').show();
}
else {
$('#printPanel').hide();
}
}
function selAllCheckbox(elem) {
arrayForPrint.splice(0, arrayForPrint.length);
selectedSumsArray.splice(0, selectedSumsArray.length);
if ($(elem).prop('checked')) {
var allChBox = $('#oTable tr td input[type="checkbox"]');
allChBox.attr('checked', 'checked');
allChBox.each(function (index, elem) {
if (index > 0) {
var ref = $(elem).attr('data-ref');
if (ref) arrayForPrint.push(ref);
var amount = +$(elem).attr('data-sum');
selectedSumsArray.push(amount);
}
});
if (arrayForPrint.length > 0) $('#printPanel').show();
if (selectedSumsArray.length > 0) {
var sum = 0;
for (var i = 0; i < selectedSumsArray.length; i++) {
sum += selectedSumsArray[i];
}
document.getElementById('lb_DocCountSelected').innerText =
" Виділено: " + selectedSumsArray.length +
"; сума: " + sum.toFixed(2).toString().replace(/\B(?=(\d{3})+(?!\d))/g, ' ');
$("#lb_DocCountSelected").css("color", "red");
$("#lb_DocCountSelected").css("display", "inline");
}
}
else {
$('#oTable tr td input[type="checkbox"]').removeAttr('checked');
$('#printPanel').hide();
$("#lb_DocCountSelected").css("display", "none");
}
}
function printSelDocum() {
if (arrayForPrint.length > 0)
getTicketFile(arrayForPrint);
return false;
}
function getTicketFile(ref) {
if ("" != ref)
document.all.webService.Doc.callService(onPrint, "GetArrayFileForPrint", ref, document.getElementById("cbPrintTrnModel").checked);
return false;
}
function onPrint(result) {
if (!getError(result)) return;
var arrPatch = result.value.split('~~$$~~');
for (var i = 0; i < arrPatch.length; i++) {
barsie$print(arrPatch[i]);
}
}
function getError(result, modal) {
if (result.error) {
if (window.dialogArguments || parent.frames.length == 0 || modal) {
window.showModalDialog("dialog.aspx?type=err", "", "dialogWidth:800px;center:yes;edge:sunken;help:no;status:no;"); | /docinput.aspx?tt=" + escape(selectedRow.tt) + "&refDoc=" + selectedRowId, "", "dialogHeight:700px;dialogWidth:1280px;center:yes;edge:sunken;help:no;status:no;");
}
//Load Default.aspx
function LoadDocuments() {
addCheckbox();
//--empty--
}
//открываем карточку документа
function OpenDoc(ref, winName) {
var target = typeof winName !== 'undefined' ? winName : '_self';
if (ref == null) ref = selectedRowId;
window.showModalDialog("/barsroot/documentview/default.aspx?ref=" + ref, null, 'dialogWidth:790px;dialogHeight:550px');
//window.open("/barsroot/documentview/default.aspx?ref=" + ref, target, true);
}
function EditProps() {
window.showModalDialog("/barsroot/docinput/editprops.aspx?ref=" + selectedRowId, "", "dialogHeight:700px;dialogWidth:1280px;center:yes;edge:sunken;help:no;status:no;");
}
//--рефреш
function RefreshButtonPressed() {
selectedSumsArray.splice(0, selectedSumsArray.length);
$("#lb_DocCountSelected").css("display", "none");
ReInitGrid();
}
//--фильтр--
function FilterButtonPressed() {
ShowFilter();
}
//Локализация
function LocalizeHtmlTitles() {
LocalizeHtmlTitle("bt_Filter");
LocalizeHtmlTitle("bt_Refresh");
}
function fnExportToExcel() {
var recordsAmount;
if ($.isNumeric(returnServiceValue[2].text)) {
recordsAmount = Number(returnServiceValue[2].text);
if (recordsAmount > 2000) {
var warningMsg = "<div>Кількість запиcів на вивантаження: <strong>" + recordsAmount + "</strong></div><br/><div>Завантаження може тривати кілька хвилин.</div><br/><div>Ви можете зберегти час, встановивши більше фільтрів пошуку.\nБажаєте встановити додаткові фільтри?</div>";
alertify.set({
labels: {
ok: "Так",
cancel: " Ні "
},
modal: true
});
alertify.confirm(warningMsg, function (e) {
if (e) {
return;
} else {
forceExecute = true;
document.all.webService.Doc.callService(onExportExcel, "ExportExcel", v_data_copy.data, forceExecute);
return;
}
});
} else {
forceExecute = true;
document.all.webService.Doc.callService(onExportExcel, "ExportExcel", v_data_copy.data, forceExecute);
return;
}
} else {
document.all.webService.Doc.callService(onExportExcel, "ExportExcel", v_data_copy.data, forceExecute);
}
}
function onExportExcel(result) {
if (!getError(result)) return;
if (-1 === result.value.indexOf(".xls")) {
var warningMsg = "<div>Кількість запиcів на вивантаження: <strong>" + result.value + "</strong></div><br/><div>Завантаження може тривати кілька хвилин.</div><br/><div>Ви можете зберегти час, встановивши більше фільтрів пошуку.\nБажаєте встановити додаткові фільтри?</div>";
alertify.set({
labels: {
ok: "Так",
cancel: " Ні "
},
modal: true
});
alertify.confirm(warningMsg, function (e) {
if (e) {
return;
} else {
forceExecute = true;
fnExportToExcel();
}
});
} else {
forceExecute = false;
location.href = "/barsroot/cim/handler.ashx?action=download&fname=accounts&file=" + result.value + "&fext=xlsx";
}
} |
}
else
location.replace("dialog.aspx?type=err");
return false;
}
return true;
}
//Продублировать документ
function CreateSameDocument() {
//location.replace("/barsroot/docinput/docinput.aspx?tt=" + escape(selectedRow.tt) + "&refDoc=" + selectedRowId);
window.showModalDialog("/barsroot/docinput | identifier_body |
Script.js | var forceExecute = false;
// разбор аргументов командной строки
function getArgs() {
var args = new Object();
var query = location.search.substring(1).toLowerCase();
var pairs = query.split("&");
for (var i = 0; i < pairs.length; i++) {
var pos = pairs[i].indexOf('=');
if (-1 == pos) continue;
var argname = pairs[i].substring(0, pos);
var value = pairs[i].substring(pos + 1);
args[argname] = unescape(value);
}
return args;
}
var v_data_copy = {};
function InitDocuments() {
//Локализация
LocalizeHtmlTitles();
LoadXslt("Data_" + getCurrentPageLanguage() + ".xsl");
v_data[3] = 'REF DESC';//order
var Params = unescape(location.search.substring(1, location.search.length));
var args = getArgs();
//default filter
v_data[1] = "";
v_data[10] = null;
//-- просмотр документов отд/полз
v_data[12] = args.type;
if (args.par.charAt(1) == '1') {
document.getElementById("lb_DocTitles").innerText = LocalizedString('Message1');
if (args.type == '0') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_OUT";
}
if (args.type == '1') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message3');
v_data[11] = "V_DOCS_USER_OUT";
}
if (args.type == '2') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message9');
v_data[11] = "V_DOCS_SALDO";
}
if (args.type == '3') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_OUT_NOTVIP";
}
}
if (args.par.charAt(1) == '2') {
document.getElementById("lb_DocTitles").innerText = LocalizedString('Message4');
if (args.type == '0') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_IN";
}
if (args.type == '1') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message3');
v_data[11] = "V_DOCS_USER_IN";
}
if (args.type == '2') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message9');
v_data[11] = "V_DOCS_SALDO";
}
if (args.type == '3') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_OUT_NOTVIP";
}
}
if (args.par.charAt(0) == '1') document.getElementById("lb_DocTitles").innerText += LocalizedString('Message5');
if (args.par.charAt(0) == '2') {
if (null == args.date && null == args.dateb && null == args.datef) {
alert(LocalizedString('Message6'));
return;
}
if (args.dateb && args.datef) {
v_data[9] = args.dateb;
v_data[10] = args.datef;
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message7') + v_data[9] + " по " + v_data[10] + " :";
}
else {
v_data[9] = "";
v_data[10] = args.date;
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message7') + v_data[10] + " : ";
}
}
var obj = new Object();
obj.v_serviceObjName = 'webService';
obj.v_serviceName = 'Service.asmx';
obj.v_serviceMethod = 'GetData';
obj.v_serviceFuncAfter = "LoadDocuments";
var menu = new Array();
menu[LocalizedString('Message8')] = "OpenDoc()";
menu[LocalizedString('Message10')] = "CreateSameDocument()";
menu["Редагувати дод. реквізити"] = "EditProps()";
obj.v_menuItems = menu;
obj.v_filterInMenu = false;
obj.v_enableViewState = true; //включаем ViewState
obj.v_alignPager = "left";//пейджинг слева
obj.v_showFilterOnStart = true;
obj.v_filterTable = "oper";
//------функции-----
obj.v_serviceFuncAfter = 'AfterLoadFunction';
fn_InitVariables(obj);
InitGrid();
/*для друку документів*/
var port = (location.port != "") ? (":" + location.port) : ("");
document.all.webService.useService(location.protocol + "//" + location.hostname + port + "/barsroot/docinput/DocService.asmx?wsdl", "Doc");
var printTrnModel = getCookie("prnModel");
if (printTrnModel) {
document.getElementById("cbPrintTrnModel").checked = (printTrnModel == 1) ? (true) : (false);
}
v_data_copy.data = v_data;
}
function AfterLoadFunction() {
// кол-во и сумма документов
document.getElementById("lb_DocCount").innerText = '(к-ть: ' + returnServiceValue[2].text + '; сума: ' + returnServiceValue[3].text + ' грн.)';
insertXslRowSelectionTooltip();
}
function getCookie(par) {
var pageCookie = document.cookie;
var pos = pageCookie.indexOf(par + '=');
if (pos != -1) {
var start = pos + par.length + 1;
var end = pageCookie.indexOf(';', start);
if (end == -1) end = pageCookie.length;
var value = pageCookie.substring(start, end);
value = unescape(value);
return value;
}
}
/*******************************/
var arrayForPrint = new Array();//масив з референсами відмічених документів
var selectedSumsArray = new Array();
| function editSelectedSumsArray(elem) {
var amount = +$(elem).attr('data-sum');
var sum = 0;
if ($(elem).prop('checked')) {
selectedSumsArray.push(amount);
for (var i = 0; i < selectedSumsArray.length; i++) {
sum += selectedSumsArray[i];
}
} else {
var num = -1;
for (var i = 0; i < selectedSumsArray.length; i++) {
if (selectedSumsArray[i] === amount) {
num = i;
}
sum += selectedSumsArray[i];
}
if (num !== -1) {
sum -= selectedSumsArray[num];
selectedSumsArray.splice(num, 1);
}
}
if (selectedSumsArray.length > 0) {
document.getElementById('lb_DocCountSelected').innerText =
" Виділено: " + selectedSumsArray.length +
"; сума: " + sum.toFixed(2).toString().replace(/\B(?=(\d{3})+(?!\d))/g, ' ');
$("#lb_DocCountSelected").css("color", "red");
$("#lb_DocCountSelected").css("display", "inline");
}
else {
$("#lb_DocCountSelected").css("display", "none");
}
}
function addCheckbox() {
arrayForPrint.splice(0, arrayForPrint.length);
$('#printPanel').hide();
}
function editArrayForPrint(elem, ref) {
ref = $(elem).attr('data-ref');
if ($(elem).prop('checked')) {
arrayForPrint.push(ref);
}
else {
$('#mainChBox').removeAttr('checked');
var num = -1;
for (var i = 0; i < arrayForPrint.length; i++) {
if (arrayForPrint[i] == ref) {
num = i;
}
}
if (num != -1) {
arrayForPrint.splice(num, 1)
}
}
if (arrayForPrint.length > 0) {
$('#printPanel').show();
}
else {
$('#printPanel').hide();
}
}
function selAllCheckbox(elem) {
arrayForPrint.splice(0, arrayForPrint.length);
selectedSumsArray.splice(0, selectedSumsArray.length);
if ($(elem).prop('checked')) {
var allChBox = $('#oTable tr td input[type="checkbox"]');
allChBox.attr('checked', 'checked');
allChBox.each(function (index, elem) {
if (index > 0) {
var ref = $(elem).attr('data-ref');
if (ref) arrayForPrint.push(ref);
var amount = +$(elem).attr('data-sum');
selectedSumsArray.push(amount);
}
});
if (arrayForPrint.length > 0) $('#printPanel').show();
if (selectedSumsArray.length > 0) {
var sum = 0;
for (var i = 0; i < selectedSumsArray.length; i++) {
sum += selectedSumsArray[i];
}
document.getElementById('lb_DocCountSelected').innerText =
" Виділено: " + selectedSumsArray.length +
"; сума: " + sum.toFixed(2).toString().replace(/\B(?=(\d{3})+(?!\d))/g, ' ');
$("#lb_DocCountSelected").css("color", "red");
$("#lb_DocCountSelected").css("display", "inline");
}
}
else {
$('#oTable tr td input[type="checkbox"]').removeAttr('checked');
$('#printPanel').hide();
$("#lb_DocCountSelected").css("display", "none");
}
}
function printSelDocum() {
if (arrayForPrint.length > 0)
getTicketFile(arrayForPrint);
return false;
}
function getTicketFile(ref) {
if ("" != ref)
document.all.webService.Doc.callService(onPrint, "GetArrayFileForPrint", ref, document.getElementById("cbPrintTrnModel").checked);
return false;
}
function onPrint(result) {
if (!getError(result)) return;
var arrPatch = result.value.split('~~$$~~');
for (var i = 0; i < arrPatch.length; i++) {
barsie$print(arrPatch[i]);
}
}
function getError(result, modal) {
if (result.error) {
if (window.dialogArguments || parent.frames.length == 0 || modal) {
window.showModalDialog("dialog.aspx?type=err", "", "dialogWidth:800px;center:yes;edge:sunken;help:no;status:no;");
}
else
location.replace("dialog.aspx?type=err");
return false;
}
return true;
}
//Продублировать документ
function CreateSameDocument() {
//location.replace("/barsroot/docinput/docinput.aspx?tt=" + escape(selectedRow.tt) + "&refDoc=" + selectedRowId);
window.showModalDialog("/barsroot/docinput/docinput.aspx?tt=" + escape(selectedRow.tt) + "&refDoc=" + selectedRowId, "", "dialogHeight:700px;dialogWidth:1280px;center:yes;edge:sunken;help:no;status:no;");
}
//Load Default.aspx
function LoadDocuments() {
addCheckbox();
//--empty--
}
//открываем карточку документа
function OpenDoc(ref, winName) {
var target = typeof winName !== 'undefined' ? winName : '_self';
if (ref == null) ref = selectedRowId;
window.showModalDialog("/barsroot/documentview/default.aspx?ref=" + ref, null, 'dialogWidth:790px;dialogHeight:550px');
//window.open("/barsroot/documentview/default.aspx?ref=" + ref, target, true);
}
function EditProps() {
window.showModalDialog("/barsroot/docinput/editprops.aspx?ref=" + selectedRowId, "", "dialogHeight:700px;dialogWidth:1280px;center:yes;edge:sunken;help:no;status:no;");
}
//--рефреш
function RefreshButtonPressed() {
selectedSumsArray.splice(0, selectedSumsArray.length);
$("#lb_DocCountSelected").css("display", "none");
ReInitGrid();
}
//--фильтр--
function FilterButtonPressed() {
ShowFilter();
}
//Локализация
function LocalizeHtmlTitles() {
LocalizeHtmlTitle("bt_Filter");
LocalizeHtmlTitle("bt_Refresh");
}
function fnExportToExcel() {
var recordsAmount;
if ($.isNumeric(returnServiceValue[2].text)) {
recordsAmount = Number(returnServiceValue[2].text);
if (recordsAmount > 2000) {
var warningMsg = "<div>Кількість запиcів на вивантаження: <strong>" + recordsAmount + "</strong></div><br/><div>Завантаження може тривати кілька хвилин.</div><br/><div>Ви можете зберегти час, встановивши більше фільтрів пошуку.\nБажаєте встановити додаткові фільтри?</div>";
alertify.set({
labels: {
ok: "Так",
cancel: " Ні "
},
modal: true
});
alertify.confirm(warningMsg, function (e) {
if (e) {
return;
} else {
forceExecute = true;
document.all.webService.Doc.callService(onExportExcel, "ExportExcel", v_data_copy.data, forceExecute);
return;
}
});
} else {
forceExecute = true;
document.all.webService.Doc.callService(onExportExcel, "ExportExcel", v_data_copy.data, forceExecute);
return;
}
} else {
document.all.webService.Doc.callService(onExportExcel, "ExportExcel", v_data_copy.data, forceExecute);
}
}
function onExportExcel(result) {
if (!getError(result)) return;
if (-1 === result.value.indexOf(".xls")) {
var warningMsg = "<div>Кількість запиcів на вивантаження: <strong>" + result.value + "</strong></div><br/><div>Завантаження може тривати кілька хвилин.</div><br/><div>Ви можете зберегти час, встановивши більше фільтрів пошуку.\nБажаєте встановити додаткові фільтри?</div>";
alertify.set({
labels: {
ok: "Так",
cancel: " Ні "
},
modal: true
});
alertify.confirm(warningMsg, function (e) {
if (e) {
return;
} else {
forceExecute = true;
fnExportToExcel();
}
});
} else {
forceExecute = false;
location.href = "/barsroot/cim/handler.ashx?action=download&fname=accounts&file=" + result.value + "&fext=xlsx";
}
} | random_line_split | |
Script.js | var forceExecute = false;
// разбор аргументов командной строки
function getArgs() {
var args = new Object();
var query = location.search.substring(1).toLowerCase();
var pairs = query.split("&");
for (var i = 0; i < pairs.length; i++) {
var pos = pairs[i].indexOf('=');
if (-1 == pos) continue;
var argname = pairs[i].substring(0, pos);
var value = pairs[i].substring(pos + 1);
args[argname] = unescape(value);
}
return args;
}
var v_data_copy = {};
function InitDocuments() {
//Локализация
LocalizeHtmlTitles();
LoadXslt("Data_" + getCurrentPageLanguage() + ".xsl");
v_data[3] = 'REF DESC';//order
var Params = unescape(location.search.substring(1, location.search.length));
var args = getArgs();
//default filter
v_data[1] = "";
v_data[10] = null;
//-- просмотр документов отд/полз
v_data[12] = args.type;
if (args.par.charAt(1) == '1') {
document.getElementById("lb_DocTitles").innerText = LocalizedString('Message1');
if (args.type == '0') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_OUT";
}
if (args.type == '1') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message3');
v_data[11] = "V_DOCS_USER_OUT";
}
if (args.type == '2') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message9');
v_data[11] = "V_DOCS_SALDO";
}
if (args.type == '3') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_OUT_NOTVIP";
}
}
if (args.par.charAt(1) == '2') {
document.getElementById("lb_DocTitles").innerText = LocalizedString('Message4');
if (args.type == '0') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_IN";
}
if (args.type == '1') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message3');
v_data[11] = "V_DOCS_USER_IN";
}
if (args.type == '2') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message9');
v_data[11] = "V_DOCS_SALDO";
}
if (args.type == '3') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_OUT_NOTVIP";
}
}
if (args.par.charAt(0) == '1') document.getElementById("lb_DocTitles").innerText += LocalizedString('Message5');
if (args.par.charAt(0) == '2') {
if (null == args.date && null == args.dateb && null == args.datef) {
alert(LocalizedString('Message6'));
return;
}
if (args.dateb && args.datef) {
v_data[9] = args.dateb;
v_data[10] = args.datef;
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message7') + v_data[9] + " по " + v_data[10] + " :";
}
else {
v_data[9] = "";
v_data[10] = args.date;
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message7') + v_data[10] + " : ";
}
}
var obj = new Object();
obj.v_serviceObjName = 'webService';
obj.v_serviceName = 'Service.asmx';
obj.v_serviceMethod = 'GetData';
obj.v_serviceFuncAfter = "LoadDocuments";
var menu = new Array();
menu[LocalizedString('Message8')] = "OpenDoc()";
menu[LocalizedString('Message10')] = "CreateSameDocument()";
menu["Редагувати дод. реквізити"] = "EditProps()";
obj.v_menuItems = menu;
obj.v_filterInMenu = false;
obj.v_enableViewState = true; //включаем ViewState
obj.v_alignPager = "left";//пейджинг слева
obj.v_showFilterOnStart = true;
obj.v_filterTable = "oper";
//------функции-----
obj.v_serviceFuncAfter = 'AfterLoadFunction';
fn_InitVariables(obj);
InitGrid();
/*для друку документів*/
var port = (location.port != "") ? (":" + location.port) : ("");
document.all.webService.useService(location.protocol + "//" + location.hostname + port + "/barsroot/docinput/DocService.asmx?wsdl", "Doc");
var printTrnModel = getCookie("prnModel");
if (printTrnModel) {
document.getElementById("cbPrintTrnModel").checked = (printTrnModel == 1) ? (true) : (false);
}
v_data_copy.data = v_data;
}
function AfterLoadFunction() {
// кол-во и сумма документов
document.getElementById("lb_DocCount").innerText = '(к-ть: ' + returnServiceValue[2].text + '; сума: ' + returnServiceValue[3].text + ' грн.)';
insertXslRowSelectionTooltip();
}
function getCookie(par) {
var pageCookie = document.cookie;
var pos = pageCookie.indexOf(par + '=');
if (pos != -1) {
var start = pos + par.length + 1;
var end = pageCookie.indexOf(';', start);
if (end == -1) end = pageCookie.length;
var value = page | lectedSumsArray(elem) {
var amount = +$(elem).attr('data-sum');
var sum = 0;
if ($(elem).prop('checked')) {
selectedSumsArray.push(amount);
for (var i = 0; i < selectedSumsArray.length; i++) {
sum += selectedSumsArray[i];
}
} else {
var num = -1;
for (var i = 0; i < selectedSumsArray.length; i++) {
if (selectedSumsArray[i] === amount) {
num = i;
}
sum += selectedSumsArray[i];
}
if (num !== -1) {
sum -= selectedSumsArray[num];
selectedSumsArray.splice(num, 1);
}
}
if (selectedSumsArray.length > 0) {
document.getElementById('lb_DocCountSelected').innerText =
" Виділено: " + selectedSumsArray.length +
"; сума: " + sum.toFixed(2).toString().replace(/\B(?=(\d{3})+(?!\d))/g, ' ');
$("#lb_DocCountSelected").css("color", "red");
$("#lb_DocCountSelected").css("display", "inline");
}
else {
$("#lb_DocCountSelected").css("display", "none");
}
}
function addCheckbox() {
arrayForPrint.splice(0, arrayForPrint.length);
$('#printPanel').hide();
}
function editArrayForPrint(elem, ref) {
ref = $(elem).attr('data-ref');
if ($(elem).prop('checked')) {
arrayForPrint.push(ref);
}
else {
$('#mainChBox').removeAttr('checked');
var num = -1;
for (var i = 0; i < arrayForPrint.length; i++) {
if (arrayForPrint[i] == ref) {
num = i;
}
}
if (num != -1) {
arrayForPrint.splice(num, 1)
}
}
if (arrayForPrint.length > 0) {
$('#printPanel').show();
}
else {
$('#printPanel').hide();
}
}
function selAllCheckbox(elem) {
arrayForPrint.splice(0, arrayForPrint.length);
selectedSumsArray.splice(0, selectedSumsArray.length);
if ($(elem).prop('checked')) {
var allChBox = $('#oTable tr td input[type="checkbox"]');
allChBox.attr('checked', 'checked');
allChBox.each(function (index, elem) {
if (index > 0) {
var ref = $(elem).attr('data-ref');
if (ref) arrayForPrint.push(ref);
var amount = +$(elem).attr('data-sum');
selectedSumsArray.push(amount);
}
});
if (arrayForPrint.length > 0) $('#printPanel').show();
if (selectedSumsArray.length > 0) {
var sum = 0;
for (var i = 0; i < selectedSumsArray.length; i++) {
sum += selectedSumsArray[i];
}
document.getElementById('lb_DocCountSelected').innerText =
" Виділено: " + selectedSumsArray.length +
"; сума: " + sum.toFixed(2).toString().replace(/\B(?=(\d{3})+(?!\d))/g, ' ');
$("#lb_DocCountSelected").css("color", "red");
$("#lb_DocCountSelected").css("display", "inline");
}
}
else {
$('#oTable tr td input[type="checkbox"]').removeAttr('checked');
$('#printPanel').hide();
$("#lb_DocCountSelected").css("display", "none");
}
}
function printSelDocum() {
if (arrayForPrint.length > 0)
getTicketFile(arrayForPrint);
return false;
}
function getTicketFile(ref) {
if ("" != ref)
document.all.webService.Doc.callService(onPrint, "GetArrayFileForPrint", ref, document.getElementById("cbPrintTrnModel").checked);
return false;
}
function onPrint(result) {
if (!getError(result)) return;
var arrPatch = result.value.split('~~$$~~');
for (var i = 0; i < arrPatch.length; i++) {
barsie$print(arrPatch[i]);
}
}
function getError(result, modal) {
if (result.error) {
if (window.dialogArguments || parent.frames.length == 0 || modal) {
window.showModalDialog("dialog.aspx?type=err", "", "dialogWidth:800px;center:yes;edge:sunken;help:no;status:no;");
}
else
location.replace("dialog.aspx?type=err");
return false;
}
return true;
}
//Продублировать документ
function CreateSameDocument() {
//location.replace("/barsroot/docinput/docinput.aspx?tt=" + escape(selectedRow.tt) + "&refDoc=" + selectedRowId);
window.showModalDialog("/barsroot/docinput/docinput.aspx?tt=" + escape(selectedRow.tt) + "&refDoc=" + selectedRowId, "", "dialogHeight:700px;dialogWidth:1280px;center:yes;edge:sunken;help:no;status:no;");
}
//Load Default.aspx
function LoadDocuments() {
addCheckbox();
//--empty--
}
//открываем карточку документа
function OpenDoc(ref, winName) {
var target = typeof winName !== 'undefined' ? winName : '_self';
if (ref == null) ref = selectedRowId;
window.showModalDialog("/barsroot/documentview/default.aspx?ref=" + ref, null, 'dialogWidth:790px;dialogHeight:550px');
//window.open("/barsroot/documentview/default.aspx?ref=" + ref, target, true);
}
function EditProps() {
window.showModalDialog("/barsroot/docinput/editprops.aspx?ref=" + selectedRowId, "", "dialogHeight:700px;dialogWidth:1280px;center:yes;edge:sunken;help:no;status:no;");
}
//--рефреш
function RefreshButtonPressed() {
selectedSumsArray.splice(0, selectedSumsArray.length);
$("#lb_DocCountSelected").css("display", "none");
ReInitGrid();
}
//--фильтр--
function FilterButtonPressed() {
ShowFilter();
}
//Локализация
function LocalizeHtmlTitles() {
LocalizeHtmlTitle("bt_Filter");
LocalizeHtmlTitle("bt_Refresh");
}
function fnExportToExcel() {
var recordsAmount;
if ($.isNumeric(returnServiceValue[2].text)) {
recordsAmount = Number(returnServiceValue[2].text);
if (recordsAmount > 2000) {
var warningMsg = "<div>Кількість запиcів на вивантаження: <strong>" + recordsAmount + "</strong></div><br/><div>Завантаження може тривати кілька хвилин.</div><br/><div>Ви можете зберегти час, встановивши більше фільтрів пошуку.\nБажаєте встановити додаткові фільтри?</div>";
alertify.set({
labels: {
ok: "Так",
cancel: " Ні "
},
modal: true
});
alertify.confirm(warningMsg, function (e) {
if (e) {
return;
} else {
forceExecute = true;
document.all.webService.Doc.callService(onExportExcel, "ExportExcel", v_data_copy.data, forceExecute);
return;
}
});
} else {
forceExecute = true;
document.all.webService.Doc.callService(onExportExcel, "ExportExcel", v_data_copy.data, forceExecute);
return;
}
} else {
document.all.webService.Doc.callService(onExportExcel, "ExportExcel", v_data_copy.data, forceExecute);
}
}
function onExportExcel(result) {
if (!getError(result)) return;
if (-1 === result.value.indexOf(".xls")) {
var warningMsg = "<div>Кількість запиcів на вивантаження: <strong>" + result.value + "</strong></div><br/><div>Завантаження може тривати кілька хвилин.</div><br/><div>Ви можете зберегти час, встановивши більше фільтрів пошуку.\nБажаєте встановити додаткові фільтри?</div>";
alertify.set({
labels: {
ok: "Так",
cancel: " Ні "
},
modal: true
});
alertify.confirm(warningMsg, function (e) {
if (e) {
return;
} else {
forceExecute = true;
fnExportToExcel();
}
});
} else {
forceExecute = false;
location.href = "/barsroot/cim/handler.ashx?action=download&fname=accounts&file=" + result.value + "&fext=xlsx";
}
} | Cookie.substring(start, end);
value = unescape(value);
return value;
}
}
/*******************************/
var arrayForPrint = new Array();//масив з референсами відмічених документів
var selectedSumsArray = new Array();
function editSe | conditional_block |
Script.js | var forceExecute = false;
// разбор аргументов командной строки
function getArgs() {
var args = new Object();
var query = location.search.substring(1).toLowerCase();
var pairs = query.split("&");
for (var i = 0; i < pairs.length; i++) {
var pos = pairs[i].indexOf('=');
if (-1 == pos) continue;
var argname = pairs[i].substring(0, pos);
var value = pairs[i].substring(pos + 1);
args[argname] = unescape(value);
}
return args;
}
var v_data_copy = {};
function InitDocuments() {
//Локализация
LocalizeHtmlTitles();
LoadXslt("Data_" + getCurrentPageLanguage() + ".xsl");
v_data[3] = 'REF DESC';//order
var Params = unescape(location.search.substring(1, location.search.length));
var args = getArgs();
//default filter
v_data[1] = "";
v_data[10] = null;
//-- просмотр документов отд/полз
v_data[12] = args.type;
if (args.par.charAt(1) == '1') {
document.getElementById("lb_DocTitles").innerText = LocalizedString('Message1');
if (args.type == '0') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_OUT";
}
if (args.type == '1') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message3');
v_data[11] = "V_DOCS_USER_OUT";
}
if (args.type == '2') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message9');
v_data[11] = "V_DOCS_SALDO";
}
if (args.type == '3') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_OUT_NOTVIP";
}
}
if (args.par.charAt(1) == '2') {
document.getElementById("lb_DocTitles").innerText = LocalizedString('Message4');
if (args.type == '0') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_IN";
}
if (args.type == '1') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message3');
v_data[11] = "V_DOCS_USER_IN";
}
if (args.type == '2') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message9');
v_data[11] = "V_DOCS_SALDO";
}
if (args.type == '3') {
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message2');
v_data[11] = "V_DOCS_TOBO_OUT_NOTVIP";
}
}
if (args.par.charAt(0) == '1') document.getElementById("lb_DocTitles").innerText += LocalizedString('Message5');
if (args.par.charAt(0) == '2') {
if (null == args.date && null == args.dateb && null == args.datef) {
alert(LocalizedString('Message6'));
return;
}
if (args.dateb && args.datef) {
v_data[9] = args.dateb;
v_data[10] = args.datef;
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message7') + v_data[9] + " по " + v_data[10] + " :";
}
else {
v_data[9] = "";
v_data[10] = args.date;
document.getElementById("lb_DocTitles").innerText += LocalizedString('Message7') + v_data[10] + " : ";
}
}
var obj = new Object();
obj.v_serviceObjName = 'webService';
obj.v_serviceName = 'Service.asmx';
obj.v_serviceMethod = 'GetData';
obj.v_serviceFuncAfter = "LoadDocuments";
var menu = new Array();
menu[LocalizedString('Message8')] = "OpenDoc()";
menu[LocalizedString('Message10')] = "CreateSameDocument()";
menu["Редагувати дод. реквізити"] = "EditProps()";
obj.v_menuItems = menu;
obj.v_filterInMenu = false;
obj.v_enableViewState = true; //включаем ViewState
obj.v_alignPager = "left";//пейджинг слева
obj.v_showFilterOnStart = true;
obj.v_filterTable = "oper";
//------функции-----
obj.v_serviceFuncAfter = 'AfterLoadFunction';
fn_InitVariables(obj);
InitGrid();
/*для друку документів*/
var port = (location.port != "") ? (":" + location.port) : ("");
document.all.webService.useService(location.protocol + "//" + location.hostname + port + "/barsroot/docinput/DocService.asmx?wsdl", "Doc");
var printTrnModel = getCookie("prnModel");
if (printTrnModel) {
document.getElementById("cbPrintTrnModel").checked = (printTrnModel == 1) ? (true) : (false);
}
v_data_copy.data = v_data;
}
function AfterLoadFunction() {
// кол-во и сумма документов
document.getElementById("lb_DocCount").innerText = '(к-ть: ' + returnServiceValue[2].text + '; сума: ' + returnServiceValue[3].text + ' грн.)';
insertXslRowSelectionTooltip();
}
function getCookie(par) {
var pageCookie = document.cookie;
var pos = pageCookie.indexOf(par + '=');
if (pos != -1) {
var start = pos + par.length + 1;
var end = pageCookie.indexOf(';', start);
if (end == -1) end = pageCookie.length;
var value = pageCookie.substring(start, end);
value = unescape(value);
return value;
}
}
/*******************************/
var arrayForPrint = new Array();//масив з референсами відмічених документів
var selectedSumsArray = new Array();
function editSelectedSumsArray(elem) {
var amount = +$(elem).attr('data-sum');
var sum = 0;
if ($(elem).prop('checked')) {
selectedSumsArray.push(amount);
for (var i = 0; i < selectedSumsArray.length; i++) {
sum += selectedSumsArray[i];
}
} else {
var num = -1;
for (var i = 0; i < selectedSumsArray.length; i++) {
if (selectedSumsArray[i] === amount) {
num = i;
}
sum += selectedSumsArray[i];
}
if (num !== -1) {
sum -= selectedSumsArray[num];
selectedSumsArray.splice(num, 1);
}
}
if (selectedSumsArray.length > 0) {
document.getElementById('lb_DocCountSelected').innerText =
" Виділено: " + selectedSumsArray.length +
"; сума: " + sum.toFixed(2).toString().replace(/\B(?=(\d{3})+(?!\d))/g, ' ');
$("#lb_DocCountSelected").css("color", "red");
$("#lb_DocCountSelected").css("display", "inline");
}
else {
$("#lb_DocCountSelected").css("display", "none");
}
}
function addCheckbox() {
arrayForPrint.splice(0, arrayForPrint.length);
$('#printPanel').hide();
}
function editArrayForPrint(elem, ref) {
ref = $(elem).attr('data-ref');
if ($(elem).prop('checked')) {
arrayForPrint.push(ref);
}
else {
$('#mainChBox').removeAttr('checked');
var num = -1;
for (var i = 0; i < arrayForPrint.length; i++) {
if (arrayForPrint[i] == ref) {
num = i;
}
}
if (num != -1) {
arrayForPrint.splice(num, 1)
}
}
if (arrayForPrint.length > 0) {
$('#printPanel').show();
}
else {
$('#printPanel').hide();
}
}
function selAllCheckbox(elem) {
arrayForPrint.splice(0, arrayForPrint.length);
selectedSumsArray.splice(0, selectedSumsArray.length);
if ($(elem).prop('checked')) {
var allChBox = $('#oTable tr td input[type="checkbox"]');
allChBox.attr('checked', 'checked');
allChBox.each(function (index, elem) {
if (index > 0) {
var ref = $(elem).attr('data-ref');
if (ref) arrayForPrint.push(ref);
var amount = +$(elem).attr('data-sum');
selectedSumsArray.push(amount);
}
});
if (arrayForPrint.length > 0) $('#printPanel').show();
if (selectedSumsArray.length > 0) {
var sum = 0;
for (var i = 0; i < selectedSumsArray.length; i++) {
sum += selectedSumsArray[i];
}
document.getElementById('lb_DocCountSelected').innerText =
" Виділено: " + selectedSumsArray.length +
"; сума: " + sum.toFixed(2).toString().replace(/\B(?=(\d{3})+(?!\d))/g, ' ');
$("#lb_DocCountSelected").css("color", "red");
$("#lb_DocCountSelected").css("display", "inline");
}
}
else {
$('#oTable tr td input[type="checkbox"]').removeAttr('checked');
$('#printPanel').hide();
$("#lb_DocCountSelected").css("display", "none");
}
}
function printSelDocum() {
if (arrayForPrint.length > 0)
getTicketFile(arrayForPrint);
return false;
}
function getTicketFile(ref) {
if ("" != ref)
document.all.webService.Doc.callService(onPrint, "GetArrayFileForPrint", ref, document.getElementById("cbPrintTrnModel").checked);
return false;
}
function onPrint(result) {
if (!getError(result)) return;
var arrPatch = result.value.split('~~$$~~');
for (var i = 0; i < arrPatch.length; i++) {
barsie$print(arrPatch[i]);
}
}
function getError(result, modal) {
if (result.error) {
if (window.dialogArguments || parent.frames.length == 0 || modal) {
window.showModalDialog("dialog.aspx?type=err", "", "dialogWidth:800px;center:yes;edge:sunken;help:no;status:no;");
}
else
location.replace("dialog.aspx?type=err");
return false;
}
return true;
}
//Продублировать документ
function CreateSameDocument() {
//location.replace("/barsroot/docinput/docinput.aspx?tt=" + escape(selectedRow.tt) + "&refDoc=" + selectedRowId);
window.showModalDialog("/barsroot/docinput/docinput.aspx?tt=" + escape(selectedRow.tt) + "&refDoc=" + selectedRowId, "", "dialogHeight:700px;dialogWidth:1280px;center:yes;edge:sunken;help:no;status:no;");
}
//Load Default.aspx
function LoadDocuments() {
addCheckbox();
//--empty--
}
//открываем карточку документа
function OpenDoc(ref, winName) {
var target = typeof winName !== 'undefined' ? winName : '_self';
if (ref == null) ref = selectedRowId;
window.showModalDialog("/barsroot/documentview/default.aspx?ref=" + ref, null, 'dialogWidth:790px;dialogHeight:550px');
//window.open("/barsroot/documentview/default.aspx?ref=" + ref, target, true);
}
function EditProps() {
window.showModalDialog("/barsroot/docinput/editprops.aspx?ref=" + selectedRowId, "", "dialogHeight:700px;dialogWidth:1280px;center:yes;edge:sunken;help:no;status:no;");
}
//--рефреш
function RefreshButtonPressed() {
selectedSumsArray.splice(0, selectedSumsArray.length);
$("#lb_DocCountSelected").css("display", "none");
ReInitGrid();
}
//--фильтр--
function FilterButtonPressed() {
ShowFilter();
}
//Локализация
function LocalizeHtmlTitles() {
LocalizeHtmlTitle("bt_Filter");
LocalizeHtmlTitle("bt_Refresh");
}
function fnExportToExcel() {
var recordsAmount;
if ($.isNumeric(returnServiceValue[2].text)) {
re | r(returnServiceValue[2].text);
if (recordsAmount > 2000) {
var warningMsg = "<div>Кількість запиcів на вивантаження: <strong>" + recordsAmount + "</strong></div><br/><div>Завантаження може тривати кілька хвилин.</div><br/><div>Ви можете зберегти час, встановивши більше фільтрів пошуку.\nБажаєте встановити додаткові фільтри?</div>";
alertify.set({
labels: {
ok: "Так",
cancel: " Ні "
},
modal: true
});
alertify.confirm(warningMsg, function (e) {
if (e) {
return;
} else {
forceExecute = true;
document.all.webService.Doc.callService(onExportExcel, "ExportExcel", v_data_copy.data, forceExecute);
return;
}
});
} else {
forceExecute = true;
document.all.webService.Doc.callService(onExportExcel, "ExportExcel", v_data_copy.data, forceExecute);
return;
}
} else {
document.all.webService.Doc.callService(onExportExcel, "ExportExcel", v_data_copy.data, forceExecute);
}
}
function onExportExcel(result) {
if (!getError(result)) return;
if (-1 === result.value.indexOf(".xls")) {
var warningMsg = "<div>Кількість запиcів на вивантаження: <strong>" + result.value + "</strong></div><br/><div>Завантаження може тривати кілька хвилин.</div><br/><div>Ви можете зберегти час, встановивши більше фільтрів пошуку.\nБажаєте встановити додаткові фільтри?</div>";
alertify.set({
labels: {
ok: "Так",
cancel: " Ні "
},
modal: true
});
alertify.confirm(warningMsg, function (e) {
if (e) {
return;
} else {
forceExecute = true;
fnExportToExcel();
}
});
} else {
forceExecute = false;
location.href = "/barsroot/cim/handler.ashx?action=download&fname=accounts&file=" + result.value + "&fext=xlsx";
}
} | cordsAmount = Numbe | identifier_name |
validator.go | package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"time"
"k8s.io/klog/v2"
"google.golang.org/api/cloudresourcemanager/v1"
"google.golang.org/api/compute/v1"
)
const (
serviceNameKey = "kubernetes.io/service-name"
servicePortKey = "kubernetes.io/service-port"
retryInterval = 60 * time.Second
)
var (
searchOpt *searchOptions
audience string
issuer string = "https://cloud.google.com/iap"
httpPort int = 8081
jwtValidator tokenValidator = gcpTokenValidator{}
)
func main() {
klog.InitFlags(nil)
flag.Parse()
klog.Info("starting GKE IAP Token validator")
// read the httpPort
port := os.Getenv("HTTP_PORT")
if port != "" {
portNumber, err := strconv.Atoi(port)
if err != nil {
klog.Errorf("invalid configuration, HTTP_PORT must a number, found %q", port)
os.Exit(1)
}
httpPort = portNumber
}
// search for issuer
iss := os.Getenv("OAUTH_ISSUER")
if iss != "" {
issuer = iss
}
// read backend search options
opt, err := parseSearchOptions()
if err != nil {
klog.Errorf("failed to configure search options: %v", err)
os.Exit(1)
}
searchOpt = opt
aud := os.Getenv("OAUTH_AUDIENCE")
if aud != "" {
audience = aud
} else {
// search for audience
go getRequiredClaims()
}
// setup the http handler
http.HandleFunc("/", validateJWT)
klog.V(1).Infof("Running http server on :%v", httpPort)
http.ListenAndServe(fmt.Sprintf(":%v", httpPort), nil)
}
// validateJWT validates a JWT found in the "x-goog-iap-jwt-assertion" header
// and return 200 if valid, 401 if the header is not present, and 403 if the validation fails
func validateJWT(w http.ResponseWriter, req *http.Request) {
if klog.V(3).Enabled() {
klog.Infof("request received from: %v, headers: %v", req.RemoteAddr, req.Header)
}
iapJWT := req.Header.Get("X-Goog-IAP-JWT-Assertion")
if iapJWT == "" {
klog.V(1).Infof("X-Goog-IAP-JWT-Assertion header not found")
http.Error(w, "", http.StatusUnauthorized)
return
}
if audience == "" {
klog.V(1).ErrorS(fmt.Errorf("token cannot be validated, empty audience, check for previous errors"), "")
http.Error(w, "", http.StatusForbidden)
return
}
if issuer == "" {
klog.V(1).ErrorS(fmt.Errorf("token cannot be validated, empty issuer, check for previous errors"), "")
http.Error(w, "", http.StatusForbidden)
return
}
ctx := context.Background()
// we pass empty as audience here because we will validate it later
payload, err := jwtValidator.Validate(ctx, iapJWT, "")
klog.V(3).Infof("payload received: %+v", payload)
if err != nil {
klog.V(1).ErrorS(err, "error validating jwt token")
http.Error(w, "", http.StatusForbidden)
return
}
// empty payload should not be possible
if payload == nil {
klog.V(1).ErrorS(nil, "null payload received")
http.Error(w, "", http.StatusForbidden)
return
}
// validate the audience
if audience != payload.Audience {
klog.V(1).ErrorS(nil, "error validating jwt token, invalid audience, expected %s, got %s", audience, payload.Audience)
http.Error(w, "", http.StatusForbidden)
return
}
// validate the issuer
if issuer != payload.Issuer {
klog.V(1).ErrorS(nil, "error validating jwt token, invalid issuer, expected %s, got %s", issuer, payload.Issuer)
http.Error(w, "", http.StatusForbidden)
return
}
// validate expired - this may be redundant - but we check it anyway
if payload.Expires == 0 || payload.Expires+30 < time.Now().Unix() {
klog.V(1).ErrorS(nil, "error validating jwt token, expired")
http.Error(w, "", http.StatusForbidden)
return
}
// validate IssuedAt - should not be in the future
if payload.IssuedAt == 0 || payload.IssuedAt-30 > time.Now().Unix() {
klog.V(1).ErrorS(nil, "error validating jwt token, emitted in the future")
http.Error(w, "", http.StatusForbidden)
return
}
w.WriteHeader(http.StatusOK)
}
// getRequiredClaims finds the audience require for token validation by querying
// the google apis
func getRequiredClaims() {
resourceService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
klog.Fatalf("cannot create cloudresourcemanager api service")
}
projectsService := gcpProjectsAPIService{projectsService: resourceService.Projects}
computeService, err := compute.NewService(context.Background())
if err != nil { |
backendsService := gcpBackendsAPIService{backendServicesService: computeService.BackendServices}
// TODO: use a backoff strategy
tick := time.NewTicker(retryInterval)
defer tick.Stop()
for {
klog.V(3).Info("begin call to get the project number")
projectNumber, err := getProjectNumber(projectsService)
if err != nil {
klog.Errorf("error retrieving project number: %v", err)
}
klog.V(3).Info("begin call to get the backend service id")
backendServiceID, err := getBackendServiceID(backendsService)
if err != nil {
klog.Errorf("error retrieving backend service id: %v", err)
}
if err == nil {
audience = fmt.Sprintf("/projects/%s/global/backendServices/%s", projectNumber, backendServiceID)
klog.V(1).Infof("audience value found: %s, ready to validate requests", audience)
return
}
klog.V(1).Infof("unable to retrieve the audience value, retring in %v seconds", retryInterval.Seconds())
select {
case <-tick.C:
}
}
}
func getProjectNumber(projectsService projectsAPIService) (string, error) {
klog.V(3).Infof("begin call to projects api")
project, err := projectsService.Get(searchOpt.projectID)
if err != nil {
return "", err
}
if klog.V(3).Enabled() {
klog.Infof("project: %+v", project)
}
return strconv.FormatInt(project.ProjectNumber, 10), nil
}
func getBackendServiceID(backendServices backendsAPIService) (string, error) {
// Get only the backends where IAP is enabled
backends, err := backendServices.List("iap.enabled=true")
if err != nil {
return "", err
}
var foundBackend *compute.BackendService
for _, backend := range backends {
klog.V(1).Infof("checking backend %s", backend.Name)
klog.V(3).Infof("backend %+v", backend)
// try to match by oauth client id first
if searchOpt.oauthClientID != "" && backend.Iap.Oauth2ClientId != searchOpt.oauthClientID {
continue
}
// search for service name
if searchOpt.serviceName != "" && !strings.Contains(backend.Name, searchOpt.serviceName) {
continue
}
if searchOpt.serviceGkeName != "" || searchOpt.serviceGkePortName != "" || searchOpt.serviceGkePortNumber > 0 {
// we need to parse the backend description now
svcDesc, err := parseBackendDescription(backend.Description)
if err != nil {
klog.V(3).Infof("parse description error: %w", err)
continue
}
// search by service name
if searchOpt.serviceGkeName != "" && svcDesc.ServiceName != searchOpt.serviceGkeNamespace+"/"+searchOpt.serviceGkeName {
continue
}
// search by port name
if searchOpt.serviceGkePortName != "" &&
(svcDesc.ServicePort.Name != searchOpt.serviceGkePortName ||
searchOpt.serviceGkeName == "") {
continue
}
// search by port number
if searchOpt.serviceGkePortNumber != 0 &&
(svcDesc.ServicePort.Number != searchOpt.serviceGkePortNumber ||
searchOpt.serviceGkeName == "") {
continue
}
}
// if we got here we found our backend
if foundBackend == nil {
foundBackend = backend
// do not break here as we will search for duplicates
} else {
klog.Warning("more than one backend service found with the provided search options")
}
}
if foundBackend != nil {
return strconv.FormatUint(foundBackend.Id, 10), nil
}
return "", fmt.Errorf("Backend service not found")
}
// parseSearchOptions returns the backend search options from environment
// variables
func parseSearchOptions() (*searchOptions, error) {
options := &searchOptions{}
options.projectID = os.Getenv("PROJECT_ID")
if options.projectID == "" {
return nil, fmt.Errorf("invalid configuration, PROJECT_ID variable not found")
}
options.oauthClientID = os.Getenv("OAUTH_CLIENT_ID")
options.serviceName = os.Getenv("SERVICE_NAME")
options.serviceGkeNamespace = os.Getenv("SERVICE_GKE_NAMESPACE")
if options.serviceGkeNamespace == "" {
options.serviceGkeNamespace = "default"
}
options.serviceGkeName = os.Getenv("SERVICE_GKE_NAME")
options.serviceGkePortName = os.Getenv("SERVICE_GKE_PORT_NAME")
portNumber := os.Getenv("SERVICE_GKE_PORT_NUMBER")
if portNumber != "" {
portNumberValue, err := strconv.Atoi(portNumber)
if err != nil {
return nil, fmt.Errorf("invalid configuration, SERVICE_GKE_PORT_NUMBER must a number, found %q", portNumber)
}
options.serviceGkePortNumber = portNumberValue
}
// vaidate search options - at least one search options
if options.oauthClientID == "" &&
options.serviceName == "" &&
options.serviceGkeName == "" {
return nil, fmt.Errorf("Invalid configuration, at least one search criteria must be specified. Set one of the variables: OAUTH_CLIENT_ID, SERVICE_NAME, SERVICE_GKE_NAME")
}
// validate search options - service number with service name
if options.serviceGkeName == "" && options.serviceGkePortName != "" {
klog.Warning("[warning] SERVICE_GKE_PORT_NAME without SERVICE_GKE_NAME, value will be ignored")
}
if options.serviceGkeName == "" && options.serviceGkePortNumber != 0 {
klog.Warning("[warning] SERVICE_GKE_PORT_NUMBER without SERVICE_GKE_NAME, value will be ignored")
}
klog.Infof("search options: %+v", options)
return options, nil
}
func parseBackendDescription(description string) (*serviceDescription, error) {
if description == "" {
return nil, fmt.Errorf("empty description")
}
data := make(map[string]interface{})
if err := json.Unmarshal([]byte(description), &data); err != nil {
return nil, fmt.Errorf("decode error: description: %s, err: %w", description, err)
}
svcDesc := serviceDescription{}
if nameValue, ok := data[serviceNameKey]; ok {
svcDesc.ServiceName = nameValue.(string)
} else {
return nil, fmt.Errorf("name key not found, key:%s, description: %s", serviceNameKey, description)
}
if portValue, ok := data[servicePortKey]; ok {
servicePort := serviceBackendPort{}
pattern := `{Name:(.*),Number:(\d*),}`
reg := regexp.MustCompile(pattern)
match := reg.FindStringSubmatch(portValue.(string))
if match == nil {
return nil, fmt.Errorf("port decode error, description: %s, must match regexp %s", description, pattern)
}
servicePort.Name = match[1]
if match[2] != "" {
port, err := strconv.Atoi(match[2])
if err != nil {
return nil, fmt.Errorf("port number parse error, description: %v, err: %w", description, err)
}
servicePort.Number = port
}
svcDesc.ServicePort = servicePort
} else {
return nil, fmt.Errorf("port key not found, key:%s, description: %s", serviceNameKey, description)
}
return &svcDesc, nil
} | klog.Fatalf("cannot create compute api service")
} | random_line_split |
validator.go | package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"time"
"k8s.io/klog/v2"
"google.golang.org/api/cloudresourcemanager/v1"
"google.golang.org/api/compute/v1"
)
const (
serviceNameKey = "kubernetes.io/service-name"
servicePortKey = "kubernetes.io/service-port"
retryInterval = 60 * time.Second
)
var (
searchOpt *searchOptions
audience string
issuer string = "https://cloud.google.com/iap"
httpPort int = 8081
jwtValidator tokenValidator = gcpTokenValidator{}
)
func main() {
klog.InitFlags(nil)
flag.Parse()
klog.Info("starting GKE IAP Token validator")
// read the httpPort
port := os.Getenv("HTTP_PORT")
if port != "" {
portNumber, err := strconv.Atoi(port)
if err != nil {
klog.Errorf("invalid configuration, HTTP_PORT must a number, found %q", port)
os.Exit(1)
}
httpPort = portNumber
}
// search for issuer
iss := os.Getenv("OAUTH_ISSUER")
if iss != "" {
issuer = iss
}
// read backend search options
opt, err := parseSearchOptions()
if err != nil {
klog.Errorf("failed to configure search options: %v", err)
os.Exit(1)
}
searchOpt = opt
aud := os.Getenv("OAUTH_AUDIENCE")
if aud != "" {
audience = aud
} else {
// search for audience
go getRequiredClaims()
}
// setup the http handler
http.HandleFunc("/", validateJWT)
klog.V(1).Infof("Running http server on :%v", httpPort)
http.ListenAndServe(fmt.Sprintf(":%v", httpPort), nil)
}
// validateJWT validates a JWT found in the "x-goog-iap-jwt-assertion" header
// and return 200 if valid, 401 if the header is not present, and 403 if the validation fails
func validateJWT(w http.ResponseWriter, req *http.Request) {
if klog.V(3).Enabled() {
klog.Infof("request received from: %v, headers: %v", req.RemoteAddr, req.Header)
}
iapJWT := req.Header.Get("X-Goog-IAP-JWT-Assertion")
if iapJWT == "" {
klog.V(1).Infof("X-Goog-IAP-JWT-Assertion header not found")
http.Error(w, "", http.StatusUnauthorized)
return
}
if audience == "" {
klog.V(1).ErrorS(fmt.Errorf("token cannot be validated, empty audience, check for previous errors"), "")
http.Error(w, "", http.StatusForbidden)
return
}
if issuer == "" {
klog.V(1).ErrorS(fmt.Errorf("token cannot be validated, empty issuer, check for previous errors"), "")
http.Error(w, "", http.StatusForbidden)
return
}
ctx := context.Background()
// we pass empty as audience here because we will validate it later
payload, err := jwtValidator.Validate(ctx, iapJWT, "")
klog.V(3).Infof("payload received: %+v", payload)
if err != nil {
klog.V(1).ErrorS(err, "error validating jwt token")
http.Error(w, "", http.StatusForbidden)
return
}
// empty payload should not be possible
if payload == nil {
klog.V(1).ErrorS(nil, "null payload received")
http.Error(w, "", http.StatusForbidden)
return
}
// validate the audience
if audience != payload.Audience {
klog.V(1).ErrorS(nil, "error validating jwt token, invalid audience, expected %s, got %s", audience, payload.Audience)
http.Error(w, "", http.StatusForbidden)
return
}
// validate the issuer
if issuer != payload.Issuer {
klog.V(1).ErrorS(nil, "error validating jwt token, invalid issuer, expected %s, got %s", issuer, payload.Issuer)
http.Error(w, "", http.StatusForbidden)
return
}
// validate expired - this may be redundant - but we check it anyway
if payload.Expires == 0 || payload.Expires+30 < time.Now().Unix() {
klog.V(1).ErrorS(nil, "error validating jwt token, expired")
http.Error(w, "", http.StatusForbidden)
return
}
// validate IssuedAt - should not be in the future
if payload.IssuedAt == 0 || payload.IssuedAt-30 > time.Now().Unix() {
klog.V(1).ErrorS(nil, "error validating jwt token, emitted in the future")
http.Error(w, "", http.StatusForbidden)
return
}
w.WriteHeader(http.StatusOK)
}
// getRequiredClaims finds the audience require for token validation by querying
// the google apis
func getRequiredClaims() {
resourceService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
klog.Fatalf("cannot create cloudresourcemanager api service")
}
projectsService := gcpProjectsAPIService{projectsService: resourceService.Projects}
computeService, err := compute.NewService(context.Background())
if err != nil {
klog.Fatalf("cannot create compute api service")
}
backendsService := gcpBackendsAPIService{backendServicesService: computeService.BackendServices}
// TODO: use a backoff strategy
tick := time.NewTicker(retryInterval)
defer tick.Stop()
for {
klog.V(3).Info("begin call to get the project number")
projectNumber, err := getProjectNumber(projectsService)
if err != nil {
klog.Errorf("error retrieving project number: %v", err)
}
klog.V(3).Info("begin call to get the backend service id")
backendServiceID, err := getBackendServiceID(backendsService)
if err != nil {
klog.Errorf("error retrieving backend service id: %v", err)
}
if err == nil {
audience = fmt.Sprintf("/projects/%s/global/backendServices/%s", projectNumber, backendServiceID)
klog.V(1).Infof("audience value found: %s, ready to validate requests", audience)
return
}
klog.V(1).Infof("unable to retrieve the audience value, retring in %v seconds", retryInterval.Seconds())
select {
case <-tick.C:
}
}
}
func getProjectNumber(projectsService projectsAPIService) (string, error) |
func getBackendServiceID(backendServices backendsAPIService) (string, error) {
// Get only the backends where IAP is enabled
backends, err := backendServices.List("iap.enabled=true")
if err != nil {
return "", err
}
var foundBackend *compute.BackendService
for _, backend := range backends {
klog.V(1).Infof("checking backend %s", backend.Name)
klog.V(3).Infof("backend %+v", backend)
// try to match by oauth client id first
if searchOpt.oauthClientID != "" && backend.Iap.Oauth2ClientId != searchOpt.oauthClientID {
continue
}
// search for service name
if searchOpt.serviceName != "" && !strings.Contains(backend.Name, searchOpt.serviceName) {
continue
}
if searchOpt.serviceGkeName != "" || searchOpt.serviceGkePortName != "" || searchOpt.serviceGkePortNumber > 0 {
// we need to parse the backend description now
svcDesc, err := parseBackendDescription(backend.Description)
if err != nil {
klog.V(3).Infof("parse description error: %w", err)
continue
}
// search by service name
if searchOpt.serviceGkeName != "" && svcDesc.ServiceName != searchOpt.serviceGkeNamespace+"/"+searchOpt.serviceGkeName {
continue
}
// search by port name
if searchOpt.serviceGkePortName != "" &&
(svcDesc.ServicePort.Name != searchOpt.serviceGkePortName ||
searchOpt.serviceGkeName == "") {
continue
}
// search by port number
if searchOpt.serviceGkePortNumber != 0 &&
(svcDesc.ServicePort.Number != searchOpt.serviceGkePortNumber ||
searchOpt.serviceGkeName == "") {
continue
}
}
// if we got here we found our backend
if foundBackend == nil {
foundBackend = backend
// do not break here as we will search for duplicates
} else {
klog.Warning("more than one backend service found with the provided search options")
}
}
if foundBackend != nil {
return strconv.FormatUint(foundBackend.Id, 10), nil
}
return "", fmt.Errorf("Backend service not found")
}
// parseSearchOptions returns the backend search options from environment
// variables
func parseSearchOptions() (*searchOptions, error) {
options := &searchOptions{}
options.projectID = os.Getenv("PROJECT_ID")
if options.projectID == "" {
return nil, fmt.Errorf("invalid configuration, PROJECT_ID variable not found")
}
options.oauthClientID = os.Getenv("OAUTH_CLIENT_ID")
options.serviceName = os.Getenv("SERVICE_NAME")
options.serviceGkeNamespace = os.Getenv("SERVICE_GKE_NAMESPACE")
if options.serviceGkeNamespace == "" {
options.serviceGkeNamespace = "default"
}
options.serviceGkeName = os.Getenv("SERVICE_GKE_NAME")
options.serviceGkePortName = os.Getenv("SERVICE_GKE_PORT_NAME")
portNumber := os.Getenv("SERVICE_GKE_PORT_NUMBER")
if portNumber != "" {
portNumberValue, err := strconv.Atoi(portNumber)
if err != nil {
return nil, fmt.Errorf("invalid configuration, SERVICE_GKE_PORT_NUMBER must a number, found %q", portNumber)
}
options.serviceGkePortNumber = portNumberValue
}
// vaidate search options - at least one search options
if options.oauthClientID == "" &&
options.serviceName == "" &&
options.serviceGkeName == "" {
return nil, fmt.Errorf("Invalid configuration, at least one search criteria must be specified. Set one of the variables: OAUTH_CLIENT_ID, SERVICE_NAME, SERVICE_GKE_NAME")
}
// validate search options - service number with service name
if options.serviceGkeName == "" && options.serviceGkePortName != "" {
klog.Warning("[warning] SERVICE_GKE_PORT_NAME without SERVICE_GKE_NAME, value will be ignored")
}
if options.serviceGkeName == "" && options.serviceGkePortNumber != 0 {
klog.Warning("[warning] SERVICE_GKE_PORT_NUMBER without SERVICE_GKE_NAME, value will be ignored")
}
klog.Infof("search options: %+v", options)
return options, nil
}
func parseBackendDescription(description string) (*serviceDescription, error) {
if description == "" {
return nil, fmt.Errorf("empty description")
}
data := make(map[string]interface{})
if err := json.Unmarshal([]byte(description), &data); err != nil {
return nil, fmt.Errorf("decode error: description: %s, err: %w", description, err)
}
svcDesc := serviceDescription{}
if nameValue, ok := data[serviceNameKey]; ok {
svcDesc.ServiceName = nameValue.(string)
} else {
return nil, fmt.Errorf("name key not found, key:%s, description: %s", serviceNameKey, description)
}
if portValue, ok := data[servicePortKey]; ok {
servicePort := serviceBackendPort{}
pattern := `{Name:(.*),Number:(\d*),}`
reg := regexp.MustCompile(pattern)
match := reg.FindStringSubmatch(portValue.(string))
if match == nil {
return nil, fmt.Errorf("port decode error, description: %s, must match regexp %s", description, pattern)
}
servicePort.Name = match[1]
if match[2] != "" {
port, err := strconv.Atoi(match[2])
if err != nil {
return nil, fmt.Errorf("port number parse error, description: %v, err: %w", description, err)
}
servicePort.Number = port
}
svcDesc.ServicePort = servicePort
} else {
return nil, fmt.Errorf("port key not found, key:%s, description: %s", serviceNameKey, description)
}
return &svcDesc, nil
}
| {
klog.V(3).Infof("begin call to projects api")
project, err := projectsService.Get(searchOpt.projectID)
if err != nil {
return "", err
}
if klog.V(3).Enabled() {
klog.Infof("project: %+v", project)
}
return strconv.FormatInt(project.ProjectNumber, 10), nil
} | identifier_body |
validator.go | package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"time"
"k8s.io/klog/v2"
"google.golang.org/api/cloudresourcemanager/v1"
"google.golang.org/api/compute/v1"
)
const (
serviceNameKey = "kubernetes.io/service-name"
servicePortKey = "kubernetes.io/service-port"
retryInterval = 60 * time.Second
)
var (
searchOpt *searchOptions
audience string
issuer string = "https://cloud.google.com/iap"
httpPort int = 8081
jwtValidator tokenValidator = gcpTokenValidator{}
)
func main() {
klog.InitFlags(nil)
flag.Parse()
klog.Info("starting GKE IAP Token validator")
// read the httpPort
port := os.Getenv("HTTP_PORT")
if port != "" {
portNumber, err := strconv.Atoi(port)
if err != nil {
klog.Errorf("invalid configuration, HTTP_PORT must a number, found %q", port)
os.Exit(1)
}
httpPort = portNumber
}
// search for issuer
iss := os.Getenv("OAUTH_ISSUER")
if iss != "" {
issuer = iss
}
// read backend search options
opt, err := parseSearchOptions()
if err != nil {
klog.Errorf("failed to configure search options: %v", err)
os.Exit(1)
}
searchOpt = opt
aud := os.Getenv("OAUTH_AUDIENCE")
if aud != "" {
audience = aud
} else {
// search for audience
go getRequiredClaims()
}
// setup the http handler
http.HandleFunc("/", validateJWT)
klog.V(1).Infof("Running http server on :%v", httpPort)
http.ListenAndServe(fmt.Sprintf(":%v", httpPort), nil)
}
// validateJWT validates a JWT found in the "x-goog-iap-jwt-assertion" header
// and return 200 if valid, 401 if the header is not present, and 403 if the validation fails
func validateJWT(w http.ResponseWriter, req *http.Request) {
if klog.V(3).Enabled() {
klog.Infof("request received from: %v, headers: %v", req.RemoteAddr, req.Header)
}
iapJWT := req.Header.Get("X-Goog-IAP-JWT-Assertion")
if iapJWT == "" {
klog.V(1).Infof("X-Goog-IAP-JWT-Assertion header not found")
http.Error(w, "", http.StatusUnauthorized)
return
}
if audience == "" {
klog.V(1).ErrorS(fmt.Errorf("token cannot be validated, empty audience, check for previous errors"), "")
http.Error(w, "", http.StatusForbidden)
return
}
if issuer == "" {
klog.V(1).ErrorS(fmt.Errorf("token cannot be validated, empty issuer, check for previous errors"), "")
http.Error(w, "", http.StatusForbidden)
return
}
ctx := context.Background()
// we pass empty as audience here because we will validate it later
payload, err := jwtValidator.Validate(ctx, iapJWT, "")
klog.V(3).Infof("payload received: %+v", payload)
if err != nil {
klog.V(1).ErrorS(err, "error validating jwt token")
http.Error(w, "", http.StatusForbidden)
return
}
// empty payload should not be possible
if payload == nil {
klog.V(1).ErrorS(nil, "null payload received")
http.Error(w, "", http.StatusForbidden)
return
}
// validate the audience
if audience != payload.Audience {
klog.V(1).ErrorS(nil, "error validating jwt token, invalid audience, expected %s, got %s", audience, payload.Audience)
http.Error(w, "", http.StatusForbidden)
return
}
// validate the issuer
if issuer != payload.Issuer {
klog.V(1).ErrorS(nil, "error validating jwt token, invalid issuer, expected %s, got %s", issuer, payload.Issuer)
http.Error(w, "", http.StatusForbidden)
return
}
// validate expired - this may be redundant - but we check it anyway
if payload.Expires == 0 || payload.Expires+30 < time.Now().Unix() {
klog.V(1).ErrorS(nil, "error validating jwt token, expired")
http.Error(w, "", http.StatusForbidden)
return
}
// validate IssuedAt - should not be in the future
if payload.IssuedAt == 0 || payload.IssuedAt-30 > time.Now().Unix() {
klog.V(1).ErrorS(nil, "error validating jwt token, emitted in the future")
http.Error(w, "", http.StatusForbidden)
return
}
w.WriteHeader(http.StatusOK)
}
// getRequiredClaims finds the audience require for token validation by querying
// the google apis
func getRequiredClaims() {
resourceService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
klog.Fatalf("cannot create cloudresourcemanager api service")
}
projectsService := gcpProjectsAPIService{projectsService: resourceService.Projects}
computeService, err := compute.NewService(context.Background())
if err != nil {
klog.Fatalf("cannot create compute api service")
}
backendsService := gcpBackendsAPIService{backendServicesService: computeService.BackendServices}
// TODO: use a backoff strategy
tick := time.NewTicker(retryInterval)
defer tick.Stop()
for {
klog.V(3).Info("begin call to get the project number")
projectNumber, err := getProjectNumber(projectsService)
if err != nil {
klog.Errorf("error retrieving project number: %v", err)
}
klog.V(3).Info("begin call to get the backend service id")
backendServiceID, err := getBackendServiceID(backendsService)
if err != nil {
klog.Errorf("error retrieving backend service id: %v", err)
}
if err == nil {
audience = fmt.Sprintf("/projects/%s/global/backendServices/%s", projectNumber, backendServiceID)
klog.V(1).Infof("audience value found: %s, ready to validate requests", audience)
return
}
klog.V(1).Infof("unable to retrieve the audience value, retring in %v seconds", retryInterval.Seconds())
select {
case <-tick.C:
}
}
}
func getProjectNumber(projectsService projectsAPIService) (string, error) {
klog.V(3).Infof("begin call to projects api")
project, err := projectsService.Get(searchOpt.projectID)
if err != nil {
return "", err
}
if klog.V(3).Enabled() {
klog.Infof("project: %+v", project)
}
return strconv.FormatInt(project.ProjectNumber, 10), nil
}
func getBackendServiceID(backendServices backendsAPIService) (string, error) {
// Get only the backends where IAP is enabled
backends, err := backendServices.List("iap.enabled=true")
if err != nil {
return "", err
}
var foundBackend *compute.BackendService
for _, backend := range backends {
klog.V(1).Infof("checking backend %s", backend.Name)
klog.V(3).Infof("backend %+v", backend)
// try to match by oauth client id first
if searchOpt.oauthClientID != "" && backend.Iap.Oauth2ClientId != searchOpt.oauthClientID {
continue
}
// search for service name
if searchOpt.serviceName != "" && !strings.Contains(backend.Name, searchOpt.serviceName) {
continue
}
if searchOpt.serviceGkeName != "" || searchOpt.serviceGkePortName != "" || searchOpt.serviceGkePortNumber > 0 {
// we need to parse the backend description now
svcDesc, err := parseBackendDescription(backend.Description)
if err != nil {
klog.V(3).Infof("parse description error: %w", err)
continue
}
// search by service name
if searchOpt.serviceGkeName != "" && svcDesc.ServiceName != searchOpt.serviceGkeNamespace+"/"+searchOpt.serviceGkeName {
continue
}
// search by port name
if searchOpt.serviceGkePortName != "" &&
(svcDesc.ServicePort.Name != searchOpt.serviceGkePortName ||
searchOpt.serviceGkeName == "") {
continue
}
// search by port number
if searchOpt.serviceGkePortNumber != 0 &&
(svcDesc.ServicePort.Number != searchOpt.serviceGkePortNumber ||
searchOpt.serviceGkeName == "") {
continue
}
}
// if we got here we found our backend
if foundBackend == nil {
foundBackend = backend
// do not break here as we will search for duplicates
} else {
klog.Warning("more than one backend service found with the provided search options")
}
}
if foundBackend != nil {
return strconv.FormatUint(foundBackend.Id, 10), nil
}
return "", fmt.Errorf("Backend service not found")
}
// parseSearchOptions returns the backend search options from environment
// variables
func | () (*searchOptions, error) {
options := &searchOptions{}
options.projectID = os.Getenv("PROJECT_ID")
if options.projectID == "" {
return nil, fmt.Errorf("invalid configuration, PROJECT_ID variable not found")
}
options.oauthClientID = os.Getenv("OAUTH_CLIENT_ID")
options.serviceName = os.Getenv("SERVICE_NAME")
options.serviceGkeNamespace = os.Getenv("SERVICE_GKE_NAMESPACE")
if options.serviceGkeNamespace == "" {
options.serviceGkeNamespace = "default"
}
options.serviceGkeName = os.Getenv("SERVICE_GKE_NAME")
options.serviceGkePortName = os.Getenv("SERVICE_GKE_PORT_NAME")
portNumber := os.Getenv("SERVICE_GKE_PORT_NUMBER")
if portNumber != "" {
portNumberValue, err := strconv.Atoi(portNumber)
if err != nil {
return nil, fmt.Errorf("invalid configuration, SERVICE_GKE_PORT_NUMBER must a number, found %q", portNumber)
}
options.serviceGkePortNumber = portNumberValue
}
// vaidate search options - at least one search options
if options.oauthClientID == "" &&
options.serviceName == "" &&
options.serviceGkeName == "" {
return nil, fmt.Errorf("Invalid configuration, at least one search criteria must be specified. Set one of the variables: OAUTH_CLIENT_ID, SERVICE_NAME, SERVICE_GKE_NAME")
}
// validate search options - service number with service name
if options.serviceGkeName == "" && options.serviceGkePortName != "" {
klog.Warning("[warning] SERVICE_GKE_PORT_NAME without SERVICE_GKE_NAME, value will be ignored")
}
if options.serviceGkeName == "" && options.serviceGkePortNumber != 0 {
klog.Warning("[warning] SERVICE_GKE_PORT_NUMBER without SERVICE_GKE_NAME, value will be ignored")
}
klog.Infof("search options: %+v", options)
return options, nil
}
func parseBackendDescription(description string) (*serviceDescription, error) {
if description == "" {
return nil, fmt.Errorf("empty description")
}
data := make(map[string]interface{})
if err := json.Unmarshal([]byte(description), &data); err != nil {
return nil, fmt.Errorf("decode error: description: %s, err: %w", description, err)
}
svcDesc := serviceDescription{}
if nameValue, ok := data[serviceNameKey]; ok {
svcDesc.ServiceName = nameValue.(string)
} else {
return nil, fmt.Errorf("name key not found, key:%s, description: %s", serviceNameKey, description)
}
if portValue, ok := data[servicePortKey]; ok {
servicePort := serviceBackendPort{}
pattern := `{Name:(.*),Number:(\d*),}`
reg := regexp.MustCompile(pattern)
match := reg.FindStringSubmatch(portValue.(string))
if match == nil {
return nil, fmt.Errorf("port decode error, description: %s, must match regexp %s", description, pattern)
}
servicePort.Name = match[1]
if match[2] != "" {
port, err := strconv.Atoi(match[2])
if err != nil {
return nil, fmt.Errorf("port number parse error, description: %v, err: %w", description, err)
}
servicePort.Number = port
}
svcDesc.ServicePort = servicePort
} else {
return nil, fmt.Errorf("port key not found, key:%s, description: %s", serviceNameKey, description)
}
return &svcDesc, nil
}
| parseSearchOptions | identifier_name |
validator.go | package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"time"
"k8s.io/klog/v2"
"google.golang.org/api/cloudresourcemanager/v1"
"google.golang.org/api/compute/v1"
)
const (
serviceNameKey = "kubernetes.io/service-name"
servicePortKey = "kubernetes.io/service-port"
retryInterval = 60 * time.Second
)
var (
searchOpt *searchOptions
audience string
issuer string = "https://cloud.google.com/iap"
httpPort int = 8081
jwtValidator tokenValidator = gcpTokenValidator{}
)
func main() {
klog.InitFlags(nil)
flag.Parse()
klog.Info("starting GKE IAP Token validator")
// read the httpPort
port := os.Getenv("HTTP_PORT")
if port != "" {
portNumber, err := strconv.Atoi(port)
if err != nil {
klog.Errorf("invalid configuration, HTTP_PORT must a number, found %q", port)
os.Exit(1)
}
httpPort = portNumber
}
// search for issuer
iss := os.Getenv("OAUTH_ISSUER")
if iss != "" |
// read backend search options
opt, err := parseSearchOptions()
if err != nil {
klog.Errorf("failed to configure search options: %v", err)
os.Exit(1)
}
searchOpt = opt
aud := os.Getenv("OAUTH_AUDIENCE")
if aud != "" {
audience = aud
} else {
// search for audience
go getRequiredClaims()
}
// setup the http handler
http.HandleFunc("/", validateJWT)
klog.V(1).Infof("Running http server on :%v", httpPort)
http.ListenAndServe(fmt.Sprintf(":%v", httpPort), nil)
}
// validateJWT validates a JWT found in the "x-goog-iap-jwt-assertion" header
// and return 200 if valid, 401 if the header is not present, and 403 if the validation fails
func validateJWT(w http.ResponseWriter, req *http.Request) {
if klog.V(3).Enabled() {
klog.Infof("request received from: %v, headers: %v", req.RemoteAddr, req.Header)
}
iapJWT := req.Header.Get("X-Goog-IAP-JWT-Assertion")
if iapJWT == "" {
klog.V(1).Infof("X-Goog-IAP-JWT-Assertion header not found")
http.Error(w, "", http.StatusUnauthorized)
return
}
if audience == "" {
klog.V(1).ErrorS(fmt.Errorf("token cannot be validated, empty audience, check for previous errors"), "")
http.Error(w, "", http.StatusForbidden)
return
}
if issuer == "" {
klog.V(1).ErrorS(fmt.Errorf("token cannot be validated, empty issuer, check for previous errors"), "")
http.Error(w, "", http.StatusForbidden)
return
}
ctx := context.Background()
// we pass empty as audience here because we will validate it later
payload, err := jwtValidator.Validate(ctx, iapJWT, "")
klog.V(3).Infof("payload received: %+v", payload)
if err != nil {
klog.V(1).ErrorS(err, "error validating jwt token")
http.Error(w, "", http.StatusForbidden)
return
}
// empty payload should not be possible
if payload == nil {
klog.V(1).ErrorS(nil, "null payload received")
http.Error(w, "", http.StatusForbidden)
return
}
// validate the audience
if audience != payload.Audience {
klog.V(1).ErrorS(nil, "error validating jwt token, invalid audience, expected %s, got %s", audience, payload.Audience)
http.Error(w, "", http.StatusForbidden)
return
}
// validate the issuer
if issuer != payload.Issuer {
klog.V(1).ErrorS(nil, "error validating jwt token, invalid issuer, expected %s, got %s", issuer, payload.Issuer)
http.Error(w, "", http.StatusForbidden)
return
}
// validate expired - this may be redundant - but we check it anyway
if payload.Expires == 0 || payload.Expires+30 < time.Now().Unix() {
klog.V(1).ErrorS(nil, "error validating jwt token, expired")
http.Error(w, "", http.StatusForbidden)
return
}
// validate IssuedAt - should not be in the future
if payload.IssuedAt == 0 || payload.IssuedAt-30 > time.Now().Unix() {
klog.V(1).ErrorS(nil, "error validating jwt token, emitted in the future")
http.Error(w, "", http.StatusForbidden)
return
}
w.WriteHeader(http.StatusOK)
}
// getRequiredClaims finds the audience require for token validation by querying
// the google apis
func getRequiredClaims() {
resourceService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
klog.Fatalf("cannot create cloudresourcemanager api service")
}
projectsService := gcpProjectsAPIService{projectsService: resourceService.Projects}
computeService, err := compute.NewService(context.Background())
if err != nil {
klog.Fatalf("cannot create compute api service")
}
backendsService := gcpBackendsAPIService{backendServicesService: computeService.BackendServices}
// TODO: use a backoff strategy
tick := time.NewTicker(retryInterval)
defer tick.Stop()
for {
klog.V(3).Info("begin call to get the project number")
projectNumber, err := getProjectNumber(projectsService)
if err != nil {
klog.Errorf("error retrieving project number: %v", err)
}
klog.V(3).Info("begin call to get the backend service id")
backendServiceID, err := getBackendServiceID(backendsService)
if err != nil {
klog.Errorf("error retrieving backend service id: %v", err)
}
if err == nil {
audience = fmt.Sprintf("/projects/%s/global/backendServices/%s", projectNumber, backendServiceID)
klog.V(1).Infof("audience value found: %s, ready to validate requests", audience)
return
}
klog.V(1).Infof("unable to retrieve the audience value, retring in %v seconds", retryInterval.Seconds())
select {
case <-tick.C:
}
}
}
func getProjectNumber(projectsService projectsAPIService) (string, error) {
klog.V(3).Infof("begin call to projects api")
project, err := projectsService.Get(searchOpt.projectID)
if err != nil {
return "", err
}
if klog.V(3).Enabled() {
klog.Infof("project: %+v", project)
}
return strconv.FormatInt(project.ProjectNumber, 10), nil
}
func getBackendServiceID(backendServices backendsAPIService) (string, error) {
// Get only the backends where IAP is enabled
backends, err := backendServices.List("iap.enabled=true")
if err != nil {
return "", err
}
var foundBackend *compute.BackendService
for _, backend := range backends {
klog.V(1).Infof("checking backend %s", backend.Name)
klog.V(3).Infof("backend %+v", backend)
// try to match by oauth client id first
if searchOpt.oauthClientID != "" && backend.Iap.Oauth2ClientId != searchOpt.oauthClientID {
continue
}
// search for service name
if searchOpt.serviceName != "" && !strings.Contains(backend.Name, searchOpt.serviceName) {
continue
}
if searchOpt.serviceGkeName != "" || searchOpt.serviceGkePortName != "" || searchOpt.serviceGkePortNumber > 0 {
// we need to parse the backend description now
svcDesc, err := parseBackendDescription(backend.Description)
if err != nil {
klog.V(3).Infof("parse description error: %w", err)
continue
}
// search by service name
if searchOpt.serviceGkeName != "" && svcDesc.ServiceName != searchOpt.serviceGkeNamespace+"/"+searchOpt.serviceGkeName {
continue
}
// search by port name
if searchOpt.serviceGkePortName != "" &&
(svcDesc.ServicePort.Name != searchOpt.serviceGkePortName ||
searchOpt.serviceGkeName == "") {
continue
}
// search by port number
if searchOpt.serviceGkePortNumber != 0 &&
(svcDesc.ServicePort.Number != searchOpt.serviceGkePortNumber ||
searchOpt.serviceGkeName == "") {
continue
}
}
// if we got here we found our backend
if foundBackend == nil {
foundBackend = backend
// do not break here as we will search for duplicates
} else {
klog.Warning("more than one backend service found with the provided search options")
}
}
if foundBackend != nil {
return strconv.FormatUint(foundBackend.Id, 10), nil
}
return "", fmt.Errorf("Backend service not found")
}
// parseSearchOptions returns the backend search options from environment
// variables
func parseSearchOptions() (*searchOptions, error) {
options := &searchOptions{}
options.projectID = os.Getenv("PROJECT_ID")
if options.projectID == "" {
return nil, fmt.Errorf("invalid configuration, PROJECT_ID variable not found")
}
options.oauthClientID = os.Getenv("OAUTH_CLIENT_ID")
options.serviceName = os.Getenv("SERVICE_NAME")
options.serviceGkeNamespace = os.Getenv("SERVICE_GKE_NAMESPACE")
if options.serviceGkeNamespace == "" {
options.serviceGkeNamespace = "default"
}
options.serviceGkeName = os.Getenv("SERVICE_GKE_NAME")
options.serviceGkePortName = os.Getenv("SERVICE_GKE_PORT_NAME")
portNumber := os.Getenv("SERVICE_GKE_PORT_NUMBER")
if portNumber != "" {
portNumberValue, err := strconv.Atoi(portNumber)
if err != nil {
return nil, fmt.Errorf("invalid configuration, SERVICE_GKE_PORT_NUMBER must a number, found %q", portNumber)
}
options.serviceGkePortNumber = portNumberValue
}
// vaidate search options - at least one search options
if options.oauthClientID == "" &&
options.serviceName == "" &&
options.serviceGkeName == "" {
return nil, fmt.Errorf("Invalid configuration, at least one search criteria must be specified. Set one of the variables: OAUTH_CLIENT_ID, SERVICE_NAME, SERVICE_GKE_NAME")
}
// validate search options - service number with service name
if options.serviceGkeName == "" && options.serviceGkePortName != "" {
klog.Warning("[warning] SERVICE_GKE_PORT_NAME without SERVICE_GKE_NAME, value will be ignored")
}
if options.serviceGkeName == "" && options.serviceGkePortNumber != 0 {
klog.Warning("[warning] SERVICE_GKE_PORT_NUMBER without SERVICE_GKE_NAME, value will be ignored")
}
klog.Infof("search options: %+v", options)
return options, nil
}
func parseBackendDescription(description string) (*serviceDescription, error) {
if description == "" {
return nil, fmt.Errorf("empty description")
}
data := make(map[string]interface{})
if err := json.Unmarshal([]byte(description), &data); err != nil {
return nil, fmt.Errorf("decode error: description: %s, err: %w", description, err)
}
svcDesc := serviceDescription{}
if nameValue, ok := data[serviceNameKey]; ok {
svcDesc.ServiceName = nameValue.(string)
} else {
return nil, fmt.Errorf("name key not found, key:%s, description: %s", serviceNameKey, description)
}
if portValue, ok := data[servicePortKey]; ok {
servicePort := serviceBackendPort{}
pattern := `{Name:(.*),Number:(\d*),}`
reg := regexp.MustCompile(pattern)
match := reg.FindStringSubmatch(portValue.(string))
if match == nil {
return nil, fmt.Errorf("port decode error, description: %s, must match regexp %s", description, pattern)
}
servicePort.Name = match[1]
if match[2] != "" {
port, err := strconv.Atoi(match[2])
if err != nil {
return nil, fmt.Errorf("port number parse error, description: %v, err: %w", description, err)
}
servicePort.Number = port
}
svcDesc.ServicePort = servicePort
} else {
return nil, fmt.Errorf("port key not found, key:%s, description: %s", serviceNameKey, description)
}
return &svcDesc, nil
}
| {
issuer = iss
} | conditional_block |
streamer.rs | // Copyright 2022, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::connectors::{
impls::s3::auth,
prelude::*,
utils::object_storage::{
BufferPart, ConsistentSink, Mode, ObjectId, ObjectStorageBuffer, ObjectStorageCommon,
ObjectStorageSinkImpl, ObjectStorageUpload, YoloSink,
},
};
use aws_sdk_s3::{
types::{CompletedMultipartUpload, CompletedPart},
Client as S3Client,
};
use tremor_common::time::nanotime;
use tremor_pipeline::{EventId, OpMeta};
pub(crate) const CONNECTOR_TYPE: &str = "s3_streamer";
const MORE_THEN_FIVEMBS: usize = 5 * 1024 * 1024 + 100; // Some extra bytes to keep aws happy.
#[derive(Deserialize, Debug, Clone)]
#[serde(deny_unknown_fields)]
pub(crate) struct Config {
aws_region: Option<String>,
url: Option<Url<HttpsDefaults>>,
/// optional default bucket
bucket: Option<String>,
#[serde(default = "Default::default")]
mode: Mode,
#[serde(default = "Config::fivembs")]
buffer_size: usize,
/// Enable path-style access
/// So e.g. creating a bucket is done using:
///
/// PUT http://<host>:<port>/<bucket>
///
/// instead of
///
/// PUT http://<bucket>.<host>:<port>/
///
/// Set this to `true` for accessing s3 compatible backends
/// that do only support path style access, like e.g. minio.
/// Defaults to `true` for backward compatibility.
#[serde(default = "default_true")]
path_style_access: bool,
}
// Defaults for the config.
impl Config {
fn fivembs() -> usize {
MORE_THEN_FIVEMBS
}
fn normalize(&mut self, alias: &Alias) {
if self.buffer_size < MORE_THEN_FIVEMBS {
warn!("[Connector::{alias}] Setting `buffer_size` up to minimum of 5MB.");
self.buffer_size = MORE_THEN_FIVEMBS;
}
}
}
impl ConfigImpl for Config {}
#[derive(Debug, Default)]
pub(crate) struct Builder {}
#[async_trait::async_trait]
impl ConnectorBuilder for Builder {
fn connector_type(&self) -> ConnectorType {
ConnectorType::from(CONNECTOR_TYPE)
}
async fn build_cfg(
&self,
id: &Alias,
_: &ConnectorConfig,
config: &Value,
_kill_switch: &KillSwitch,
) -> Result<Box<dyn Connector>> {
let mut config = Config::new(config)?;
config.normalize(id);
Ok(Box::new(S3Connector { config }))
}
}
struct S3Connector {
config: Config,
}
#[async_trait::async_trait]
impl Connector for S3Connector {
/// Stream the events to the bucket
async fn create_sink(
&mut self,
ctx: SinkContext,
builder: SinkManagerBuilder,
) -> Result<Option<SinkAddr>> {
match self.config.mode {
Mode::Yolo => {
let sink_impl = S3ObjectStorageSinkImpl::yolo(self.config.clone());
let sink: YoloSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
YoloSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
Mode::Consistent => {
let sink_impl =
S3ObjectStorageSinkImpl::consistent(self.config.clone(), builder.reply_tx());
let sink: ConsistentSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
ConsistentSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
}
}
fn codec_requirements(&self) -> CodecReq {
CodecReq::Required
}
}
// TODO: Maybe: https://docs.rs/object_store/latest/object_store/ is the better abstraction ?
pub(super) struct S3ObjectStorageSinkImpl {
config: Config,
client: Option<S3Client>,
reply_tx: Option<ReplySender>,
}
impl ObjectStorageCommon for S3ObjectStorageSinkImpl {
fn default_bucket(&self) -> Option<&String> {
self.config.bucket.as_ref()
}
fn connector_type(&self) -> &str {
CONNECTOR_TYPE
}
}
impl S3ObjectStorageSinkImpl {
pub(crate) fn yolo(config: Config) -> Self {
Self {
config,
client: None,
reply_tx: None,
}
}
pub(crate) fn consistent(config: Config, reply_tx: ReplySender) -> Self {
Self {
config,
client: None,
reply_tx: Some(reply_tx),
}
}
fn get_client(&self) -> Result<&S3Client> {
self.client
.as_ref()
.ok_or_else(|| ErrorKind::S3Error("no s3 client available".to_string()).into())
}
}
pub(crate) struct S3Buffer {
block_size: usize,
data: Vec<u8>,
cursor: usize,
}
impl ObjectStorageBuffer for S3Buffer {
fn new(size: usize) -> Self {
Self {
block_size: size,
data: Vec::with_capacity(size * 2),
cursor: 0,
}
}
fn write(&mut self, mut data: Vec<u8>) {
self.data.append(&mut data);
}
fn read_current_block(&mut self) -> Option<BufferPart> {
if self.data.len() >= self.block_size {
let data = self.data.clone();
self.cursor += data.len();
self.data.clear();
Some(BufferPart {
data,
start: self.cursor,
})
} else {
None
}
}
fn mark_done_until(&mut self, _idx: usize) -> Result<()> {
// no-op
Ok(())
}
fn reset(&mut self) -> BufferPart {
let data = self.data.clone(); // we only clone up to len, not up to capacity
let start = self.cursor;
self.data.clear();
self.cursor = 0;
BufferPart { data, start }
}
}
#[async_trait::async_trait]
impl ObjectStorageSinkImpl<S3Upload> for S3ObjectStorageSinkImpl {
fn buffer_size(&self) -> usize {
self.config.buffer_size
}
async fn connect(&mut self, _ctx: &SinkContext) -> Result<()> {
self.client = Some(
auth::get_client(
self.config.aws_region.clone(),
self.config.url.as_ref(),
self.config.path_style_access,
)
.await?,
);
Ok(())
}
async fn bucket_exists(&mut self, bucket: &str) -> Result<bool> {
self.get_client()?
.head_bucket()
.bucket(bucket)
.send()
.await
.map_err(|e| {
let msg = format!("Failed to access Bucket `{bucket}`: {e}");
Error::from(ErrorKind::S3Error(msg))
})?;
Ok(true)
}
async fn start_upload(
&mut self,
object_id: &ObjectId,
event: &Event,
_ctx: &SinkContext,
) -> Result<S3Upload> {
let resp = self
.get_client()?
.create_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.send()
.await?;
//let upload = CurrentUpload::new(resp.)
let upload_id = resp.upload_id.ok_or_else(|| {
ErrorKind::S3Error(format!(
"Failed to start upload for s3://{}: upload id not found in response.",
&object_id
))
})?;
let upload = S3Upload::new(object_id.clone(), upload_id, event);
Ok(upload)
}
async fn upload_data(
&mut self,
data: BufferPart,
upload: &mut S3Upload,
ctx: &SinkContext,
) -> Result<usize> {
let end = data.end();
upload.part_number += 1; // the upload part number needs to be >= 1, so we increment before uploading
debug!(
"{ctx} Uploading part {} for {}",
upload.part_number,
upload.object_id(),
);
// Upload the part
let resp = self
.get_client()?
.upload_part()
.body(data.data.into())
.part_number(upload.part_number)
.upload_id(upload.upload_id.clone())
.bucket(upload.object_id().bucket())
.key(upload.object_id().name())
.send()
.await?;
let mut completed = CompletedPart::builder().part_number(upload.part_number);
if let Some(e_tag) = resp.e_tag.as_ref() {
completed = completed.e_tag(e_tag);
}
debug!(
"{ctx} part {} uploaded for {}.",
upload.part_number,
upload.object_id()
);
// Insert into the list of completed parts
upload.parts.push(completed.build());
Ok(end)
}
async fn finish_upload(
&mut self,
mut upload: S3Upload,
final_part: BufferPart,
ctx: &SinkContext,
) -> Result<()> {
debug_assert!(
!upload.failed,
"finish may only be called for non-failed uploads"
);
// Upload the last part if any.
if !final_part.is_empty() {
self.upload_data(final_part, &mut upload, ctx).await?;
}
let S3Upload {
object_id,
event_id,
op_meta,
transactional,
upload_id,
parts,
..
} = upload;
debug!("{ctx} Finishing upload {upload_id} for {object_id}");
let res = self
.get_client()?
.complete_multipart_upload()
.bucket(object_id.bucket())
.upload_id(&upload_id)
.key(object_id.name())
.multipart_upload(
CompletedMultipartUpload::builder()
.set_parts(Some(parts))
.build(),
)
.send()
.await;
// send an ack for all the accumulated events in the finished upload
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), transactional) {
let cf_data = ContraflowData::new(event_id, nanotime(), op_meta);
let reply = if let Ok(out) = &res | else {
AsyncSinkReply::Fail(cf_data)
};
ctx.swallow_err(
reply_tx.send(reply),
&format!("Error sending ack/fail for upload {upload_id} to {object_id}"),
);
}
res?;
Ok(())
}
async fn fail_upload(&mut self, upload: S3Upload, ctx: &SinkContext) -> Result<()> {
let S3Upload {
object_id,
upload_id,
event_id,
op_meta,
..
} = upload;
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), upload.transactional) {
ctx.swallow_err(
reply_tx.send(AsyncSinkReply::Fail(ContraflowData::new(
event_id,
nanotime(),
op_meta,
))),
&format!("Error sending fail for upload {upload_id} for {object_id}"),
);
}
ctx.swallow_err(
self.get_client()?
.abort_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.upload_id(&upload_id)
.send()
.await,
&format!("Error aborting multipart upload {upload_id} for {object_id}"),
);
Ok(())
}
}
pub(crate) struct S3Upload {
object_id: ObjectId,
/// tracking the ids for all accumulated events
event_id: EventId,
/// tracking the traversed operators for each accumulated event for correct sink-reply handling
op_meta: OpMeta,
/// tracking the transactional status of the accumulated events
/// if any one of them is transactional, we send an ack for all
transactional: bool,
/// bookkeeping for multipart uploads.
upload_id: String,
part_number: i32,
parts: Vec<CompletedPart>,
/// whether this upload is marked as failed
failed: bool,
}
impl S3Upload {
fn new(object_id: ObjectId, upload_id: String, event: &Event) -> Self {
Self {
object_id,
event_id: event.id.clone(),
op_meta: event.op_meta.clone(),
transactional: event.transactional,
upload_id,
part_number: 0,
parts: Vec::with_capacity(8),
failed: false,
}
}
}
impl ObjectStorageUpload for S3Upload {
fn object_id(&self) -> &ObjectId {
&self.object_id
}
fn is_failed(&self) -> bool {
self.failed
}
fn mark_as_failed(&mut self) {
self.failed = true;
}
fn track(&mut self, event: &Event) {
self.event_id.track(&event.id);
if !event.op_meta.is_empty() {
self.op_meta.merge(event.op_meta.clone());
}
self.transactional |= event.transactional;
}
}
#[cfg(test)]
mod tests {
use super::*;
use tremor_value::literal;
#[test]
fn config_defaults() -> Result<()> {
let config = literal!({});
let res = Config::new(&config)?;
assert!(res.aws_region.is_none());
assert!(res.url.is_none());
assert!(res.bucket.is_none());
assert_eq!(Mode::Yolo, res.mode);
assert_eq!(5_242_980, res.buffer_size);
Ok(())
}
}
| {
if let Some(location) = out.location() {
debug!("{ctx} Finished upload {upload_id} for {location}");
} else {
debug!("{ctx} Finished upload {upload_id} for {object_id}");
}
// the duration of handling in the sink is a little bit meaningless here
// as a) the actual duration from the first event to the actual finishing of the upload
// is horribly long, and shouldn ot be considered the actual event handling time
// b) It will vary a lot e.g. when an actual upload call is made
AsyncSinkReply::Ack(cf_data, 0)
} | conditional_block |
streamer.rs | // Copyright 2022, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::connectors::{
impls::s3::auth,
prelude::*,
utils::object_storage::{
BufferPart, ConsistentSink, Mode, ObjectId, ObjectStorageBuffer, ObjectStorageCommon,
ObjectStorageSinkImpl, ObjectStorageUpload, YoloSink,
},
};
use aws_sdk_s3::{
types::{CompletedMultipartUpload, CompletedPart},
Client as S3Client,
};
use tremor_common::time::nanotime;
use tremor_pipeline::{EventId, OpMeta};
pub(crate) const CONNECTOR_TYPE: &str = "s3_streamer";
const MORE_THEN_FIVEMBS: usize = 5 * 1024 * 1024 + 100; // Some extra bytes to keep aws happy.
#[derive(Deserialize, Debug, Clone)]
#[serde(deny_unknown_fields)] | pub(crate) struct Config {
aws_region: Option<String>,
url: Option<Url<HttpsDefaults>>,
/// optional default bucket
bucket: Option<String>,
#[serde(default = "Default::default")]
mode: Mode,
#[serde(default = "Config::fivembs")]
buffer_size: usize,
/// Enable path-style access
/// So e.g. creating a bucket is done using:
///
/// PUT http://<host>:<port>/<bucket>
///
/// instead of
///
/// PUT http://<bucket>.<host>:<port>/
///
/// Set this to `true` for accessing s3 compatible backends
/// that do only support path style access, like e.g. minio.
/// Defaults to `true` for backward compatibility.
#[serde(default = "default_true")]
path_style_access: bool,
}
// Defaults for the config.
impl Config {
fn fivembs() -> usize {
MORE_THEN_FIVEMBS
}
fn normalize(&mut self, alias: &Alias) {
if self.buffer_size < MORE_THEN_FIVEMBS {
warn!("[Connector::{alias}] Setting `buffer_size` up to minimum of 5MB.");
self.buffer_size = MORE_THEN_FIVEMBS;
}
}
}
impl ConfigImpl for Config {}
#[derive(Debug, Default)]
pub(crate) struct Builder {}
#[async_trait::async_trait]
impl ConnectorBuilder for Builder {
fn connector_type(&self) -> ConnectorType {
ConnectorType::from(CONNECTOR_TYPE)
}
async fn build_cfg(
&self,
id: &Alias,
_: &ConnectorConfig,
config: &Value,
_kill_switch: &KillSwitch,
) -> Result<Box<dyn Connector>> {
let mut config = Config::new(config)?;
config.normalize(id);
Ok(Box::new(S3Connector { config }))
}
}
struct S3Connector {
config: Config,
}
#[async_trait::async_trait]
impl Connector for S3Connector {
/// Stream the events to the bucket
async fn create_sink(
&mut self,
ctx: SinkContext,
builder: SinkManagerBuilder,
) -> Result<Option<SinkAddr>> {
match self.config.mode {
Mode::Yolo => {
let sink_impl = S3ObjectStorageSinkImpl::yolo(self.config.clone());
let sink: YoloSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
YoloSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
Mode::Consistent => {
let sink_impl =
S3ObjectStorageSinkImpl::consistent(self.config.clone(), builder.reply_tx());
let sink: ConsistentSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
ConsistentSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
}
}
fn codec_requirements(&self) -> CodecReq {
CodecReq::Required
}
}
// TODO: Maybe: https://docs.rs/object_store/latest/object_store/ is the better abstraction ?
pub(super) struct S3ObjectStorageSinkImpl {
config: Config,
client: Option<S3Client>,
reply_tx: Option<ReplySender>,
}
impl ObjectStorageCommon for S3ObjectStorageSinkImpl {
fn default_bucket(&self) -> Option<&String> {
self.config.bucket.as_ref()
}
fn connector_type(&self) -> &str {
CONNECTOR_TYPE
}
}
impl S3ObjectStorageSinkImpl {
pub(crate) fn yolo(config: Config) -> Self {
Self {
config,
client: None,
reply_tx: None,
}
}
pub(crate) fn consistent(config: Config, reply_tx: ReplySender) -> Self {
Self {
config,
client: None,
reply_tx: Some(reply_tx),
}
}
fn get_client(&self) -> Result<&S3Client> {
self.client
.as_ref()
.ok_or_else(|| ErrorKind::S3Error("no s3 client available".to_string()).into())
}
}
pub(crate) struct S3Buffer {
block_size: usize,
data: Vec<u8>,
cursor: usize,
}
impl ObjectStorageBuffer for S3Buffer {
fn new(size: usize) -> Self {
Self {
block_size: size,
data: Vec::with_capacity(size * 2),
cursor: 0,
}
}
fn write(&mut self, mut data: Vec<u8>) {
self.data.append(&mut data);
}
fn read_current_block(&mut self) -> Option<BufferPart> {
if self.data.len() >= self.block_size {
let data = self.data.clone();
self.cursor += data.len();
self.data.clear();
Some(BufferPart {
data,
start: self.cursor,
})
} else {
None
}
}
fn mark_done_until(&mut self, _idx: usize) -> Result<()> {
// no-op
Ok(())
}
fn reset(&mut self) -> BufferPart {
let data = self.data.clone(); // we only clone up to len, not up to capacity
let start = self.cursor;
self.data.clear();
self.cursor = 0;
BufferPart { data, start }
}
}
#[async_trait::async_trait]
impl ObjectStorageSinkImpl<S3Upload> for S3ObjectStorageSinkImpl {
fn buffer_size(&self) -> usize {
self.config.buffer_size
}
async fn connect(&mut self, _ctx: &SinkContext) -> Result<()> {
self.client = Some(
auth::get_client(
self.config.aws_region.clone(),
self.config.url.as_ref(),
self.config.path_style_access,
)
.await?,
);
Ok(())
}
async fn bucket_exists(&mut self, bucket: &str) -> Result<bool> {
self.get_client()?
.head_bucket()
.bucket(bucket)
.send()
.await
.map_err(|e| {
let msg = format!("Failed to access Bucket `{bucket}`: {e}");
Error::from(ErrorKind::S3Error(msg))
})?;
Ok(true)
}
async fn start_upload(
&mut self,
object_id: &ObjectId,
event: &Event,
_ctx: &SinkContext,
) -> Result<S3Upload> {
let resp = self
.get_client()?
.create_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.send()
.await?;
//let upload = CurrentUpload::new(resp.)
let upload_id = resp.upload_id.ok_or_else(|| {
ErrorKind::S3Error(format!(
"Failed to start upload for s3://{}: upload id not found in response.",
&object_id
))
})?;
let upload = S3Upload::new(object_id.clone(), upload_id, event);
Ok(upload)
}
async fn upload_data(
&mut self,
data: BufferPart,
upload: &mut S3Upload,
ctx: &SinkContext,
) -> Result<usize> {
let end = data.end();
upload.part_number += 1; // the upload part number needs to be >= 1, so we increment before uploading
debug!(
"{ctx} Uploading part {} for {}",
upload.part_number,
upload.object_id(),
);
// Upload the part
let resp = self
.get_client()?
.upload_part()
.body(data.data.into())
.part_number(upload.part_number)
.upload_id(upload.upload_id.clone())
.bucket(upload.object_id().bucket())
.key(upload.object_id().name())
.send()
.await?;
let mut completed = CompletedPart::builder().part_number(upload.part_number);
if let Some(e_tag) = resp.e_tag.as_ref() {
completed = completed.e_tag(e_tag);
}
debug!(
"{ctx} part {} uploaded for {}.",
upload.part_number,
upload.object_id()
);
// Insert into the list of completed parts
upload.parts.push(completed.build());
Ok(end)
}
async fn finish_upload(
&mut self,
mut upload: S3Upload,
final_part: BufferPart,
ctx: &SinkContext,
) -> Result<()> {
debug_assert!(
!upload.failed,
"finish may only be called for non-failed uploads"
);
// Upload the last part if any.
if !final_part.is_empty() {
self.upload_data(final_part, &mut upload, ctx).await?;
}
let S3Upload {
object_id,
event_id,
op_meta,
transactional,
upload_id,
parts,
..
} = upload;
debug!("{ctx} Finishing upload {upload_id} for {object_id}");
let res = self
.get_client()?
.complete_multipart_upload()
.bucket(object_id.bucket())
.upload_id(&upload_id)
.key(object_id.name())
.multipart_upload(
CompletedMultipartUpload::builder()
.set_parts(Some(parts))
.build(),
)
.send()
.await;
// send an ack for all the accumulated events in the finished upload
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), transactional) {
let cf_data = ContraflowData::new(event_id, nanotime(), op_meta);
let reply = if let Ok(out) = &res {
if let Some(location) = out.location() {
debug!("{ctx} Finished upload {upload_id} for {location}");
} else {
debug!("{ctx} Finished upload {upload_id} for {object_id}");
}
// the duration of handling in the sink is a little bit meaningless here
// as a) the actual duration from the first event to the actual finishing of the upload
// is horribly long, and shouldn ot be considered the actual event handling time
// b) It will vary a lot e.g. when an actual upload call is made
AsyncSinkReply::Ack(cf_data, 0)
} else {
AsyncSinkReply::Fail(cf_data)
};
ctx.swallow_err(
reply_tx.send(reply),
&format!("Error sending ack/fail for upload {upload_id} to {object_id}"),
);
}
res?;
Ok(())
}
async fn fail_upload(&mut self, upload: S3Upload, ctx: &SinkContext) -> Result<()> {
let S3Upload {
object_id,
upload_id,
event_id,
op_meta,
..
} = upload;
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), upload.transactional) {
ctx.swallow_err(
reply_tx.send(AsyncSinkReply::Fail(ContraflowData::new(
event_id,
nanotime(),
op_meta,
))),
&format!("Error sending fail for upload {upload_id} for {object_id}"),
);
}
ctx.swallow_err(
self.get_client()?
.abort_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.upload_id(&upload_id)
.send()
.await,
&format!("Error aborting multipart upload {upload_id} for {object_id}"),
);
Ok(())
}
}
pub(crate) struct S3Upload {
object_id: ObjectId,
/// tracking the ids for all accumulated events
event_id: EventId,
/// tracking the traversed operators for each accumulated event for correct sink-reply handling
op_meta: OpMeta,
/// tracking the transactional status of the accumulated events
/// if any one of them is transactional, we send an ack for all
transactional: bool,
/// bookkeeping for multipart uploads.
upload_id: String,
part_number: i32,
parts: Vec<CompletedPart>,
/// whether this upload is marked as failed
failed: bool,
}
impl S3Upload {
fn new(object_id: ObjectId, upload_id: String, event: &Event) -> Self {
Self {
object_id,
event_id: event.id.clone(),
op_meta: event.op_meta.clone(),
transactional: event.transactional,
upload_id,
part_number: 0,
parts: Vec::with_capacity(8),
failed: false,
}
}
}
impl ObjectStorageUpload for S3Upload {
fn object_id(&self) -> &ObjectId {
&self.object_id
}
fn is_failed(&self) -> bool {
self.failed
}
fn mark_as_failed(&mut self) {
self.failed = true;
}
fn track(&mut self, event: &Event) {
self.event_id.track(&event.id);
if !event.op_meta.is_empty() {
self.op_meta.merge(event.op_meta.clone());
}
self.transactional |= event.transactional;
}
}
#[cfg(test)]
mod tests {
use super::*;
use tremor_value::literal;
#[test]
fn config_defaults() -> Result<()> {
let config = literal!({});
let res = Config::new(&config)?;
assert!(res.aws_region.is_none());
assert!(res.url.is_none());
assert!(res.bucket.is_none());
assert_eq!(Mode::Yolo, res.mode);
assert_eq!(5_242_980, res.buffer_size);
Ok(())
}
} | random_line_split | |
streamer.rs | // Copyright 2022, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::connectors::{
impls::s3::auth,
prelude::*,
utils::object_storage::{
BufferPart, ConsistentSink, Mode, ObjectId, ObjectStorageBuffer, ObjectStorageCommon,
ObjectStorageSinkImpl, ObjectStorageUpload, YoloSink,
},
};
use aws_sdk_s3::{
types::{CompletedMultipartUpload, CompletedPart},
Client as S3Client,
};
use tremor_common::time::nanotime;
use tremor_pipeline::{EventId, OpMeta};
pub(crate) const CONNECTOR_TYPE: &str = "s3_streamer";
const MORE_THEN_FIVEMBS: usize = 5 * 1024 * 1024 + 100; // Some extra bytes to keep aws happy.
#[derive(Deserialize, Debug, Clone)]
#[serde(deny_unknown_fields)]
pub(crate) struct Config {
aws_region: Option<String>,
url: Option<Url<HttpsDefaults>>,
/// optional default bucket
bucket: Option<String>,
#[serde(default = "Default::default")]
mode: Mode,
#[serde(default = "Config::fivembs")]
buffer_size: usize,
/// Enable path-style access
/// So e.g. creating a bucket is done using:
///
/// PUT http://<host>:<port>/<bucket>
///
/// instead of
///
/// PUT http://<bucket>.<host>:<port>/
///
/// Set this to `true` for accessing s3 compatible backends
/// that do only support path style access, like e.g. minio.
/// Defaults to `true` for backward compatibility.
#[serde(default = "default_true")]
path_style_access: bool,
}
// Defaults for the config.
impl Config {
fn fivembs() -> usize {
MORE_THEN_FIVEMBS
}
fn normalize(&mut self, alias: &Alias) {
if self.buffer_size < MORE_THEN_FIVEMBS {
warn!("[Connector::{alias}] Setting `buffer_size` up to minimum of 5MB.");
self.buffer_size = MORE_THEN_FIVEMBS;
}
}
}
impl ConfigImpl for Config {}
#[derive(Debug, Default)]
pub(crate) struct Builder {}
#[async_trait::async_trait]
impl ConnectorBuilder for Builder {
fn connector_type(&self) -> ConnectorType {
ConnectorType::from(CONNECTOR_TYPE)
}
async fn | (
&self,
id: &Alias,
_: &ConnectorConfig,
config: &Value,
_kill_switch: &KillSwitch,
) -> Result<Box<dyn Connector>> {
let mut config = Config::new(config)?;
config.normalize(id);
Ok(Box::new(S3Connector { config }))
}
}
struct S3Connector {
config: Config,
}
#[async_trait::async_trait]
impl Connector for S3Connector {
/// Stream the events to the bucket
async fn create_sink(
&mut self,
ctx: SinkContext,
builder: SinkManagerBuilder,
) -> Result<Option<SinkAddr>> {
match self.config.mode {
Mode::Yolo => {
let sink_impl = S3ObjectStorageSinkImpl::yolo(self.config.clone());
let sink: YoloSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
YoloSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
Mode::Consistent => {
let sink_impl =
S3ObjectStorageSinkImpl::consistent(self.config.clone(), builder.reply_tx());
let sink: ConsistentSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
ConsistentSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
}
}
fn codec_requirements(&self) -> CodecReq {
CodecReq::Required
}
}
// TODO: Maybe: https://docs.rs/object_store/latest/object_store/ is the better abstraction ?
pub(super) struct S3ObjectStorageSinkImpl {
config: Config,
client: Option<S3Client>,
reply_tx: Option<ReplySender>,
}
impl ObjectStorageCommon for S3ObjectStorageSinkImpl {
fn default_bucket(&self) -> Option<&String> {
self.config.bucket.as_ref()
}
fn connector_type(&self) -> &str {
CONNECTOR_TYPE
}
}
impl S3ObjectStorageSinkImpl {
pub(crate) fn yolo(config: Config) -> Self {
Self {
config,
client: None,
reply_tx: None,
}
}
pub(crate) fn consistent(config: Config, reply_tx: ReplySender) -> Self {
Self {
config,
client: None,
reply_tx: Some(reply_tx),
}
}
fn get_client(&self) -> Result<&S3Client> {
self.client
.as_ref()
.ok_or_else(|| ErrorKind::S3Error("no s3 client available".to_string()).into())
}
}
pub(crate) struct S3Buffer {
block_size: usize,
data: Vec<u8>,
cursor: usize,
}
impl ObjectStorageBuffer for S3Buffer {
fn new(size: usize) -> Self {
Self {
block_size: size,
data: Vec::with_capacity(size * 2),
cursor: 0,
}
}
fn write(&mut self, mut data: Vec<u8>) {
self.data.append(&mut data);
}
fn read_current_block(&mut self) -> Option<BufferPart> {
if self.data.len() >= self.block_size {
let data = self.data.clone();
self.cursor += data.len();
self.data.clear();
Some(BufferPart {
data,
start: self.cursor,
})
} else {
None
}
}
fn mark_done_until(&mut self, _idx: usize) -> Result<()> {
// no-op
Ok(())
}
fn reset(&mut self) -> BufferPart {
let data = self.data.clone(); // we only clone up to len, not up to capacity
let start = self.cursor;
self.data.clear();
self.cursor = 0;
BufferPart { data, start }
}
}
#[async_trait::async_trait]
impl ObjectStorageSinkImpl<S3Upload> for S3ObjectStorageSinkImpl {
fn buffer_size(&self) -> usize {
self.config.buffer_size
}
async fn connect(&mut self, _ctx: &SinkContext) -> Result<()> {
self.client = Some(
auth::get_client(
self.config.aws_region.clone(),
self.config.url.as_ref(),
self.config.path_style_access,
)
.await?,
);
Ok(())
}
async fn bucket_exists(&mut self, bucket: &str) -> Result<bool> {
self.get_client()?
.head_bucket()
.bucket(bucket)
.send()
.await
.map_err(|e| {
let msg = format!("Failed to access Bucket `{bucket}`: {e}");
Error::from(ErrorKind::S3Error(msg))
})?;
Ok(true)
}
async fn start_upload(
&mut self,
object_id: &ObjectId,
event: &Event,
_ctx: &SinkContext,
) -> Result<S3Upload> {
let resp = self
.get_client()?
.create_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.send()
.await?;
//let upload = CurrentUpload::new(resp.)
let upload_id = resp.upload_id.ok_or_else(|| {
ErrorKind::S3Error(format!(
"Failed to start upload for s3://{}: upload id not found in response.",
&object_id
))
})?;
let upload = S3Upload::new(object_id.clone(), upload_id, event);
Ok(upload)
}
async fn upload_data(
&mut self,
data: BufferPart,
upload: &mut S3Upload,
ctx: &SinkContext,
) -> Result<usize> {
let end = data.end();
upload.part_number += 1; // the upload part number needs to be >= 1, so we increment before uploading
debug!(
"{ctx} Uploading part {} for {}",
upload.part_number,
upload.object_id(),
);
// Upload the part
let resp = self
.get_client()?
.upload_part()
.body(data.data.into())
.part_number(upload.part_number)
.upload_id(upload.upload_id.clone())
.bucket(upload.object_id().bucket())
.key(upload.object_id().name())
.send()
.await?;
let mut completed = CompletedPart::builder().part_number(upload.part_number);
if let Some(e_tag) = resp.e_tag.as_ref() {
completed = completed.e_tag(e_tag);
}
debug!(
"{ctx} part {} uploaded for {}.",
upload.part_number,
upload.object_id()
);
// Insert into the list of completed parts
upload.parts.push(completed.build());
Ok(end)
}
async fn finish_upload(
&mut self,
mut upload: S3Upload,
final_part: BufferPart,
ctx: &SinkContext,
) -> Result<()> {
debug_assert!(
!upload.failed,
"finish may only be called for non-failed uploads"
);
// Upload the last part if any.
if !final_part.is_empty() {
self.upload_data(final_part, &mut upload, ctx).await?;
}
let S3Upload {
object_id,
event_id,
op_meta,
transactional,
upload_id,
parts,
..
} = upload;
debug!("{ctx} Finishing upload {upload_id} for {object_id}");
let res = self
.get_client()?
.complete_multipart_upload()
.bucket(object_id.bucket())
.upload_id(&upload_id)
.key(object_id.name())
.multipart_upload(
CompletedMultipartUpload::builder()
.set_parts(Some(parts))
.build(),
)
.send()
.await;
// send an ack for all the accumulated events in the finished upload
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), transactional) {
let cf_data = ContraflowData::new(event_id, nanotime(), op_meta);
let reply = if let Ok(out) = &res {
if let Some(location) = out.location() {
debug!("{ctx} Finished upload {upload_id} for {location}");
} else {
debug!("{ctx} Finished upload {upload_id} for {object_id}");
}
// the duration of handling in the sink is a little bit meaningless here
// as a) the actual duration from the first event to the actual finishing of the upload
// is horribly long, and shouldn ot be considered the actual event handling time
// b) It will vary a lot e.g. when an actual upload call is made
AsyncSinkReply::Ack(cf_data, 0)
} else {
AsyncSinkReply::Fail(cf_data)
};
ctx.swallow_err(
reply_tx.send(reply),
&format!("Error sending ack/fail for upload {upload_id} to {object_id}"),
);
}
res?;
Ok(())
}
async fn fail_upload(&mut self, upload: S3Upload, ctx: &SinkContext) -> Result<()> {
let S3Upload {
object_id,
upload_id,
event_id,
op_meta,
..
} = upload;
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), upload.transactional) {
ctx.swallow_err(
reply_tx.send(AsyncSinkReply::Fail(ContraflowData::new(
event_id,
nanotime(),
op_meta,
))),
&format!("Error sending fail for upload {upload_id} for {object_id}"),
);
}
ctx.swallow_err(
self.get_client()?
.abort_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.upload_id(&upload_id)
.send()
.await,
&format!("Error aborting multipart upload {upload_id} for {object_id}"),
);
Ok(())
}
}
pub(crate) struct S3Upload {
object_id: ObjectId,
/// tracking the ids for all accumulated events
event_id: EventId,
/// tracking the traversed operators for each accumulated event for correct sink-reply handling
op_meta: OpMeta,
/// tracking the transactional status of the accumulated events
/// if any one of them is transactional, we send an ack for all
transactional: bool,
/// bookkeeping for multipart uploads.
upload_id: String,
part_number: i32,
parts: Vec<CompletedPart>,
/// whether this upload is marked as failed
failed: bool,
}
impl S3Upload {
fn new(object_id: ObjectId, upload_id: String, event: &Event) -> Self {
Self {
object_id,
event_id: event.id.clone(),
op_meta: event.op_meta.clone(),
transactional: event.transactional,
upload_id,
part_number: 0,
parts: Vec::with_capacity(8),
failed: false,
}
}
}
impl ObjectStorageUpload for S3Upload {
fn object_id(&self) -> &ObjectId {
&self.object_id
}
fn is_failed(&self) -> bool {
self.failed
}
fn mark_as_failed(&mut self) {
self.failed = true;
}
fn track(&mut self, event: &Event) {
self.event_id.track(&event.id);
if !event.op_meta.is_empty() {
self.op_meta.merge(event.op_meta.clone());
}
self.transactional |= event.transactional;
}
}
#[cfg(test)]
mod tests {
use super::*;
use tremor_value::literal;
#[test]
fn config_defaults() -> Result<()> {
let config = literal!({});
let res = Config::new(&config)?;
assert!(res.aws_region.is_none());
assert!(res.url.is_none());
assert!(res.bucket.is_none());
assert_eq!(Mode::Yolo, res.mode);
assert_eq!(5_242_980, res.buffer_size);
Ok(())
}
}
| build_cfg | identifier_name |
streamer.rs | // Copyright 2022, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::connectors::{
impls::s3::auth,
prelude::*,
utils::object_storage::{
BufferPart, ConsistentSink, Mode, ObjectId, ObjectStorageBuffer, ObjectStorageCommon,
ObjectStorageSinkImpl, ObjectStorageUpload, YoloSink,
},
};
use aws_sdk_s3::{
types::{CompletedMultipartUpload, CompletedPart},
Client as S3Client,
};
use tremor_common::time::nanotime;
use tremor_pipeline::{EventId, OpMeta};
pub(crate) const CONNECTOR_TYPE: &str = "s3_streamer";
const MORE_THEN_FIVEMBS: usize = 5 * 1024 * 1024 + 100; // Some extra bytes to keep aws happy.
#[derive(Deserialize, Debug, Clone)]
#[serde(deny_unknown_fields)]
pub(crate) struct Config {
aws_region: Option<String>,
url: Option<Url<HttpsDefaults>>,
/// optional default bucket
bucket: Option<String>,
#[serde(default = "Default::default")]
mode: Mode,
#[serde(default = "Config::fivembs")]
buffer_size: usize,
/// Enable path-style access
/// So e.g. creating a bucket is done using:
///
/// PUT http://<host>:<port>/<bucket>
///
/// instead of
///
/// PUT http://<bucket>.<host>:<port>/
///
/// Set this to `true` for accessing s3 compatible backends
/// that do only support path style access, like e.g. minio.
/// Defaults to `true` for backward compatibility.
#[serde(default = "default_true")]
path_style_access: bool,
}
// Defaults for the config.
impl Config {
fn fivembs() -> usize {
MORE_THEN_FIVEMBS
}
fn normalize(&mut self, alias: &Alias) {
if self.buffer_size < MORE_THEN_FIVEMBS {
warn!("[Connector::{alias}] Setting `buffer_size` up to minimum of 5MB.");
self.buffer_size = MORE_THEN_FIVEMBS;
}
}
}
impl ConfigImpl for Config {}
#[derive(Debug, Default)]
pub(crate) struct Builder {}
#[async_trait::async_trait]
impl ConnectorBuilder for Builder {
fn connector_type(&self) -> ConnectorType {
ConnectorType::from(CONNECTOR_TYPE)
}
async fn build_cfg(
&self,
id: &Alias,
_: &ConnectorConfig,
config: &Value,
_kill_switch: &KillSwitch,
) -> Result<Box<dyn Connector>> {
let mut config = Config::new(config)?;
config.normalize(id);
Ok(Box::new(S3Connector { config }))
}
}
struct S3Connector {
config: Config,
}
#[async_trait::async_trait]
impl Connector for S3Connector {
/// Stream the events to the bucket
async fn create_sink(
&mut self,
ctx: SinkContext,
builder: SinkManagerBuilder,
) -> Result<Option<SinkAddr>> {
match self.config.mode {
Mode::Yolo => {
let sink_impl = S3ObjectStorageSinkImpl::yolo(self.config.clone());
let sink: YoloSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
YoloSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
Mode::Consistent => {
let sink_impl =
S3ObjectStorageSinkImpl::consistent(self.config.clone(), builder.reply_tx());
let sink: ConsistentSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
ConsistentSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
}
}
fn codec_requirements(&self) -> CodecReq {
CodecReq::Required
}
}
// TODO: Maybe: https://docs.rs/object_store/latest/object_store/ is the better abstraction ?
pub(super) struct S3ObjectStorageSinkImpl {
config: Config,
client: Option<S3Client>,
reply_tx: Option<ReplySender>,
}
impl ObjectStorageCommon for S3ObjectStorageSinkImpl {
fn default_bucket(&self) -> Option<&String> {
self.config.bucket.as_ref()
}
fn connector_type(&self) -> &str {
CONNECTOR_TYPE
}
}
impl S3ObjectStorageSinkImpl {
pub(crate) fn yolo(config: Config) -> Self {
Self {
config,
client: None,
reply_tx: None,
}
}
pub(crate) fn consistent(config: Config, reply_tx: ReplySender) -> Self {
Self {
config,
client: None,
reply_tx: Some(reply_tx),
}
}
fn get_client(&self) -> Result<&S3Client> {
self.client
.as_ref()
.ok_or_else(|| ErrorKind::S3Error("no s3 client available".to_string()).into())
}
}
pub(crate) struct S3Buffer {
block_size: usize,
data: Vec<u8>,
cursor: usize,
}
impl ObjectStorageBuffer for S3Buffer {
fn new(size: usize) -> Self {
Self {
block_size: size,
data: Vec::with_capacity(size * 2),
cursor: 0,
}
}
fn write(&mut self, mut data: Vec<u8>) {
self.data.append(&mut data);
}
fn read_current_block(&mut self) -> Option<BufferPart> {
if self.data.len() >= self.block_size {
let data = self.data.clone();
self.cursor += data.len();
self.data.clear();
Some(BufferPart {
data,
start: self.cursor,
})
} else {
None
}
}
fn mark_done_until(&mut self, _idx: usize) -> Result<()> {
// no-op
Ok(())
}
fn reset(&mut self) -> BufferPart {
let data = self.data.clone(); // we only clone up to len, not up to capacity
let start = self.cursor;
self.data.clear();
self.cursor = 0;
BufferPart { data, start }
}
}
#[async_trait::async_trait]
impl ObjectStorageSinkImpl<S3Upload> for S3ObjectStorageSinkImpl {
fn buffer_size(&self) -> usize {
self.config.buffer_size
}
async fn connect(&mut self, _ctx: &SinkContext) -> Result<()> {
self.client = Some(
auth::get_client(
self.config.aws_region.clone(),
self.config.url.as_ref(),
self.config.path_style_access,
)
.await?,
);
Ok(())
}
async fn bucket_exists(&mut self, bucket: &str) -> Result<bool> {
self.get_client()?
.head_bucket()
.bucket(bucket)
.send()
.await
.map_err(|e| {
let msg = format!("Failed to access Bucket `{bucket}`: {e}");
Error::from(ErrorKind::S3Error(msg))
})?;
Ok(true)
}
async fn start_upload(
&mut self,
object_id: &ObjectId,
event: &Event,
_ctx: &SinkContext,
) -> Result<S3Upload> {
let resp = self
.get_client()?
.create_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.send()
.await?;
//let upload = CurrentUpload::new(resp.)
let upload_id = resp.upload_id.ok_or_else(|| {
ErrorKind::S3Error(format!(
"Failed to start upload for s3://{}: upload id not found in response.",
&object_id
))
})?;
let upload = S3Upload::new(object_id.clone(), upload_id, event);
Ok(upload)
}
async fn upload_data(
&mut self,
data: BufferPart,
upload: &mut S3Upload,
ctx: &SinkContext,
) -> Result<usize> {
let end = data.end();
upload.part_number += 1; // the upload part number needs to be >= 1, so we increment before uploading
debug!(
"{ctx} Uploading part {} for {}",
upload.part_number,
upload.object_id(),
);
// Upload the part
let resp = self
.get_client()?
.upload_part()
.body(data.data.into())
.part_number(upload.part_number)
.upload_id(upload.upload_id.clone())
.bucket(upload.object_id().bucket())
.key(upload.object_id().name())
.send()
.await?;
let mut completed = CompletedPart::builder().part_number(upload.part_number);
if let Some(e_tag) = resp.e_tag.as_ref() {
completed = completed.e_tag(e_tag);
}
debug!(
"{ctx} part {} uploaded for {}.",
upload.part_number,
upload.object_id()
);
// Insert into the list of completed parts
upload.parts.push(completed.build());
Ok(end)
}
async fn finish_upload(
&mut self,
mut upload: S3Upload,
final_part: BufferPart,
ctx: &SinkContext,
) -> Result<()> {
debug_assert!(
!upload.failed,
"finish may only be called for non-failed uploads"
);
// Upload the last part if any.
if !final_part.is_empty() {
self.upload_data(final_part, &mut upload, ctx).await?;
}
let S3Upload {
object_id,
event_id,
op_meta,
transactional,
upload_id,
parts,
..
} = upload;
debug!("{ctx} Finishing upload {upload_id} for {object_id}");
let res = self
.get_client()?
.complete_multipart_upload()
.bucket(object_id.bucket())
.upload_id(&upload_id)
.key(object_id.name())
.multipart_upload(
CompletedMultipartUpload::builder()
.set_parts(Some(parts))
.build(),
)
.send()
.await;
// send an ack for all the accumulated events in the finished upload
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), transactional) {
let cf_data = ContraflowData::new(event_id, nanotime(), op_meta);
let reply = if let Ok(out) = &res {
if let Some(location) = out.location() {
debug!("{ctx} Finished upload {upload_id} for {location}");
} else {
debug!("{ctx} Finished upload {upload_id} for {object_id}");
}
// the duration of handling in the sink is a little bit meaningless here
// as a) the actual duration from the first event to the actual finishing of the upload
// is horribly long, and shouldn ot be considered the actual event handling time
// b) It will vary a lot e.g. when an actual upload call is made
AsyncSinkReply::Ack(cf_data, 0)
} else {
AsyncSinkReply::Fail(cf_data)
};
ctx.swallow_err(
reply_tx.send(reply),
&format!("Error sending ack/fail for upload {upload_id} to {object_id}"),
);
}
res?;
Ok(())
}
async fn fail_upload(&mut self, upload: S3Upload, ctx: &SinkContext) -> Result<()> {
let S3Upload {
object_id,
upload_id,
event_id,
op_meta,
..
} = upload;
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), upload.transactional) {
ctx.swallow_err(
reply_tx.send(AsyncSinkReply::Fail(ContraflowData::new(
event_id,
nanotime(),
op_meta,
))),
&format!("Error sending fail for upload {upload_id} for {object_id}"),
);
}
ctx.swallow_err(
self.get_client()?
.abort_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.upload_id(&upload_id)
.send()
.await,
&format!("Error aborting multipart upload {upload_id} for {object_id}"),
);
Ok(())
}
}
pub(crate) struct S3Upload {
object_id: ObjectId,
/// tracking the ids for all accumulated events
event_id: EventId,
/// tracking the traversed operators for each accumulated event for correct sink-reply handling
op_meta: OpMeta,
/// tracking the transactional status of the accumulated events
/// if any one of them is transactional, we send an ack for all
transactional: bool,
/// bookkeeping for multipart uploads.
upload_id: String,
part_number: i32,
parts: Vec<CompletedPart>,
/// whether this upload is marked as failed
failed: bool,
}
impl S3Upload {
fn new(object_id: ObjectId, upload_id: String, event: &Event) -> Self {
Self {
object_id,
event_id: event.id.clone(),
op_meta: event.op_meta.clone(),
transactional: event.transactional,
upload_id,
part_number: 0,
parts: Vec::with_capacity(8),
failed: false,
}
}
}
impl ObjectStorageUpload for S3Upload {
fn object_id(&self) -> &ObjectId |
fn is_failed(&self) -> bool {
self.failed
}
fn mark_as_failed(&mut self) {
self.failed = true;
}
fn track(&mut self, event: &Event) {
self.event_id.track(&event.id);
if !event.op_meta.is_empty() {
self.op_meta.merge(event.op_meta.clone());
}
self.transactional |= event.transactional;
}
}
#[cfg(test)]
mod tests {
use super::*;
use tremor_value::literal;
#[test]
fn config_defaults() -> Result<()> {
let config = literal!({});
let res = Config::new(&config)?;
assert!(res.aws_region.is_none());
assert!(res.url.is_none());
assert!(res.bucket.is_none());
assert_eq!(Mode::Yolo, res.mode);
assert_eq!(5_242_980, res.buffer_size);
Ok(())
}
}
| {
&self.object_id
} | identifier_body |
puzzleGenerator.py | import random
import sys
try:
import Queue as Q # ver. < 3.0
except ImportError:
import queue as Q
import copy
import datetime
import math
# moves = UP, RIGHT, DOWN, LEFT
moves = [[-1, 0], [0, 1], [1, 0], [0, -1]]
def isPositionLegal(board, x, y):
n = len(board)
return ((x >= 0) and (x < n) and (y >= 0) and (y < n))
def nextPos(x,y, move):
nextX = x + move[0]
nextY = y + move[1]
return nextX, nextY
def canMove(board, direction):
| mv = moves[direction]
x, y = findGap(board)
x2, y2 = nextPos(x, y, mv)
return isPositionLegal(board, x2, y2)
# def canMove(board):
# x, y = findGap(board)
#
# for mv in moves:
# x2, y2 = nextPos(x, y, mv)
# if isPositionLegal(board, x2, y2):
# return True
#
# return False
def possibleMoves(board):
global moves
x, y = findGap(board)
res = []
for mv in moves:
x2, y2 = nextPos(x, y, mv)
if isPositionLegal(board, x2, y2):
res.append(mv)
return res
def moveGap(board, move):
x, y = findGap(board)
x2, y2 = nextPos(x, y, move)
tmp = board[x][y]
board[x][y] = board[x2][y2]
board[x2][y2] = tmp
def findGap(board):
for i in range(len(board)):
for j in range(len(board[i])):
if board[i][j] == 0:
return i,j
return -1, -1
def printBoard(board):
print("")
for row in board:
row_str = ""
for cell in row:
row_str += str(cell) + " "
print(row_str)
def translateMoveToLetter(move):
m1,m2 = move
if m1 == -1 and m2 == 0:
return 'U'
if m1 == 0 and m2 == 1:
return 'R'
if m1 == 1 and m2 == 0:
return 'D'
if m1 ==0 and m2 == -1:
return 'L'
def manhattanDistance(board):
distance = 0
l = len(board)
for i in range(l):
for j in range(l):
if board[i][j] != 0 and board[i][j] != (n*i+j+1):
#Mapping to actual positions to find Manhattan distance - xA,yA are the actual positions
rem = board[i][j] % l
quotient = int(board[i][j] / l)
if rem == 0:
xA = quotient-1
yA = l-1;
else:
xA = quotient
yA = rem-1
#print(str(xA)+" "+str(yA))
distance += abs(xA-i)+abs(yA-j)
# print(str(board[i][j])+" "+str(xA)+" "+str(yA)+" "+str(i)+" "+str(j))
return distance
def misplacedTiles(board):
misplaced = 0
l = len(board)
for i in range(l):
for j in range(l):
if board[i][j] != 0 and board[i][j] != (n*i+j+1):
misplaced += 1
return misplaced
def calculateHeuristic(board):
return misplacedTiles(board)+manhattanDistance(board)
stackNodes = 0
maxNodes = 0
def iterativeDeepen(board,g,bound,visitedStates,path):
global stackNodes
global maxNodes
#print(path)
# print("next")
#oneDBoard = [item for sublist in board for item in sublist]
f = g+manhattanDistance(board)
#print("g = "+str(g)+" h = "+str(manhattanDistance(board))+" f= "+str(f))
#f = g+ misplacedTiles(board)
#visitedStates.append(oneDBoard)
#print("visited states are :")
#print(visitedStates)
#print(board,end="")
#print(g+manhattanDistance(board))
if f > bound: # if we find a node with h value greater than current bound, then
return [f,False] # return this bound to be used as next bound and indicate it is not success
if misplacedTiles(board) == 0: # Perform goal test
print("Success path = "+path)
print("Found result")
print("Max nodes in stack : ")
print(maxNodes)
print("cost = "+str(g))
return [f,True] # return success if Goal State found
actualBoard = copy.deepcopy(board)
minBound = math.inf
movesList = possibleMoves(board)
for move in movesList:
moveGap(board,move)
#print(visitedStates)
#oneDBoard = [item for sublist in board for item in sublist]
#if oneDBoard in visitedStates:
# print("already in")
# continue
#printBoard(board)
stackNodes += 1
if maxNodes < stackNodes:
maxNodes = stackNodes #Make the move to create the next state
# print("Max nodes in stack : ",end=" ")
#print(maxNodes)
nextBound,success = iterativeDeepen(board,g+1,bound,visitedStates,path+translateMoveToLetter(move)) # Perform iterative deepening for the next state
stackNodes -= 1
#visitedStates.remove([oneDBoard,path+translateMoveToLetter(move),g+1+manhattanDistance(board)])
#print("v len",end="")
#print(len(visitedStates))
if success == True:
return [nextBound,True]
if nextBound < minBound:
minBound = nextBound
board = copy.deepcopy(actualBoard)
return [minBound,False]
def idastar(board):
print("Inside IDA*")
start = datetime.datetime.now()
actualBoard = copy.deepcopy(board)
bound = manhattanDistance(actualBoard)
# bound = misplacedTiles(board)
while True:
# print("bound = "+str(bound))
#print("actual borad = ",end="")
#print(actualBoard)
#print("board = ",end="")
#print(board)
#print("########")
board = copy.deepcopy(actualBoard)
nextBound,success = iterativeDeepen(board,0,bound,[],'')
if success == True:
final = datetime.datetime.now()-start
#print("Finished")
print("Time taken : ")
print(final.total_seconds())
break
else:
bound = nextBound
def astar(board):
print("Inside astar")
start = datetime.datetime.now()
visitedStates = []
steps = 0
queue = Q.PriorityQueue() # queue of tuples with priority value )
actualBoard = copy.deepcopy(board)
#visitedStates.append(actualBoard)
visitedStates.append([item for sublist in actualBoard for item in sublist])
#print("actual board"+str(actualBoard))
queue.put((manhattanDistance(actualBoard),actualBoard,0, '')) # (h+g,state,g)
while not queue.empty():
steps += 1
boardConfig = queue.get()
fCurrent = boardConfig[0]
board = copy.deepcopy(boardConfig[1])
gCurrent = boardConfig[2]
pathCurrent = boardConfig[3]
# print("current g"+str(fCurrent))
# print(boardConfig)
# print("f in queue")
# for q in queue.queue:
# print(q)
# print("f = "+str(q[0]))
#if steps == 10:
# print("ending : ")
# for v in visitedStates:
# print(v)
#break;
# print("**********************************************")
# printBoard(board)
# print("**********************************************")
# print(misplacedTiles(board))
if misplacedTiles(board) == 0: #Check the goal state before expansion
final = datetime.datetime.now() - start
print("breaking with answer")
printBoard(board)
print("answer path is "+pathCurrent)
print("Time taken : ")
print(final.total_seconds())
print("Visited states")
print(len(visitedStates))
print("Queue len ")
print(len(queue.queue))
break;
actualBoard = copy.deepcopy(board)
movesList = possibleMoves(board)
for move in movesList:
# print("Move: "+str(move))
moveGap(board,move) #Make the move
heuristic = manhattanDistance(board) #Calculate number of misplaced tiles
# print("No. of misplaced tiles : "+str(misplaced))
#printBoard(board)
oneDBoard = [item for sublist in board for item in sublist]
# if oneDBoard in visitedStates:
# print("not adding :",end="")
# print(board)
if not oneDBoard in visitedStates:
# print("visited : ")
# print(board)
# print("adding is : ",end="")
# print(board)
# print(heuristic+gCurrent+1)
queue.put((heuristic+gCurrent+1,board,gCurrent+1, pathCurrent+translateMoveToLetter(move))) # add new h', ie. h+g , board, new g and current path to queue
#visitedStates.append(board)
visitedStates.append([item for sublist in board for item in sublist])
board = copy.deepcopy(actualBoard) #Go back to current state to check the next move
print("Number of steps : "+str(steps))
if __name__ == '__main__':
n = 0
k = -1
algo = -1
in_file = ''
out_file = ''
process_input = False
if len(sys.argv) == 5:
print("here")
algo = int(sys.argv[1])
n = int(sys.argv[2])
in_file = open(sys.argv[3],'r')
out_file = open(sys.argv[4],'w')
process_input = True
elif len(sys.argv) == 4:
n = int(sys.argv[1])
k = int(sys.argv[2])
out_file = open(sys.argv[3], 'w')
elif len(sys.argv) == 3:
n = int(sys.argv[1])
out_file = open(sys.argv[2], 'w')
else:
print('Wrong number of arguments. Usage:\npuzzleGenerator.py <N> <K - number of moves> <OUTPATH>\npuzzleGenerator.py <N> <OUTPATH>')
print('n = ' + str(n))
print('k = '+str(k))
print('algo = '+str(algo))
if process_input == False:
if k == -1:
a = list(range(1, n*n + 1))
random.shuffle(a)
for i in range(n):
for j in range(n):
cur = a[i * n + j]
if cur == (n*n):
out_file.write('')
else:
out_file.write(str(cur))
if j != (n-1):
out_file.write(',')
out_file.write('\n')
else:
board = []
for i in range(n):
board.append([])
for j in range(n):
if (n*i+j+1) == n*n:
board[i].append(0)
else:
board[i].append(n * i + j + 1)
printBoard(board)
for move_cnt in range(k):
pos_moves = possibleMoves(board)
move = random.choice(pos_moves)
moveGap(board, move)
printBoard(board)
for row in board:
for i in range(len(row)):
cell = row[i]
if cell != 0:
out_file.write(str(cell))
if i != (len(row) - 1):
out_file.write(",")
out_file.write("\n")
else:
print("Solving Mode ")
board = []
for i in range(n):
currentLine = in_file.readline().split(',')
board.append([])
for currentNumber in currentLine:
a = 0 if currentNumber=='\n' or currentNumber == '' else int(currentNumber)
board[i].append(a)
print("Initial setup :")
printBoard(board)
if algo == 1:
# print(misplacedTiles(board))
astar(board)
elif algo == 2:
idastar(board)
out_file.close() | random_line_split | |
puzzleGenerator.py | import random
import sys
try:
import Queue as Q # ver. < 3.0
except ImportError:
import queue as Q
import copy
import datetime
import math
# moves = UP, RIGHT, DOWN, LEFT
moves = [[-1, 0], [0, 1], [1, 0], [0, -1]]
def isPositionLegal(board, x, y):
n = len(board)
return ((x >= 0) and (x < n) and (y >= 0) and (y < n))
def nextPos(x,y, move):
nextX = x + move[0]
nextY = y + move[1]
return nextX, nextY
def canMove(board, direction):
mv = moves[direction]
x, y = findGap(board)
x2, y2 = nextPos(x, y, mv)
return isPositionLegal(board, x2, y2)
# def canMove(board):
# x, y = findGap(board)
#
# for mv in moves:
# x2, y2 = nextPos(x, y, mv)
# if isPositionLegal(board, x2, y2):
# return True
#
# return False
def possibleMoves(board):
global moves
x, y = findGap(board)
res = []
for mv in moves:
x2, y2 = nextPos(x, y, mv)
if isPositionLegal(board, x2, y2):
res.append(mv)
return res
def moveGap(board, move):
x, y = findGap(board)
x2, y2 = nextPos(x, y, move)
tmp = board[x][y]
board[x][y] = board[x2][y2]
board[x2][y2] = tmp
def findGap(board):
for i in range(len(board)):
for j in range(len(board[i])):
if board[i][j] == 0:
return i,j
return -1, -1
def | (board):
print("")
for row in board:
row_str = ""
for cell in row:
row_str += str(cell) + " "
print(row_str)
def translateMoveToLetter(move):
m1,m2 = move
if m1 == -1 and m2 == 0:
return 'U'
if m1 == 0 and m2 == 1:
return 'R'
if m1 == 1 and m2 == 0:
return 'D'
if m1 ==0 and m2 == -1:
return 'L'
def manhattanDistance(board):
distance = 0
l = len(board)
for i in range(l):
for j in range(l):
if board[i][j] != 0 and board[i][j] != (n*i+j+1):
#Mapping to actual positions to find Manhattan distance - xA,yA are the actual positions
rem = board[i][j] % l
quotient = int(board[i][j] / l)
if rem == 0:
xA = quotient-1
yA = l-1;
else:
xA = quotient
yA = rem-1
#print(str(xA)+" "+str(yA))
distance += abs(xA-i)+abs(yA-j)
# print(str(board[i][j])+" "+str(xA)+" "+str(yA)+" "+str(i)+" "+str(j))
return distance
def misplacedTiles(board):
misplaced = 0
l = len(board)
for i in range(l):
for j in range(l):
if board[i][j] != 0 and board[i][j] != (n*i+j+1):
misplaced += 1
return misplaced
def calculateHeuristic(board):
return misplacedTiles(board)+manhattanDistance(board)
stackNodes = 0
maxNodes = 0
def iterativeDeepen(board,g,bound,visitedStates,path):
global stackNodes
global maxNodes
#print(path)
# print("next")
#oneDBoard = [item for sublist in board for item in sublist]
f = g+manhattanDistance(board)
#print("g = "+str(g)+" h = "+str(manhattanDistance(board))+" f= "+str(f))
#f = g+ misplacedTiles(board)
#visitedStates.append(oneDBoard)
#print("visited states are :")
#print(visitedStates)
#print(board,end="")
#print(g+manhattanDistance(board))
if f > bound: # if we find a node with h value greater than current bound, then
return [f,False] # return this bound to be used as next bound and indicate it is not success
if misplacedTiles(board) == 0: # Perform goal test
print("Success path = "+path)
print("Found result")
print("Max nodes in stack : ")
print(maxNodes)
print("cost = "+str(g))
return [f,True] # return success if Goal State found
actualBoard = copy.deepcopy(board)
minBound = math.inf
movesList = possibleMoves(board)
for move in movesList:
moveGap(board,move)
#print(visitedStates)
#oneDBoard = [item for sublist in board for item in sublist]
#if oneDBoard in visitedStates:
# print("already in")
# continue
#printBoard(board)
stackNodes += 1
if maxNodes < stackNodes:
maxNodes = stackNodes #Make the move to create the next state
# print("Max nodes in stack : ",end=" ")
#print(maxNodes)
nextBound,success = iterativeDeepen(board,g+1,bound,visitedStates,path+translateMoveToLetter(move)) # Perform iterative deepening for the next state
stackNodes -= 1
#visitedStates.remove([oneDBoard,path+translateMoveToLetter(move),g+1+manhattanDistance(board)])
#print("v len",end="")
#print(len(visitedStates))
if success == True:
return [nextBound,True]
if nextBound < minBound:
minBound = nextBound
board = copy.deepcopy(actualBoard)
return [minBound,False]
def idastar(board):
print("Inside IDA*")
start = datetime.datetime.now()
actualBoard = copy.deepcopy(board)
bound = manhattanDistance(actualBoard)
# bound = misplacedTiles(board)
while True:
# print("bound = "+str(bound))
#print("actual borad = ",end="")
#print(actualBoard)
#print("board = ",end="")
#print(board)
#print("########")
board = copy.deepcopy(actualBoard)
nextBound,success = iterativeDeepen(board,0,bound,[],'')
if success == True:
final = datetime.datetime.now()-start
#print("Finished")
print("Time taken : ")
print(final.total_seconds())
break
else:
bound = nextBound
def astar(board):
print("Inside astar")
start = datetime.datetime.now()
visitedStates = []
steps = 0
queue = Q.PriorityQueue() # queue of tuples with priority value )
actualBoard = copy.deepcopy(board)
#visitedStates.append(actualBoard)
visitedStates.append([item for sublist in actualBoard for item in sublist])
#print("actual board"+str(actualBoard))
queue.put((manhattanDistance(actualBoard),actualBoard,0, '')) # (h+g,state,g)
while not queue.empty():
steps += 1
boardConfig = queue.get()
fCurrent = boardConfig[0]
board = copy.deepcopy(boardConfig[1])
gCurrent = boardConfig[2]
pathCurrent = boardConfig[3]
# print("current g"+str(fCurrent))
# print(boardConfig)
# print("f in queue")
# for q in queue.queue:
# print(q)
# print("f = "+str(q[0]))
#if steps == 10:
# print("ending : ")
# for v in visitedStates:
# print(v)
#break;
# print("**********************************************")
# printBoard(board)
# print("**********************************************")
# print(misplacedTiles(board))
if misplacedTiles(board) == 0: #Check the goal state before expansion
final = datetime.datetime.now() - start
print("breaking with answer")
printBoard(board)
print("answer path is "+pathCurrent)
print("Time taken : ")
print(final.total_seconds())
print("Visited states")
print(len(visitedStates))
print("Queue len ")
print(len(queue.queue))
break;
actualBoard = copy.deepcopy(board)
movesList = possibleMoves(board)
for move in movesList:
# print("Move: "+str(move))
moveGap(board,move) #Make the move
heuristic = manhattanDistance(board) #Calculate number of misplaced tiles
# print("No. of misplaced tiles : "+str(misplaced))
#printBoard(board)
oneDBoard = [item for sublist in board for item in sublist]
# if oneDBoard in visitedStates:
# print("not adding :",end="")
# print(board)
if not oneDBoard in visitedStates:
# print("visited : ")
# print(board)
# print("adding is : ",end="")
# print(board)
# print(heuristic+gCurrent+1)
queue.put((heuristic+gCurrent+1,board,gCurrent+1, pathCurrent+translateMoveToLetter(move))) # add new h', ie. h+g , board, new g and current path to queue
#visitedStates.append(board)
visitedStates.append([item for sublist in board for item in sublist])
board = copy.deepcopy(actualBoard) #Go back to current state to check the next move
print("Number of steps : "+str(steps))
if __name__ == '__main__':
n = 0
k = -1
algo = -1
in_file = ''
out_file = ''
process_input = False
if len(sys.argv) == 5:
print("here")
algo = int(sys.argv[1])
n = int(sys.argv[2])
in_file = open(sys.argv[3],'r')
out_file = open(sys.argv[4],'w')
process_input = True
elif len(sys.argv) == 4:
n = int(sys.argv[1])
k = int(sys.argv[2])
out_file = open(sys.argv[3], 'w')
elif len(sys.argv) == 3:
n = int(sys.argv[1])
out_file = open(sys.argv[2], 'w')
else:
print('Wrong number of arguments. Usage:\npuzzleGenerator.py <N> <K - number of moves> <OUTPATH>\npuzzleGenerator.py <N> <OUTPATH>')
print('n = ' + str(n))
print('k = '+str(k))
print('algo = '+str(algo))
if process_input == False:
if k == -1:
a = list(range(1, n*n + 1))
random.shuffle(a)
for i in range(n):
for j in range(n):
cur = a[i * n + j]
if cur == (n*n):
out_file.write('')
else:
out_file.write(str(cur))
if j != (n-1):
out_file.write(',')
out_file.write('\n')
else:
board = []
for i in range(n):
board.append([])
for j in range(n):
if (n*i+j+1) == n*n:
board[i].append(0)
else:
board[i].append(n * i + j + 1)
printBoard(board)
for move_cnt in range(k):
pos_moves = possibleMoves(board)
move = random.choice(pos_moves)
moveGap(board, move)
printBoard(board)
for row in board:
for i in range(len(row)):
cell = row[i]
if cell != 0:
out_file.write(str(cell))
if i != (len(row) - 1):
out_file.write(",")
out_file.write("\n")
else:
print("Solving Mode ")
board = []
for i in range(n):
currentLine = in_file.readline().split(',')
board.append([])
for currentNumber in currentLine:
a = 0 if currentNumber=='\n' or currentNumber == '' else int(currentNumber)
board[i].append(a)
print("Initial setup :")
printBoard(board)
if algo == 1:
# print(misplacedTiles(board))
astar(board)
elif algo == 2:
idastar(board)
out_file.close()
| printBoard | identifier_name |
puzzleGenerator.py | import random
import sys
try:
import Queue as Q # ver. < 3.0
except ImportError:
import queue as Q
import copy
import datetime
import math
# moves = UP, RIGHT, DOWN, LEFT
moves = [[-1, 0], [0, 1], [1, 0], [0, -1]]
def isPositionLegal(board, x, y):
n = len(board)
return ((x >= 0) and (x < n) and (y >= 0) and (y < n))
def nextPos(x,y, move):
nextX = x + move[0]
nextY = y + move[1]
return nextX, nextY
def canMove(board, direction):
mv = moves[direction]
x, y = findGap(board)
x2, y2 = nextPos(x, y, mv)
return isPositionLegal(board, x2, y2)
# def canMove(board):
# x, y = findGap(board)
#
# for mv in moves:
# x2, y2 = nextPos(x, y, mv)
# if isPositionLegal(board, x2, y2):
# return True
#
# return False
def possibleMoves(board):
global moves
x, y = findGap(board)
res = []
for mv in moves:
x2, y2 = nextPos(x, y, mv)
if isPositionLegal(board, x2, y2):
res.append(mv)
return res
def moveGap(board, move):
x, y = findGap(board)
x2, y2 = nextPos(x, y, move)
tmp = board[x][y]
board[x][y] = board[x2][y2]
board[x2][y2] = tmp
def findGap(board):
for i in range(len(board)):
for j in range(len(board[i])):
if board[i][j] == 0:
return i,j
return -1, -1
def printBoard(board):
|
def translateMoveToLetter(move):
m1,m2 = move
if m1 == -1 and m2 == 0:
return 'U'
if m1 == 0 and m2 == 1:
return 'R'
if m1 == 1 and m2 == 0:
return 'D'
if m1 ==0 and m2 == -1:
return 'L'
def manhattanDistance(board):
distance = 0
l = len(board)
for i in range(l):
for j in range(l):
if board[i][j] != 0 and board[i][j] != (n*i+j+1):
#Mapping to actual positions to find Manhattan distance - xA,yA are the actual positions
rem = board[i][j] % l
quotient = int(board[i][j] / l)
if rem == 0:
xA = quotient-1
yA = l-1;
else:
xA = quotient
yA = rem-1
#print(str(xA)+" "+str(yA))
distance += abs(xA-i)+abs(yA-j)
# print(str(board[i][j])+" "+str(xA)+" "+str(yA)+" "+str(i)+" "+str(j))
return distance
def misplacedTiles(board):
misplaced = 0
l = len(board)
for i in range(l):
for j in range(l):
if board[i][j] != 0 and board[i][j] != (n*i+j+1):
misplaced += 1
return misplaced
def calculateHeuristic(board):
return misplacedTiles(board)+manhattanDistance(board)
stackNodes = 0
maxNodes = 0
def iterativeDeepen(board,g,bound,visitedStates,path):
global stackNodes
global maxNodes
#print(path)
# print("next")
#oneDBoard = [item for sublist in board for item in sublist]
f = g+manhattanDistance(board)
#print("g = "+str(g)+" h = "+str(manhattanDistance(board))+" f= "+str(f))
#f = g+ misplacedTiles(board)
#visitedStates.append(oneDBoard)
#print("visited states are :")
#print(visitedStates)
#print(board,end="")
#print(g+manhattanDistance(board))
if f > bound: # if we find a node with h value greater than current bound, then
return [f,False] # return this bound to be used as next bound and indicate it is not success
if misplacedTiles(board) == 0: # Perform goal test
print("Success path = "+path)
print("Found result")
print("Max nodes in stack : ")
print(maxNodes)
print("cost = "+str(g))
return [f,True] # return success if Goal State found
actualBoard = copy.deepcopy(board)
minBound = math.inf
movesList = possibleMoves(board)
for move in movesList:
moveGap(board,move)
#print(visitedStates)
#oneDBoard = [item for sublist in board for item in sublist]
#if oneDBoard in visitedStates:
# print("already in")
# continue
#printBoard(board)
stackNodes += 1
if maxNodes < stackNodes:
maxNodes = stackNodes #Make the move to create the next state
# print("Max nodes in stack : ",end=" ")
#print(maxNodes)
nextBound,success = iterativeDeepen(board,g+1,bound,visitedStates,path+translateMoveToLetter(move)) # Perform iterative deepening for the next state
stackNodes -= 1
#visitedStates.remove([oneDBoard,path+translateMoveToLetter(move),g+1+manhattanDistance(board)])
#print("v len",end="")
#print(len(visitedStates))
if success == True:
return [nextBound,True]
if nextBound < minBound:
minBound = nextBound
board = copy.deepcopy(actualBoard)
return [minBound,False]
def idastar(board):
print("Inside IDA*")
start = datetime.datetime.now()
actualBoard = copy.deepcopy(board)
bound = manhattanDistance(actualBoard)
# bound = misplacedTiles(board)
while True:
# print("bound = "+str(bound))
#print("actual borad = ",end="")
#print(actualBoard)
#print("board = ",end="")
#print(board)
#print("########")
board = copy.deepcopy(actualBoard)
nextBound,success = iterativeDeepen(board,0,bound,[],'')
if success == True:
final = datetime.datetime.now()-start
#print("Finished")
print("Time taken : ")
print(final.total_seconds())
break
else:
bound = nextBound
def astar(board):
print("Inside astar")
start = datetime.datetime.now()
visitedStates = []
steps = 0
queue = Q.PriorityQueue() # queue of tuples with priority value )
actualBoard = copy.deepcopy(board)
#visitedStates.append(actualBoard)
visitedStates.append([item for sublist in actualBoard for item in sublist])
#print("actual board"+str(actualBoard))
queue.put((manhattanDistance(actualBoard),actualBoard,0, '')) # (h+g,state,g)
while not queue.empty():
steps += 1
boardConfig = queue.get()
fCurrent = boardConfig[0]
board = copy.deepcopy(boardConfig[1])
gCurrent = boardConfig[2]
pathCurrent = boardConfig[3]
# print("current g"+str(fCurrent))
# print(boardConfig)
# print("f in queue")
# for q in queue.queue:
# print(q)
# print("f = "+str(q[0]))
#if steps == 10:
# print("ending : ")
# for v in visitedStates:
# print(v)
#break;
# print("**********************************************")
# printBoard(board)
# print("**********************************************")
# print(misplacedTiles(board))
if misplacedTiles(board) == 0: #Check the goal state before expansion
final = datetime.datetime.now() - start
print("breaking with answer")
printBoard(board)
print("answer path is "+pathCurrent)
print("Time taken : ")
print(final.total_seconds())
print("Visited states")
print(len(visitedStates))
print("Queue len ")
print(len(queue.queue))
break;
actualBoard = copy.deepcopy(board)
movesList = possibleMoves(board)
for move in movesList:
# print("Move: "+str(move))
moveGap(board,move) #Make the move
heuristic = manhattanDistance(board) #Calculate number of misplaced tiles
# print("No. of misplaced tiles : "+str(misplaced))
#printBoard(board)
oneDBoard = [item for sublist in board for item in sublist]
# if oneDBoard in visitedStates:
# print("not adding :",end="")
# print(board)
if not oneDBoard in visitedStates:
# print("visited : ")
# print(board)
# print("adding is : ",end="")
# print(board)
# print(heuristic+gCurrent+1)
queue.put((heuristic+gCurrent+1,board,gCurrent+1, pathCurrent+translateMoveToLetter(move))) # add new h', ie. h+g , board, new g and current path to queue
#visitedStates.append(board)
visitedStates.append([item for sublist in board for item in sublist])
board = copy.deepcopy(actualBoard) #Go back to current state to check the next move
print("Number of steps : "+str(steps))
if __name__ == '__main__':
n = 0
k = -1
algo = -1
in_file = ''
out_file = ''
process_input = False
if len(sys.argv) == 5:
print("here")
algo = int(sys.argv[1])
n = int(sys.argv[2])
in_file = open(sys.argv[3],'r')
out_file = open(sys.argv[4],'w')
process_input = True
elif len(sys.argv) == 4:
n = int(sys.argv[1])
k = int(sys.argv[2])
out_file = open(sys.argv[3], 'w')
elif len(sys.argv) == 3:
n = int(sys.argv[1])
out_file = open(sys.argv[2], 'w')
else:
print('Wrong number of arguments. Usage:\npuzzleGenerator.py <N> <K - number of moves> <OUTPATH>\npuzzleGenerator.py <N> <OUTPATH>')
print('n = ' + str(n))
print('k = '+str(k))
print('algo = '+str(algo))
if process_input == False:
if k == -1:
a = list(range(1, n*n + 1))
random.shuffle(a)
for i in range(n):
for j in range(n):
cur = a[i * n + j]
if cur == (n*n):
out_file.write('')
else:
out_file.write(str(cur))
if j != (n-1):
out_file.write(',')
out_file.write('\n')
else:
board = []
for i in range(n):
board.append([])
for j in range(n):
if (n*i+j+1) == n*n:
board[i].append(0)
else:
board[i].append(n * i + j + 1)
printBoard(board)
for move_cnt in range(k):
pos_moves = possibleMoves(board)
move = random.choice(pos_moves)
moveGap(board, move)
printBoard(board)
for row in board:
for i in range(len(row)):
cell = row[i]
if cell != 0:
out_file.write(str(cell))
if i != (len(row) - 1):
out_file.write(",")
out_file.write("\n")
else:
print("Solving Mode ")
board = []
for i in range(n):
currentLine = in_file.readline().split(',')
board.append([])
for currentNumber in currentLine:
a = 0 if currentNumber=='\n' or currentNumber == '' else int(currentNumber)
board[i].append(a)
print("Initial setup :")
printBoard(board)
if algo == 1:
# print(misplacedTiles(board))
astar(board)
elif algo == 2:
idastar(board)
out_file.close()
| print("")
for row in board:
row_str = ""
for cell in row:
row_str += str(cell) + " "
print(row_str) | identifier_body |
puzzleGenerator.py | import random
import sys
try:
import Queue as Q # ver. < 3.0
except ImportError:
import queue as Q
import copy
import datetime
import math
# moves = UP, RIGHT, DOWN, LEFT
moves = [[-1, 0], [0, 1], [1, 0], [0, -1]]
def isPositionLegal(board, x, y):
n = len(board)
return ((x >= 0) and (x < n) and (y >= 0) and (y < n))
def nextPos(x,y, move):
nextX = x + move[0]
nextY = y + move[1]
return nextX, nextY
def canMove(board, direction):
mv = moves[direction]
x, y = findGap(board)
x2, y2 = nextPos(x, y, mv)
return isPositionLegal(board, x2, y2)
# def canMove(board):
# x, y = findGap(board)
#
# for mv in moves:
# x2, y2 = nextPos(x, y, mv)
# if isPositionLegal(board, x2, y2):
# return True
#
# return False
def possibleMoves(board):
global moves
x, y = findGap(board)
res = []
for mv in moves:
x2, y2 = nextPos(x, y, mv)
if isPositionLegal(board, x2, y2):
res.append(mv)
return res
def moveGap(board, move):
x, y = findGap(board)
x2, y2 = nextPos(x, y, move)
tmp = board[x][y]
board[x][y] = board[x2][y2]
board[x2][y2] = tmp
def findGap(board):
for i in range(len(board)):
for j in range(len(board[i])):
if board[i][j] == 0:
return i,j
return -1, -1
def printBoard(board):
print("")
for row in board:
row_str = ""
for cell in row:
row_str += str(cell) + " "
print(row_str)
def translateMoveToLetter(move):
m1,m2 = move
if m1 == -1 and m2 == 0:
return 'U'
if m1 == 0 and m2 == 1:
return 'R'
if m1 == 1 and m2 == 0:
return 'D'
if m1 ==0 and m2 == -1:
return 'L'
def manhattanDistance(board):
distance = 0
l = len(board)
for i in range(l):
for j in range(l):
if board[i][j] != 0 and board[i][j] != (n*i+j+1):
#Mapping to actual positions to find Manhattan distance - xA,yA are the actual positions
rem = board[i][j] % l
quotient = int(board[i][j] / l)
if rem == 0:
xA = quotient-1
yA = l-1;
else:
xA = quotient
yA = rem-1
#print(str(xA)+" "+str(yA))
distance += abs(xA-i)+abs(yA-j)
# print(str(board[i][j])+" "+str(xA)+" "+str(yA)+" "+str(i)+" "+str(j))
return distance
def misplacedTiles(board):
misplaced = 0
l = len(board)
for i in range(l):
for j in range(l):
if board[i][j] != 0 and board[i][j] != (n*i+j+1):
misplaced += 1
return misplaced
def calculateHeuristic(board):
return misplacedTiles(board)+manhattanDistance(board)
stackNodes = 0
maxNodes = 0
def iterativeDeepen(board,g,bound,visitedStates,path):
global stackNodes
global maxNodes
#print(path)
# print("next")
#oneDBoard = [item for sublist in board for item in sublist]
f = g+manhattanDistance(board)
#print("g = "+str(g)+" h = "+str(manhattanDistance(board))+" f= "+str(f))
#f = g+ misplacedTiles(board)
#visitedStates.append(oneDBoard)
#print("visited states are :")
#print(visitedStates)
#print(board,end="")
#print(g+manhattanDistance(board))
if f > bound: # if we find a node with h value greater than current bound, then
return [f,False] # return this bound to be used as next bound and indicate it is not success
if misplacedTiles(board) == 0: # Perform goal test
print("Success path = "+path)
print("Found result")
print("Max nodes in stack : ")
print(maxNodes)
print("cost = "+str(g))
return [f,True] # return success if Goal State found
actualBoard = copy.deepcopy(board)
minBound = math.inf
movesList = possibleMoves(board)
for move in movesList:
moveGap(board,move)
#print(visitedStates)
#oneDBoard = [item for sublist in board for item in sublist]
#if oneDBoard in visitedStates:
# print("already in")
# continue
#printBoard(board)
stackNodes += 1
if maxNodes < stackNodes:
maxNodes = stackNodes #Make the move to create the next state
# print("Max nodes in stack : ",end=" ")
#print(maxNodes)
nextBound,success = iterativeDeepen(board,g+1,bound,visitedStates,path+translateMoveToLetter(move)) # Perform iterative deepening for the next state
stackNodes -= 1
#visitedStates.remove([oneDBoard,path+translateMoveToLetter(move),g+1+manhattanDistance(board)])
#print("v len",end="")
#print(len(visitedStates))
if success == True:
return [nextBound,True]
if nextBound < minBound:
minBound = nextBound
board = copy.deepcopy(actualBoard)
return [minBound,False]
def idastar(board):
print("Inside IDA*")
start = datetime.datetime.now()
actualBoard = copy.deepcopy(board)
bound = manhattanDistance(actualBoard)
# bound = misplacedTiles(board)
while True:
# print("bound = "+str(bound))
#print("actual borad = ",end="")
#print(actualBoard)
#print("board = ",end="")
#print(board)
#print("########")
board = copy.deepcopy(actualBoard)
nextBound,success = iterativeDeepen(board,0,bound,[],'')
if success == True:
final = datetime.datetime.now()-start
#print("Finished")
print("Time taken : ")
print(final.total_seconds())
break
else:
bound = nextBound
def astar(board):
print("Inside astar")
start = datetime.datetime.now()
visitedStates = []
steps = 0
queue = Q.PriorityQueue() # queue of tuples with priority value )
actualBoard = copy.deepcopy(board)
#visitedStates.append(actualBoard)
visitedStates.append([item for sublist in actualBoard for item in sublist])
#print("actual board"+str(actualBoard))
queue.put((manhattanDistance(actualBoard),actualBoard,0, '')) # (h+g,state,g)
while not queue.empty():
steps += 1
boardConfig = queue.get()
fCurrent = boardConfig[0]
board = copy.deepcopy(boardConfig[1])
gCurrent = boardConfig[2]
pathCurrent = boardConfig[3]
# print("current g"+str(fCurrent))
# print(boardConfig)
# print("f in queue")
# for q in queue.queue:
# print(q)
# print("f = "+str(q[0]))
#if steps == 10:
# print("ending : ")
# for v in visitedStates:
# print(v)
#break;
# print("**********************************************")
# printBoard(board)
# print("**********************************************")
# print(misplacedTiles(board))
if misplacedTiles(board) == 0: #Check the goal state before expansion
final = datetime.datetime.now() - start
print("breaking with answer")
printBoard(board)
print("answer path is "+pathCurrent)
print("Time taken : ")
print(final.total_seconds())
print("Visited states")
print(len(visitedStates))
print("Queue len ")
print(len(queue.queue))
break;
actualBoard = copy.deepcopy(board)
movesList = possibleMoves(board)
for move in movesList:
# print("Move: "+str(move))
moveGap(board,move) #Make the move
heuristic = manhattanDistance(board) #Calculate number of misplaced tiles
# print("No. of misplaced tiles : "+str(misplaced))
#printBoard(board)
oneDBoard = [item for sublist in board for item in sublist]
# if oneDBoard in visitedStates:
# print("not adding :",end="")
# print(board)
if not oneDBoard in visitedStates:
# print("visited : ")
# print(board)
# print("adding is : ",end="")
# print(board)
# print(heuristic+gCurrent+1)
queue.put((heuristic+gCurrent+1,board,gCurrent+1, pathCurrent+translateMoveToLetter(move))) # add new h', ie. h+g , board, new g and current path to queue
#visitedStates.append(board)
visitedStates.append([item for sublist in board for item in sublist])
board = copy.deepcopy(actualBoard) #Go back to current state to check the next move
print("Number of steps : "+str(steps))
if __name__ == '__main__':
n = 0
k = -1
algo = -1
in_file = ''
out_file = ''
process_input = False
if len(sys.argv) == 5:
print("here")
algo = int(sys.argv[1])
n = int(sys.argv[2])
in_file = open(sys.argv[3],'r')
out_file = open(sys.argv[4],'w')
process_input = True
elif len(sys.argv) == 4:
|
elif len(sys.argv) == 3:
n = int(sys.argv[1])
out_file = open(sys.argv[2], 'w')
else:
print('Wrong number of arguments. Usage:\npuzzleGenerator.py <N> <K - number of moves> <OUTPATH>\npuzzleGenerator.py <N> <OUTPATH>')
print('n = ' + str(n))
print('k = '+str(k))
print('algo = '+str(algo))
if process_input == False:
if k == -1:
a = list(range(1, n*n + 1))
random.shuffle(a)
for i in range(n):
for j in range(n):
cur = a[i * n + j]
if cur == (n*n):
out_file.write('')
else:
out_file.write(str(cur))
if j != (n-1):
out_file.write(',')
out_file.write('\n')
else:
board = []
for i in range(n):
board.append([])
for j in range(n):
if (n*i+j+1) == n*n:
board[i].append(0)
else:
board[i].append(n * i + j + 1)
printBoard(board)
for move_cnt in range(k):
pos_moves = possibleMoves(board)
move = random.choice(pos_moves)
moveGap(board, move)
printBoard(board)
for row in board:
for i in range(len(row)):
cell = row[i]
if cell != 0:
out_file.write(str(cell))
if i != (len(row) - 1):
out_file.write(",")
out_file.write("\n")
else:
print("Solving Mode ")
board = []
for i in range(n):
currentLine = in_file.readline().split(',')
board.append([])
for currentNumber in currentLine:
a = 0 if currentNumber=='\n' or currentNumber == '' else int(currentNumber)
board[i].append(a)
print("Initial setup :")
printBoard(board)
if algo == 1:
# print(misplacedTiles(board))
astar(board)
elif algo == 2:
idastar(board)
out_file.close()
| n = int(sys.argv[1])
k = int(sys.argv[2])
out_file = open(sys.argv[3], 'w') | conditional_block |
conn_write.rs | use std::cmp;
use std::fmt;
use std::task::Poll;
use bytes::Bytes;
use futures::task::Context;
use tls_api::AsyncSocket;
use crate::common::conn::Conn;
use crate::common::conn::ConnStateSnapshot;
use crate::common::conn_read::ConnReadSideCustom;
use crate::common::pump_stream_to_write_loop::PumpStreamToWrite;
use crate::common::stream::HttpStreamCommand;
use crate::common::stream::HttpStreamCommon;
use crate::common::stream::HttpStreamData;
use crate::common::types::Types;
use crate::common::window_size::StreamOutWindowReceiver;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_headers_with_flag::DataOrHeadersWithFlag;
use crate::solicit::end_stream::EndStream;
use crate::solicit::frame::DataFlag;
use crate::solicit::frame::DataFrame;
use crate::solicit::frame::Flags;
use crate::solicit::frame::GoawayFrame;
use crate::solicit::frame::HeadersFlag;
use crate::solicit::frame::HeadersMultiFrame;
use crate::solicit::frame::HttpFrame;
use crate::solicit::frame::RstStreamFrame;
use crate::solicit::frame::SettingsFrame;
use crate::solicit::stream_id::StreamId;
use crate::DataOrTrailers;
use crate::Error;
use crate::ErrorCode;
use crate::Headers;
use crate::death::channel::ErrorAwareDrop;
use crate::death::error_holder::ConnDiedType;
use crate::death::oneshot_no_content_drop::DeathAwareOneshotNoContentDropSender;
use crate::solicit_async::TryStreamBox;
pub(crate) trait ConnWriteSideCustom {
type Types: Types;
fn process_message(
&mut self,
message: <Self::Types as Types>::ToWriteMessage,
) -> crate::Result<()>;
}
impl<T, I> Conn<T, I>
where
T: Types,
Self: ConnReadSideCustom<Types = T>,
Self: ConnWriteSideCustom<Types = T>,
HttpStreamCommon<T>: HttpStreamData<Types = T>,
I: AsyncSocket,
{
fn write_part_data(&mut self, stream_id: StreamId, data: Bytes, end_stream: EndStream) {
let max_frame_size = self.peer_settings.max_frame_size as usize;
// if client requested end of stream,
// we must send at least one frame with end stream flag
if end_stream == EndStream::Yes && data.len() == 0 {
let mut frame = DataFrame::with_data(stream_id, Bytes::new());
frame.set_flag(DataFlag::EndStream);
if log_enabled!(log::Level::Trace) | else {
debug!("sending frame {:?}", frame.debug_no_data());
}
self.queued_write.queue_not_goaway(frame);
return;
}
let mut pos = 0;
while pos < data.len() {
let end = cmp::min(data.len(), pos + max_frame_size);
let end_stream_in_frame = if end == data.len() && end_stream == EndStream::Yes {
EndStream::Yes
} else {
EndStream::No
};
let mut frame = DataFrame::with_data(stream_id, data.slice(pos..end));
if end_stream_in_frame == EndStream::Yes {
frame.set_flag(DataFlag::EndStream);
}
self.queued_write.queue_not_goaway(frame);
pos = end;
}
}
fn write_part_headers(&mut self, stream_id: StreamId, headers: Headers, end_stream: EndStream) {
let mut flags = Flags::new(0);
if end_stream == EndStream::Yes {
flags.set(HeadersFlag::EndStream);
}
self.queued_write.queue_not_goaway(HeadersMultiFrame {
flags,
stream_id,
headers,
stream_dep: None,
padding_len: 0,
encoder: &mut self.encoder,
max_frame_size: self.peer_settings.max_frame_size,
});
}
fn write_part_rst(&mut self, stream_id: StreamId, error_code: ErrorCode) {
let frame = RstStreamFrame::new(stream_id, error_code);
self.queued_write.queue_not_goaway(frame);
}
fn write_part(&mut self, stream_id: StreamId, part: HttpStreamCommand) {
match part {
HttpStreamCommand::Data(data, end_stream) => {
self.write_part_data(stream_id, data, end_stream);
}
HttpStreamCommand::Headers(headers, end_stream) => {
self.write_part_headers(stream_id, headers, end_stream);
}
HttpStreamCommand::Rst(error_code) => {
self.write_part_rst(stream_id, error_code);
}
}
}
fn has_write_buffer_capacity(&self) -> bool {
self.queued_write.queued_bytes_len() < 0x8000
}
fn pop_outg_for_stream(
&mut self,
stream_id: StreamId,
) -> Option<(StreamId, HttpStreamCommand, bool)> {
let stream = self.streams.get_mut(stream_id).unwrap();
if let (Some(command), stream) = stream.pop_outg_maybe_remove(&mut self.out_window_size) {
return Some((stream_id, command, stream.is_some()));
}
None
}
pub fn buffer_outg_conn(&mut self) -> crate::Result<bool> {
let mut updated = false;
// shortcut
if !self.has_write_buffer_capacity() {
return Ok(updated);
}
let writable_stream_ids = self.streams.writable_stream_ids();
for &stream_id in &writable_stream_ids {
loop {
if !self.has_write_buffer_capacity() {
return Ok(updated);
}
if let Some((stream_id, part, cont)) = self.pop_outg_for_stream(stream_id) {
self.write_part(stream_id, part);
updated = true;
// Stream is removed from map, need to continue to the next stream
if !cont {
break;
}
} else {
break;
}
}
}
Ok(updated)
}
pub fn send_frame_and_notify<F: Into<HttpFrame>>(&mut self, frame: F) {
// TODO: some of frames should not be in front of GOAWAY
self.queued_write.queue_not_goaway(frame.into());
}
/// Sends an SETTINGS Frame with ack set to acknowledge seeing a SETTINGS frame from the peer.
pub fn send_ack_settings(&mut self) -> crate::Result<()> {
let settings = SettingsFrame::new_ack();
self.send_frame_and_notify(settings);
Ok(())
}
fn process_stream_end(
&mut self,
stream_id: StreamId,
error_code: ErrorCode,
) -> crate::Result<()> {
let stream = self.streams.get_mut(stream_id);
if let Some(mut stream) = stream {
stream.close_outgoing(error_code);
}
Ok(())
}
fn process_stream_enqueue(
&mut self,
stream_id: StreamId,
part: DataOrHeadersWithFlag,
) -> crate::Result<()> {
let stream = self.streams.get_mut(stream_id);
if let Some(mut stream) = stream {
stream.push_back_part(part);
} else {
if let DataOrHeaders::Data(data) = part.content {
self.pump_out_window_size.increase(data.len());
}
}
Ok(())
}
fn process_stream_pull(
&mut self,
stream_id: StreamId,
stream: TryStreamBox<DataOrTrailers>,
out_window: StreamOutWindowReceiver,
) -> crate::Result<()> {
// TODO: spawn in handler
self.loop_handle.spawn(
PumpStreamToWrite::<T> {
to_write_tx: self.to_write_tx.clone(),
stream_id,
out_window,
stream,
}
.run(),
);
Ok(())
}
pub fn process_common_message(&mut self, common: CommonToWriteMessage) -> crate::Result<()> {
match common {
CommonToWriteMessage::StreamEnd(stream_id, error_code) => {
self.process_stream_end(stream_id, error_code)
}
CommonToWriteMessage::StreamEnqueue(stream_id, part) => {
self.process_stream_enqueue(stream_id, part)
}
CommonToWriteMessage::Pull(stream_id, stream, out_window_receiver) => {
self.process_stream_pull(stream_id, stream, out_window_receiver)
}
CommonToWriteMessage::IncreaseInWindow(stream_id, increase) => {
self.increase_in_window(stream_id, increase)
}
CommonToWriteMessage::DumpState(sender) => self.process_dump_state(sender),
}
}
pub fn send_goaway(&mut self, error_code: ErrorCode) -> crate::Result<()> {
debug!("requesting to send GOAWAY with code {:?}", error_code);
let frame = GoawayFrame::new(self.last_peer_stream_id, error_code);
self.queued_write.queue_goaway(frame);
Ok(())
}
pub fn poll_flush(&mut self, cx: &mut Context<'_>) -> crate::Result<()> {
self.buffer_outg_conn()?;
loop {
match self.queued_write.poll(cx) {
Poll::Pending => return Ok(()),
Poll::Ready(Err(e)) => return Err(e),
Poll::Ready(Ok(())) => {}
}
let updated = self.buffer_outg_conn()?;
if !updated {
return Ok(());
}
}
}
}
// Message sent to write loop.
// Processed while write loop is not handling network I/O.
pub(crate) enum CommonToWriteMessage {
IncreaseInWindow(StreamId, u32),
StreamEnqueue(StreamId, DataOrHeadersWithFlag),
StreamEnd(StreamId, ErrorCode), // send when user provided handler completed the stream
Pull(
StreamId,
TryStreamBox<DataOrTrailers>,
StreamOutWindowReceiver,
),
DumpState(DeathAwareOneshotNoContentDropSender<ConnStateSnapshot, ConnDiedType>),
}
impl ErrorAwareDrop for CommonToWriteMessage {
fn drop_with_error(self, error: Error) {
let _ = error;
match self {
CommonToWriteMessage::IncreaseInWindow(_, _) => {}
CommonToWriteMessage::StreamEnqueue(_, _) => {}
CommonToWriteMessage::StreamEnd(_, _) => {}
CommonToWriteMessage::Pull(_, _, _) => {}
CommonToWriteMessage::DumpState(_) => {}
}
}
}
impl fmt::Debug for CommonToWriteMessage {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("CommonToWriteMessage").field(&"...").finish()
}
}
| {
debug!("sending frame {:?}", frame);
} | conditional_block |
conn_write.rs | use std::cmp;
use std::fmt;
use std::task::Poll;
use bytes::Bytes;
use futures::task::Context;
use tls_api::AsyncSocket;
use crate::common::conn::Conn;
use crate::common::conn::ConnStateSnapshot;
use crate::common::conn_read::ConnReadSideCustom;
use crate::common::pump_stream_to_write_loop::PumpStreamToWrite;
use crate::common::stream::HttpStreamCommand;
use crate::common::stream::HttpStreamCommon;
use crate::common::stream::HttpStreamData;
use crate::common::types::Types;
use crate::common::window_size::StreamOutWindowReceiver;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_headers_with_flag::DataOrHeadersWithFlag;
use crate::solicit::end_stream::EndStream;
use crate::solicit::frame::DataFlag;
use crate::solicit::frame::DataFrame;
use crate::solicit::frame::Flags;
use crate::solicit::frame::GoawayFrame;
use crate::solicit::frame::HeadersFlag;
use crate::solicit::frame::HeadersMultiFrame;
use crate::solicit::frame::HttpFrame;
use crate::solicit::frame::RstStreamFrame;
use crate::solicit::frame::SettingsFrame;
use crate::solicit::stream_id::StreamId;
use crate::DataOrTrailers;
use crate::Error;
use crate::ErrorCode;
use crate::Headers;
use crate::death::channel::ErrorAwareDrop;
use crate::death::error_holder::ConnDiedType;
use crate::death::oneshot_no_content_drop::DeathAwareOneshotNoContentDropSender;
use crate::solicit_async::TryStreamBox;
pub(crate) trait ConnWriteSideCustom {
type Types: Types;
fn process_message(
&mut self,
message: <Self::Types as Types>::ToWriteMessage,
) -> crate::Result<()>;
}
impl<T, I> Conn<T, I>
where
T: Types,
Self: ConnReadSideCustom<Types = T>,
Self: ConnWriteSideCustom<Types = T>,
HttpStreamCommon<T>: HttpStreamData<Types = T>,
I: AsyncSocket,
{
fn write_part_data(&mut self, stream_id: StreamId, data: Bytes, end_stream: EndStream) |
fn write_part_headers(&mut self, stream_id: StreamId, headers: Headers, end_stream: EndStream) {
let mut flags = Flags::new(0);
if end_stream == EndStream::Yes {
flags.set(HeadersFlag::EndStream);
}
self.queued_write.queue_not_goaway(HeadersMultiFrame {
flags,
stream_id,
headers,
stream_dep: None,
padding_len: 0,
encoder: &mut self.encoder,
max_frame_size: self.peer_settings.max_frame_size,
});
}
fn write_part_rst(&mut self, stream_id: StreamId, error_code: ErrorCode) {
let frame = RstStreamFrame::new(stream_id, error_code);
self.queued_write.queue_not_goaway(frame);
}
fn write_part(&mut self, stream_id: StreamId, part: HttpStreamCommand) {
match part {
HttpStreamCommand::Data(data, end_stream) => {
self.write_part_data(stream_id, data, end_stream);
}
HttpStreamCommand::Headers(headers, end_stream) => {
self.write_part_headers(stream_id, headers, end_stream);
}
HttpStreamCommand::Rst(error_code) => {
self.write_part_rst(stream_id, error_code);
}
}
}
fn has_write_buffer_capacity(&self) -> bool {
self.queued_write.queued_bytes_len() < 0x8000
}
fn pop_outg_for_stream(
&mut self,
stream_id: StreamId,
) -> Option<(StreamId, HttpStreamCommand, bool)> {
let stream = self.streams.get_mut(stream_id).unwrap();
if let (Some(command), stream) = stream.pop_outg_maybe_remove(&mut self.out_window_size) {
return Some((stream_id, command, stream.is_some()));
}
None
}
pub fn buffer_outg_conn(&mut self) -> crate::Result<bool> {
let mut updated = false;
// shortcut
if !self.has_write_buffer_capacity() {
return Ok(updated);
}
let writable_stream_ids = self.streams.writable_stream_ids();
for &stream_id in &writable_stream_ids {
loop {
if !self.has_write_buffer_capacity() {
return Ok(updated);
}
if let Some((stream_id, part, cont)) = self.pop_outg_for_stream(stream_id) {
self.write_part(stream_id, part);
updated = true;
// Stream is removed from map, need to continue to the next stream
if !cont {
break;
}
} else {
break;
}
}
}
Ok(updated)
}
pub fn send_frame_and_notify<F: Into<HttpFrame>>(&mut self, frame: F) {
// TODO: some of frames should not be in front of GOAWAY
self.queued_write.queue_not_goaway(frame.into());
}
/// Sends an SETTINGS Frame with ack set to acknowledge seeing a SETTINGS frame from the peer.
pub fn send_ack_settings(&mut self) -> crate::Result<()> {
let settings = SettingsFrame::new_ack();
self.send_frame_and_notify(settings);
Ok(())
}
fn process_stream_end(
&mut self,
stream_id: StreamId,
error_code: ErrorCode,
) -> crate::Result<()> {
let stream = self.streams.get_mut(stream_id);
if let Some(mut stream) = stream {
stream.close_outgoing(error_code);
}
Ok(())
}
fn process_stream_enqueue(
&mut self,
stream_id: StreamId,
part: DataOrHeadersWithFlag,
) -> crate::Result<()> {
let stream = self.streams.get_mut(stream_id);
if let Some(mut stream) = stream {
stream.push_back_part(part);
} else {
if let DataOrHeaders::Data(data) = part.content {
self.pump_out_window_size.increase(data.len());
}
}
Ok(())
}
fn process_stream_pull(
&mut self,
stream_id: StreamId,
stream: TryStreamBox<DataOrTrailers>,
out_window: StreamOutWindowReceiver,
) -> crate::Result<()> {
// TODO: spawn in handler
self.loop_handle.spawn(
PumpStreamToWrite::<T> {
to_write_tx: self.to_write_tx.clone(),
stream_id,
out_window,
stream,
}
.run(),
);
Ok(())
}
pub fn process_common_message(&mut self, common: CommonToWriteMessage) -> crate::Result<()> {
match common {
CommonToWriteMessage::StreamEnd(stream_id, error_code) => {
self.process_stream_end(stream_id, error_code)
}
CommonToWriteMessage::StreamEnqueue(stream_id, part) => {
self.process_stream_enqueue(stream_id, part)
}
CommonToWriteMessage::Pull(stream_id, stream, out_window_receiver) => {
self.process_stream_pull(stream_id, stream, out_window_receiver)
}
CommonToWriteMessage::IncreaseInWindow(stream_id, increase) => {
self.increase_in_window(stream_id, increase)
}
CommonToWriteMessage::DumpState(sender) => self.process_dump_state(sender),
}
}
pub fn send_goaway(&mut self, error_code: ErrorCode) -> crate::Result<()> {
debug!("requesting to send GOAWAY with code {:?}", error_code);
let frame = GoawayFrame::new(self.last_peer_stream_id, error_code);
self.queued_write.queue_goaway(frame);
Ok(())
}
pub fn poll_flush(&mut self, cx: &mut Context<'_>) -> crate::Result<()> {
self.buffer_outg_conn()?;
loop {
match self.queued_write.poll(cx) {
Poll::Pending => return Ok(()),
Poll::Ready(Err(e)) => return Err(e),
Poll::Ready(Ok(())) => {}
}
let updated = self.buffer_outg_conn()?;
if !updated {
return Ok(());
}
}
}
}
// Message sent to write loop.
// Processed while write loop is not handling network I/O.
pub(crate) enum CommonToWriteMessage {
IncreaseInWindow(StreamId, u32),
StreamEnqueue(StreamId, DataOrHeadersWithFlag),
StreamEnd(StreamId, ErrorCode), // send when user provided handler completed the stream
Pull(
StreamId,
TryStreamBox<DataOrTrailers>,
StreamOutWindowReceiver,
),
DumpState(DeathAwareOneshotNoContentDropSender<ConnStateSnapshot, ConnDiedType>),
}
impl ErrorAwareDrop for CommonToWriteMessage {
fn drop_with_error(self, error: Error) {
let _ = error;
match self {
CommonToWriteMessage::IncreaseInWindow(_, _) => {}
CommonToWriteMessage::StreamEnqueue(_, _) => {}
CommonToWriteMessage::StreamEnd(_, _) => {}
CommonToWriteMessage::Pull(_, _, _) => {}
CommonToWriteMessage::DumpState(_) => {}
}
}
}
impl fmt::Debug for CommonToWriteMessage {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("CommonToWriteMessage").field(&"...").finish()
}
}
| {
let max_frame_size = self.peer_settings.max_frame_size as usize;
// if client requested end of stream,
// we must send at least one frame with end stream flag
if end_stream == EndStream::Yes && data.len() == 0 {
let mut frame = DataFrame::with_data(stream_id, Bytes::new());
frame.set_flag(DataFlag::EndStream);
if log_enabled!(log::Level::Trace) {
debug!("sending frame {:?}", frame);
} else {
debug!("sending frame {:?}", frame.debug_no_data());
}
self.queued_write.queue_not_goaway(frame);
return;
}
let mut pos = 0;
while pos < data.len() {
let end = cmp::min(data.len(), pos + max_frame_size);
let end_stream_in_frame = if end == data.len() && end_stream == EndStream::Yes {
EndStream::Yes
} else {
EndStream::No
};
let mut frame = DataFrame::with_data(stream_id, data.slice(pos..end));
if end_stream_in_frame == EndStream::Yes {
frame.set_flag(DataFlag::EndStream);
}
self.queued_write.queue_not_goaway(frame);
pos = end;
}
} | identifier_body |
conn_write.rs | use std::cmp;
use std::fmt;
use std::task::Poll;
use bytes::Bytes;
use futures::task::Context;
use tls_api::AsyncSocket;
use crate::common::conn::Conn;
use crate::common::conn::ConnStateSnapshot;
use crate::common::conn_read::ConnReadSideCustom;
use crate::common::pump_stream_to_write_loop::PumpStreamToWrite;
use crate::common::stream::HttpStreamCommand;
use crate::common::stream::HttpStreamCommon;
use crate::common::stream::HttpStreamData;
use crate::common::types::Types;
use crate::common::window_size::StreamOutWindowReceiver;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_headers_with_flag::DataOrHeadersWithFlag;
use crate::solicit::end_stream::EndStream;
use crate::solicit::frame::DataFlag;
use crate::solicit::frame::DataFrame;
use crate::solicit::frame::Flags;
use crate::solicit::frame::GoawayFrame;
use crate::solicit::frame::HeadersFlag;
use crate::solicit::frame::HeadersMultiFrame;
use crate::solicit::frame::HttpFrame;
use crate::solicit::frame::RstStreamFrame;
use crate::solicit::frame::SettingsFrame;
use crate::solicit::stream_id::StreamId;
use crate::DataOrTrailers;
use crate::Error;
use crate::ErrorCode;
use crate::Headers;
use crate::death::channel::ErrorAwareDrop;
use crate::death::error_holder::ConnDiedType;
use crate::death::oneshot_no_content_drop::DeathAwareOneshotNoContentDropSender;
use crate::solicit_async::TryStreamBox;
pub(crate) trait ConnWriteSideCustom {
type Types: Types;
fn process_message(
&mut self,
message: <Self::Types as Types>::ToWriteMessage,
) -> crate::Result<()>;
}
impl<T, I> Conn<T, I>
where
T: Types,
Self: ConnReadSideCustom<Types = T>,
Self: ConnWriteSideCustom<Types = T>,
HttpStreamCommon<T>: HttpStreamData<Types = T>,
I: AsyncSocket,
{
fn write_part_data(&mut self, stream_id: StreamId, data: Bytes, end_stream: EndStream) {
let max_frame_size = self.peer_settings.max_frame_size as usize;
// if client requested end of stream,
// we must send at least one frame with end stream flag
if end_stream == EndStream::Yes && data.len() == 0 {
let mut frame = DataFrame::with_data(stream_id, Bytes::new());
frame.set_flag(DataFlag::EndStream);
if log_enabled!(log::Level::Trace) {
debug!("sending frame {:?}", frame);
} else {
debug!("sending frame {:?}", frame.debug_no_data());
}
self.queued_write.queue_not_goaway(frame);
return;
}
let mut pos = 0;
while pos < data.len() {
let end = cmp::min(data.len(), pos + max_frame_size);
let end_stream_in_frame = if end == data.len() && end_stream == EndStream::Yes {
EndStream::Yes
} else {
EndStream::No
};
let mut frame = DataFrame::with_data(stream_id, data.slice(pos..end));
if end_stream_in_frame == EndStream::Yes {
frame.set_flag(DataFlag::EndStream);
}
self.queued_write.queue_not_goaway(frame);
pos = end;
}
}
fn write_part_headers(&mut self, stream_id: StreamId, headers: Headers, end_stream: EndStream) {
let mut flags = Flags::new(0);
if end_stream == EndStream::Yes {
flags.set(HeadersFlag::EndStream);
}
self.queued_write.queue_not_goaway(HeadersMultiFrame {
flags,
stream_id,
headers,
stream_dep: None,
padding_len: 0,
encoder: &mut self.encoder,
max_frame_size: self.peer_settings.max_frame_size,
});
}
fn write_part_rst(&mut self, stream_id: StreamId, error_code: ErrorCode) {
let frame = RstStreamFrame::new(stream_id, error_code);
self.queued_write.queue_not_goaway(frame);
}
/// Dispatch one queued stream command to the matching frame writer.
fn write_part(&mut self, stream_id: StreamId, part: HttpStreamCommand) {
    match part {
        HttpStreamCommand::Data(data, end_stream) => {
            self.write_part_data(stream_id, data, end_stream);
        }
        HttpStreamCommand::Headers(headers, end_stream) => {
            self.write_part_headers(stream_id, headers, end_stream);
        }
        HttpStreamCommand::Rst(error_code) => {
            self.write_part_rst(stream_id, error_code);
        }
    }
}
/// True while the queued outgoing bytes are below the soft cap (0x8000);
/// used to stop buffering more stream data until the queue drains.
fn has_write_buffer_capacity(&self) -> bool {
    self.queued_write.queued_bytes_len() < 0x8000
}
/// Pop the next outgoing command for `stream_id`.
///
/// Returns `(stream_id, command, still_in_map)`, or `None` when the stream
/// has nothing to send. Panics if `stream_id` is not in the map: callers
/// pass ids obtained from `writable_stream_ids()`.
fn pop_outg_for_stream(
    &mut self,
    stream_id: StreamId,
) -> Option<(StreamId, HttpStreamCommand, bool)> {
    let stream = self.streams.get_mut(stream_id).unwrap();
    // `pop_outg_maybe_remove` may drop the stream from the map; the returned
    // `stream` option tells the caller whether it is still there.
    if let (Some(command), stream) = stream.pop_outg_maybe_remove(&mut self.out_window_size) {
        return Some((stream_id, command, stream.is_some()));
    }
    None
}
/// Move queued per-stream data into the connection write queue until all
/// writable streams are drained or the write-buffer soft cap is reached.
///
/// Returns `Ok(true)` when at least one part was queued.
pub fn buffer_outg_conn(&mut self) -> crate::Result<bool> {
    let mut updated = false;
    // shortcut
    if !self.has_write_buffer_capacity() {
        return Ok(updated);
    }
    let writable_stream_ids = self.streams.writable_stream_ids();
    for &stream_id in &writable_stream_ids {
        loop {
            // Re-check capacity before every part; stop early once full.
            if !self.has_write_buffer_capacity() {
                return Ok(updated);
            }
            if let Some((stream_id, part, cont)) = self.pop_outg_for_stream(stream_id) {
                self.write_part(stream_id, part);
                updated = true;
                // Stream is removed from map, need to continue to the next stream
                if !cont {
                    break;
                }
            } else {
                break;
            }
        }
    }
    Ok(updated)
}
/// Queue an arbitrary frame for writing.
pub fn send_frame_and_notify<F: Into<HttpFrame>>(&mut self, frame: F) {
    // TODO: some of frames should not be in front of GOAWAY
    self.queued_write.queue_not_goaway(frame.into());
}
/// Sends a SETTINGS frame with the ACK flag set to acknowledge a SETTINGS
/// frame received from the peer.
pub fn send_ack_settings(&mut self) -> crate::Result<()> {
    let settings = SettingsFrame::new_ack();
    self.send_frame_and_notify(settings);
    Ok(())
}
/// Close the outgoing half of `stream_id` with `error_code`; a no-op when
/// the stream no longer exists.
fn process_stream_end(
    &mut self,
    stream_id: StreamId,
    error_code: ErrorCode,
) -> crate::Result<()> {
    let stream = self.streams.get_mut(stream_id);
    if let Some(mut stream) = stream {
        stream.close_outgoing(error_code);
    }
    Ok(())
}
/// Enqueue one body part for `stream_id`.
///
/// If the stream has already been removed, the length of a DATA part is
/// credited back to the connection-level pump out-window so the producer
/// is not starved by bytes that will never be written.
fn process_stream_enqueue(
    &mut self,
    stream_id: StreamId,
    part: DataOrHeadersWithFlag,
) -> crate::Result<()> {
    let stream = self.streams.get_mut(stream_id);
    if let Some(mut stream) = stream {
        stream.push_back_part(part);
    } else if let DataOrHeaders::Data(data) = part.content {
        // Stream is gone: return the window reserved for this data.
        self.pump_out_window_size.increase(data.len());
    }
    Ok(())
}
/// Spawn a task that pumps the user-provided `stream` into the write loop
/// for `stream_id`, flow-controlled by `out_window`.
fn process_stream_pull(
    &mut self,
    stream_id: StreamId,
    stream: TryStreamBox<DataOrTrailers>,
    out_window: StreamOutWindowReceiver,
) -> crate::Result<()> {
    // TODO: spawn in handler
    self.loop_handle.spawn(
        PumpStreamToWrite::<T> {
            to_write_tx: self.to_write_tx.clone(),
            stream_id,
            out_window,
            stream,
        }
        .run(),
    );
    Ok(())
}
pub fn process_common_message(&mut self, common: CommonToWriteMessage) -> crate::Result<()> {
match common {
CommonToWriteMessage::StreamEnd(stream_id, error_code) => {
self.process_stream_end(stream_id, error_code)
} | self.process_stream_enqueue(stream_id, part)
}
CommonToWriteMessage::Pull(stream_id, stream, out_window_receiver) => {
self.process_stream_pull(stream_id, stream, out_window_receiver)
}
CommonToWriteMessage::IncreaseInWindow(stream_id, increase) => {
self.increase_in_window(stream_id, increase)
}
CommonToWriteMessage::DumpState(sender) => self.process_dump_state(sender),
}
}
/// Queue a GOAWAY frame carrying `error_code` and the last stream id
/// processed from the peer.
pub fn send_goaway(&mut self, error_code: ErrorCode) -> crate::Result<()> {
    debug!("requesting to send GOAWAY with code {:?}", error_code);
    let frame = GoawayFrame::new(self.last_peer_stream_id, error_code);
    self.queued_write.queue_goaway(frame);
    Ok(())
}
/// Flush the write queue: alternately poll the underlying write and refill
/// the queue from stream buffers until no further progress is possible.
pub fn poll_flush(&mut self, cx: &mut Context<'_>) -> crate::Result<()> {
    self.buffer_outg_conn()?;
    loop {
        match self.queued_write.poll(cx) {
            // Not ready: nothing more to do in this poll.
            Poll::Pending => return Ok(()),
            Poll::Ready(Err(e)) => return Err(e),
            Poll::Ready(Ok(())) => {}
        }
        // Writing made progress; top up the queue and stop once nothing
        // new was buffered.
        let updated = self.buffer_outg_conn()?;
        if !updated {
            return Ok(());
        }
    }
}
}
/// Message sent to the write loop.
/// Processed while the write loop is not handling network I/O.
pub(crate) enum CommonToWriteMessage {
    /// Grow a stream's incoming window by the given amount.
    IncreaseInWindow(StreamId, u32),
    /// Append one body part to a stream's outgoing queue.
    StreamEnqueue(StreamId, DataOrHeadersWithFlag),
    StreamEnd(StreamId, ErrorCode), // send when user provided handler completed the stream
    /// Start pumping a user-provided stream into the write loop.
    Pull(
        StreamId,
        TryStreamBox<DataOrTrailers>,
        StreamOutWindowReceiver,
    ),
    /// Reply with a snapshot of the connection state (debugging aid).
    DumpState(DeathAwareOneshotNoContentDropSender<ConnStateSnapshot, ConnDiedType>),
}
impl ErrorAwareDrop for CommonToWriteMessage {
    /// Drop this message with `error`; the error itself is deliberately
    /// ignored. The exhaustive match (no `_` arm) forces a decision here
    /// whenever a new variant is added; currently every variant is dropped
    /// silently.
    fn drop_with_error(self, error: Error) {
        let _ = error;
        match self {
            CommonToWriteMessage::IncreaseInWindow(_, _) => {}
            CommonToWriteMessage::StreamEnqueue(_, _) => {}
            CommonToWriteMessage::StreamEnd(_, _) => {}
            CommonToWriteMessage::Pull(_, _, _) => {}
            CommonToWriteMessage::DumpState(_) => {}
        }
    }
}
impl fmt::Debug for CommonToWriteMessage {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("CommonToWriteMessage").field(&"...").finish()
}
} | CommonToWriteMessage::StreamEnqueue(stream_id, part) => { | random_line_split |
conn_write.rs | use std::cmp;
use std::fmt;
use std::task::Poll;
use bytes::Bytes;
use futures::task::Context;
use tls_api::AsyncSocket;
use crate::common::conn::Conn;
use crate::common::conn::ConnStateSnapshot;
use crate::common::conn_read::ConnReadSideCustom;
use crate::common::pump_stream_to_write_loop::PumpStreamToWrite;
use crate::common::stream::HttpStreamCommand;
use crate::common::stream::HttpStreamCommon;
use crate::common::stream::HttpStreamData;
use crate::common::types::Types;
use crate::common::window_size::StreamOutWindowReceiver;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_headers_with_flag::DataOrHeadersWithFlag;
use crate::solicit::end_stream::EndStream;
use crate::solicit::frame::DataFlag;
use crate::solicit::frame::DataFrame;
use crate::solicit::frame::Flags;
use crate::solicit::frame::GoawayFrame;
use crate::solicit::frame::HeadersFlag;
use crate::solicit::frame::HeadersMultiFrame;
use crate::solicit::frame::HttpFrame;
use crate::solicit::frame::RstStreamFrame;
use crate::solicit::frame::SettingsFrame;
use crate::solicit::stream_id::StreamId;
use crate::DataOrTrailers;
use crate::Error;
use crate::ErrorCode;
use crate::Headers;
use crate::death::channel::ErrorAwareDrop;
use crate::death::error_holder::ConnDiedType;
use crate::death::oneshot_no_content_drop::DeathAwareOneshotNoContentDropSender;
use crate::solicit_async::TryStreamBox;
/// Hook for `Types`-specific handling of messages delivered to the
/// connection write loop.
pub(crate) trait ConnWriteSideCustom {
    type Types: Types;

    /// Handle one message taken from the write-side channel.
    fn process_message(
        &mut self,
        message: <Self::Types as Types>::ToWriteMessage,
    ) -> crate::Result<()>;
}
impl<T, I> Conn<T, I>
where
T: Types,
Self: ConnReadSideCustom<Types = T>,
Self: ConnWriteSideCustom<Types = T>,
HttpStreamCommon<T>: HttpStreamData<Types = T>,
I: AsyncSocket,
{
/// Queue the body `data` of `stream_id` as one or more DATA frames.
///
/// The payload is split into chunks no larger than the peer's advertised
/// max frame size; the END_STREAM flag is set only on the final frame when
/// `end_stream == EndStream::Yes`.
fn write_part_data(&mut self, stream_id: StreamId, data: Bytes, end_stream: EndStream) {
    let max_frame_size = self.peer_settings.max_frame_size as usize;

    // if client requested end of stream,
    // we must send at least one frame with end stream flag
    if end_stream == EndStream::Yes && data.is_empty() {
        let mut frame = DataFrame::with_data(stream_id, Bytes::new());
        frame.set_flag(DataFlag::EndStream);
        // Only dump the full frame (payload included) when trace is enabled.
        if log_enabled!(log::Level::Trace) {
            debug!("sending frame {:?}", frame);
        } else {
            debug!("sending frame {:?}", frame.debug_no_data());
        }
        self.queued_write.queue_not_goaway(frame);
        return;
    }

    // Chunk the payload; only the last chunk may carry END_STREAM.
    let mut pos = 0;
    while pos < data.len() {
        let end = cmp::min(data.len(), pos + max_frame_size);
        let end_stream_in_frame = if end == data.len() && end_stream == EndStream::Yes {
            EndStream::Yes
        } else {
            EndStream::No
        };
        let mut frame = DataFrame::with_data(stream_id, data.slice(pos..end));
        if end_stream_in_frame == EndStream::Yes {
            frame.set_flag(DataFlag::EndStream);
        }
        self.queued_write.queue_not_goaway(frame);
        pos = end;
    }
}
/// Queue `headers` for `stream_id` as a HEADERS frame sequence, setting the
/// END_STREAM flag when requested.
fn write_part_headers(&mut self, stream_id: StreamId, headers: Headers, end_stream: EndStream) {
    let mut flags = Flags::new(0);
    if end_stream == EndStream::Yes {
        flags.set(HeadersFlag::EndStream);
    }
    // HeadersMultiFrame uses the connection's `encoder` and the peer's
    // `max_frame_size` to produce the actual frame(s).
    self.queued_write.queue_not_goaway(HeadersMultiFrame {
        flags,
        stream_id,
        headers,
        stream_dep: None,
        padding_len: 0,
        encoder: &mut self.encoder,
        max_frame_size: self.peer_settings.max_frame_size,
    });
}
/// Queue an RST_STREAM frame carrying `error_code` for `stream_id`.
fn write_part_rst(&mut self, stream_id: StreamId, error_code: ErrorCode) {
    self.queued_write
        .queue_not_goaway(RstStreamFrame::new(stream_id, error_code));
}
fn | (&mut self, stream_id: StreamId, part: HttpStreamCommand) {
match part {
HttpStreamCommand::Data(data, end_stream) => {
self.write_part_data(stream_id, data, end_stream);
}
HttpStreamCommand::Headers(headers, end_stream) => {
self.write_part_headers(stream_id, headers, end_stream);
}
HttpStreamCommand::Rst(error_code) => {
self.write_part_rst(stream_id, error_code);
}
}
}
/// True while the queued outgoing bytes are below the soft cap (0x8000);
/// used to stop buffering more stream data until the queue drains.
fn has_write_buffer_capacity(&self) -> bool {
    self.queued_write.queued_bytes_len() < 0x8000
}
/// Pop the next outgoing command for `stream_id`.
///
/// Returns `(stream_id, command, still_in_map)`, or `None` when the stream
/// has nothing to send. Panics if `stream_id` is not in the map: callers
/// pass ids obtained from `writable_stream_ids()`.
fn pop_outg_for_stream(
    &mut self,
    stream_id: StreamId,
) -> Option<(StreamId, HttpStreamCommand, bool)> {
    let stream = self.streams.get_mut(stream_id).unwrap();
    // `pop_outg_maybe_remove` may drop the stream from the map; the returned
    // `stream` option tells the caller whether it is still there.
    if let (Some(command), stream) = stream.pop_outg_maybe_remove(&mut self.out_window_size) {
        return Some((stream_id, command, stream.is_some()));
    }
    None
}
/// Move queued per-stream data into the connection write queue until all
/// writable streams are drained or the write-buffer soft cap is reached.
///
/// Returns `Ok(true)` when at least one part was queued.
pub fn buffer_outg_conn(&mut self) -> crate::Result<bool> {
    let mut updated = false;
    // shortcut
    if !self.has_write_buffer_capacity() {
        return Ok(updated);
    }
    let writable_stream_ids = self.streams.writable_stream_ids();
    for &stream_id in &writable_stream_ids {
        loop {
            // Re-check capacity before every part; stop early once full.
            if !self.has_write_buffer_capacity() {
                return Ok(updated);
            }
            if let Some((stream_id, part, cont)) = self.pop_outg_for_stream(stream_id) {
                self.write_part(stream_id, part);
                updated = true;
                // Stream is removed from map, need to continue to the next stream
                if !cont {
                    break;
                }
            } else {
                break;
            }
        }
    }
    Ok(updated)
}
/// Queue an arbitrary frame for writing.
pub fn send_frame_and_notify<F: Into<HttpFrame>>(&mut self, frame: F) {
    // TODO: some of frames should not be in front of GOAWAY
    self.queued_write.queue_not_goaway(frame.into());
}
/// Sends a SETTINGS frame with the ACK flag set to acknowledge a SETTINGS
/// frame received from the peer.
pub fn send_ack_settings(&mut self) -> crate::Result<()> {
    let settings = SettingsFrame::new_ack();
    self.send_frame_and_notify(settings);
    Ok(())
}
/// Close the outgoing half of `stream_id` with `error_code`; a no-op when
/// the stream no longer exists.
fn process_stream_end(
    &mut self,
    stream_id: StreamId,
    error_code: ErrorCode,
) -> crate::Result<()> {
    let stream = self.streams.get_mut(stream_id);
    if let Some(mut stream) = stream {
        stream.close_outgoing(error_code);
    }
    Ok(())
}
/// Enqueue one body part for `stream_id`.
///
/// If the stream has already been removed, the length of a DATA part is
/// credited back to the connection-level pump out-window so the producer
/// is not starved by bytes that will never be written.
fn process_stream_enqueue(
    &mut self,
    stream_id: StreamId,
    part: DataOrHeadersWithFlag,
) -> crate::Result<()> {
    let stream = self.streams.get_mut(stream_id);
    if let Some(mut stream) = stream {
        stream.push_back_part(part);
    } else if let DataOrHeaders::Data(data) = part.content {
        // Stream is gone: return the window reserved for this data.
        self.pump_out_window_size.increase(data.len());
    }
    Ok(())
}
/// Spawn a task that pumps the user-provided `stream` into the write loop
/// for `stream_id`, flow-controlled by `out_window`.
fn process_stream_pull(
    &mut self,
    stream_id: StreamId,
    stream: TryStreamBox<DataOrTrailers>,
    out_window: StreamOutWindowReceiver,
) -> crate::Result<()> {
    // TODO: spawn in handler
    self.loop_handle.spawn(
        PumpStreamToWrite::<T> {
            to_write_tx: self.to_write_tx.clone(),
            stream_id,
            out_window,
            stream,
        }
        .run(),
    );
    Ok(())
}
/// Dispatch one `CommonToWriteMessage` received by the write loop to its
/// handler.
pub fn process_common_message(&mut self, common: CommonToWriteMessage) -> crate::Result<()> {
    match common {
        CommonToWriteMessage::StreamEnd(stream_id, error_code) => {
            self.process_stream_end(stream_id, error_code)
        }
        CommonToWriteMessage::StreamEnqueue(stream_id, part) => {
            self.process_stream_enqueue(stream_id, part)
        }
        CommonToWriteMessage::Pull(stream_id, stream, out_window_receiver) => {
            self.process_stream_pull(stream_id, stream, out_window_receiver)
        }
        CommonToWriteMessage::IncreaseInWindow(stream_id, increase) => {
            self.increase_in_window(stream_id, increase)
        }
        CommonToWriteMessage::DumpState(sender) => self.process_dump_state(sender),
    }
}
/// Queue a GOAWAY frame carrying `error_code` and the last stream id
/// processed from the peer.
pub fn send_goaway(&mut self, error_code: ErrorCode) -> crate::Result<()> {
    debug!("requesting to send GOAWAY with code {:?}", error_code);
    let frame = GoawayFrame::new(self.last_peer_stream_id, error_code);
    self.queued_write.queue_goaway(frame);
    Ok(())
}
/// Flush the write queue: alternately poll the underlying write and refill
/// the queue from stream buffers until no further progress is possible.
pub fn poll_flush(&mut self, cx: &mut Context<'_>) -> crate::Result<()> {
    self.buffer_outg_conn()?;
    loop {
        match self.queued_write.poll(cx) {
            // Not ready: nothing more to do in this poll.
            Poll::Pending => return Ok(()),
            Poll::Ready(Err(e)) => return Err(e),
            Poll::Ready(Ok(())) => {}
        }
        // Writing made progress; top up the queue and stop once nothing
        // new was buffered.
        let updated = self.buffer_outg_conn()?;
        if !updated {
            return Ok(());
        }
    }
}
}
/// Message sent to the write loop.
/// Processed while the write loop is not handling network I/O.
pub(crate) enum CommonToWriteMessage {
    /// Grow a stream's incoming window by the given amount.
    IncreaseInWindow(StreamId, u32),
    /// Append one body part to a stream's outgoing queue.
    StreamEnqueue(StreamId, DataOrHeadersWithFlag),
    StreamEnd(StreamId, ErrorCode), // send when user provided handler completed the stream
    /// Start pumping a user-provided stream into the write loop.
    Pull(
        StreamId,
        TryStreamBox<DataOrTrailers>,
        StreamOutWindowReceiver,
    ),
    /// Reply with a snapshot of the connection state (debugging aid).
    DumpState(DeathAwareOneshotNoContentDropSender<ConnStateSnapshot, ConnDiedType>),
}
impl ErrorAwareDrop for CommonToWriteMessage {
    /// Drop this message with `error`; the error itself is deliberately
    /// ignored. The exhaustive match (no `_` arm) forces a decision here
    /// whenever a new variant is added; currently every variant is dropped
    /// silently.
    fn drop_with_error(self, error: Error) {
        let _ = error;
        match self {
            CommonToWriteMessage::IncreaseInWindow(_, _) => {}
            CommonToWriteMessage::StreamEnqueue(_, _) => {}
            CommonToWriteMessage::StreamEnd(_, _) => {}
            CommonToWriteMessage::Pull(_, _, _) => {}
            CommonToWriteMessage::DumpState(_) => {}
        }
    }
}
impl fmt::Debug for CommonToWriteMessage {
    // Variant and payload contents are deliberately elided ("...") from the
    // Debug output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("CommonToWriteMessage").field(&"...").finish()
    }
}
| write_part | identifier_name |
parseschema2.py | # -- coding: utf-8 --
# Copyright (c) 2011-2015 Andrew Hankinson, Alastair Porter, and Others
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
if sys.version_info < (2, 7):
raise Exception("requires python 2.7")
from lxml import etree
import os
import shutil
import codecs
import re
from argparse import ArgumentParser
import logging
lg = logging.getLogger('schemaparser')
f = logging.Formatter("%(levelname)s %(asctime)s On Line: %(lineno)d %(message)s")
h = logging.StreamHandler()
h.setFormatter(f)
lg.setLevel(logging.DEBUG)
lg.addHandler(h)
# globals
TEI_NS = {"tei": "http://www.tei-c.org/ns/1.0"}
TEI_RNG_NS = {"tei": "http://www.tei-c.org/ns/1.0", "rng": "http://relaxng.org/ns/structure/1.0"}
NAMESPACES = {'xml': 'http://www.w3.org/XML/1998/namespace',
'xlink': 'http://www.w3.org/1999/xlink'}
class MeiSchema(object):
def __init__(self, oddfile):
    """Parse the compiled ODD file and build all derived structures."""
    parser = etree.XMLParser(resolve_entities=True)
    self.schema = etree.parse(oddfile, parser)
    # self.customization = etree.parse(customization_file)
    self.active_modules = []  # the modules active in the resulting output
    self.element_structure = {}  # the element structure.
    self.attribute_group_structure = {}  # the attribute group structure
    self.inverse_attribute_group_structure = {}  # inverted, so we can map attgroups to modules
    self.get_elements()
    self.get_attribute_groups()
    self.invert_attribute_group_structure()
    self.set_active_modules()
def get_elements(self):
    """Collect every tei:elementSpec into element_structure[module][element]."""
    elements = [m for m in self.schema.xpath("//tei:elementSpec", namespaces=TEI_NS)]
    for element in elements:
        modname = element.get("module").split(".")[-1]
        if modname not in self.element_structure.keys():
            self.element_structure[modname] = {}
        element_name = element.get("ident")
        memberships = []
        element_membership = element.xpath("./tei:classes/tei:memberOf", namespaces=TEI_NS)
        for member in element_membership:
            if member.get("key").split(".")[0] != "att":
                # skip the models that this element might be a member of
                continue
            self.__get_membership(member, memberships)
        # memberships.kesort()
        self.element_structure[modname][element_name] = memberships
        # need a way to keep self-defined attributes:
        selfattributes = []
        attdefs = element.xpath("./tei:attList/tei:attDef", namespaces=TEI_NS)
        if attdefs:
            for attdef in attdefs:
                if attdef.get("ident") == "id":
                    # "id" attributes are never recorded
                    continue
                attname = self.__process_att(attdef)
                selfattributes.append(attname)
        # the element's own attDefs go last, as a nested list
        self.element_structure[modname][element_name].append(selfattributes)
def get_attribute_groups(self):
    """Collect attribute classes into attribute_group_structure[module][group]."""
    attribute_groups = [m for m in self.schema.xpath("//tei:classSpec[@type=$at]", at="atts", namespaces=TEI_NS)]
    for group in attribute_groups:
        group_name = group.get("ident")
        if group_name == "att.id":
            continue
        group_module = group.get("module").split(".")[-1]
        attdefs = group.xpath("./tei:attList/tei:attDef", namespaces=TEI_NS)
        if not attdefs:
            # classes that define no attributes of their own are skipped
            continue
        if group_module not in self.attribute_group_structure.keys():
            self.attribute_group_structure[group_module] = {}
        self.attribute_group_structure[group_module][group_name] = []
        for attdef in attdefs:
            if attdef.get("ident") == "id":
                continue
            attname = self.__process_att(attdef)
            self.attribute_group_structure[group_module][group_name].append(attname)
def invert_attribute_group_structure(self):
    """Build the attgroup -> module map from the module -> attgroup map."""
    for module, groups in self.attribute_group_structure.items():
        for attgroup in groups:
            self.inverse_attribute_group_structure[attgroup] = module
def set_active_modules(self):
    """Record the sorted list of modules that contributed elements."""
    self.active_modules = list(self.element_structure.keys())
    self.active_modules.sort()
def __process_att(self, attdef):
    """Return the normalized name for one tei:attDef.

    Hyphenated idents become camelCase; namespaced attributes are returned
    as "<namespace-uri>|<name>".
    """
    attname = ""
    attdefident = attdef.get("ident")
    if "-" in attdefident:
        f, l = attdefident.split("-")
        attdefident = "{0}{1}".format(f, l.title())
    if attdef.get("ns"):
        attname = "{0}|{1}".format(attdef.get("ns"), attdefident)
    elif ":" in attdefident:
        # prefixed idents are resolved through the module-level NAMESPACES map
        pfx, att = attdefident.split(":")
        attname = "{0}|{1}".format(NAMESPACES[pfx], att)
    else:
        attname = "{0}".format(attdefident)
    return attname
def __get_membership(self, member, resarr):
    """Recursively append to resarr the att.* classes reachable from member."""
    member_attgroup = member.xpath("//tei:classSpec[@type=$att][@ident=$nm]", att="atts", nm=member.get("key"), namespaces=TEI_NS)
    if member_attgroup:
        member_attgroup = member_attgroup[0]
    else:
        return
    # only classes that actually define attributes are recorded
    if member_attgroup.xpath("./tei:attList/tei:attDef", namespaces=TEI_NS):
        if member_attgroup.get("ident") == "att.id":
            return
        resarr.append(member_attgroup.get("ident"))
    m2s = member_attgroup.xpath("./tei:classes/tei:memberOf", namespaces=TEI_NS)
    if not m2s:
        return
    for mship in m2s:
        self.__get_membership(mship, resarr)
def strpatt(self, name):
    """ Returns a version of the string with any leading att. stripped. """
    # NOTE(review): replace() removes *every* occurrence of "att.", not only
    # a leading one -- confirm callers only pass singly-prefixed names.
    return name.replace("att.", "")
def strpdot(self, name):
    # Removes every "." from the name (e.g. "a.b.c" -> "abc").
    return "".join([n for n in name.split(".")])
def | (self, name):
""" Returns a CamelCasedName version of attribute.case.names.
"""
return "".join([n[0].upper() + n[1:] for n in name.split(".")])
def getattdocs(self, aname):
    """ returns the documentation string for element name, or an empty string if there is none."""
    dsc = self.schema.xpath("//tei:attDef[@ident=$name]/tei:desc/text()", name=aname, namespaces=TEI_NS)
    if dsc:
        # NOTE(review): the pattern should be a raw string (r'[\s\t]+');
        # '\s' only works because it is an unrecognized escape left intact.
        return re.sub('[\s\t]+', ' ', dsc[0]) # strip extraneous whitespace
    else:
        return ""
def geteldocs(self, ename):
    """Return the tei:desc text for the named elementSpec, or ""."""
    dsc = self.schema.xpath("//tei:elementSpec[@ident=$name]/tei:desc/text()", name=ename, namespaces=TEI_NS)
    if dsc:
        # NOTE(review): pattern should be a raw string (r'[\s\t]+')
        return re.sub('[\s\t]+', ' ', dsc[0]) # strip extraneous whitespace
    else:
        return ""
if __name__ == "__main__":
p = ArgumentParser(usage='%(prog)s [compiled | -sl] [-h] [-o OUTDIR] [-i INCLUDES] [-d] [-l [LANG [LANG ...]]]') #Custom usage message to show user [compiled] should go before all other flags
exclusive_group = p.add_mutually_exclusive_group()
exclusive_group.add_argument("compiled", help="A compiled ODD file", nargs="?") # Due to nargs="?", "compiled" will appear as optional and not positional
p.add_argument("-o", "--outdir", default="output", help="output directory")
p.add_argument("-l", "--lang", default=["python"], help="Programming language or languages to output. To output multiple languages at once, list desired languages separated by a space after -l. For example: python parseschema2.py [compiled] -l python cpp", nargs="*")
p.add_argument("-i", "--includes", help="Parse external includes from a given directory")
p.add_argument("-d", "--debugging", help="Run with verbose output", action="store_true")
exclusive_group.add_argument("-sl", "--showlang", help="Show languages and exit.", action="store_true")
args = p.parse_args()
if not args.showlang and not args.compiled:
p.print_usage()
print("error: You must include a compiled ODD file")
sys.exit(1)
avail_langs = ["cpp", "python", "manuscript"]
if not args.lang == "python":
for l_langs in args.lang:
if l_langs.lower() not in avail_langs:
p.print_usage()
print("error: One or more of the languages you have chosen are not supported. To check supported languages use the -sl flag")
sys.exit(1)
if args.showlang:
import langs
print("Available Output Languages")
for l in langs.AVAILABLE_LANGS:
print("\t{0}".format(l))
sys.exit(0)
compiled_odd = args.compiled
mei_source = codecs.open(compiled_odd, 'r', 'utf-8')
# sf = codecs.open(args.source,'r', "utf-8")
# cf = codecs.open(args.customization, 'r', "utf-8")
outdir = args.outdir
if not os.path.exists(args.outdir):
os.mkdir(args.outdir)
schema = MeiSchema(mei_source)
for l_langs in args.lang:
if "cpp" in l_langs.lower():
import langs.cplusplus as cpp
output_directory = os.path.join(outdir, "cpp")
if os.path.exists(output_directory):
lg.debug("Removing old C++ output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
cpp.create(schema, output_directory)
if args.includes:
cpp.parse_includes(output_directory, args.includes)
if "python" in l_langs.lower():
import langs.python as py
output_directory = os.path.join(outdir, "python")
if os.path.exists(output_directory):
lg.debug("Removing old Python output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
py.create(schema, output_directory)
if args.includes:
py.parse_includes(output_directory, args.includes)
if "manuscript" in l_langs.lower():
import langs.manuscript as ms
output_directory = os.path.join(outdir, "manuscript")
if os.path.exists(output_directory):
lg.debug("Removing old Manuscript output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
ms.create(schema, output_directory)
if "java" in args.lang:
import langs.java as java
output_directory = os.path.join(outdir, "java")
if os.path.exists(output_directory):
lg.debug("Removing old Java output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
java.create(schema, output_directory)
if args.includes:
java.parse_includes(output_directory, args.includes)
mei_source.close()
sys.exit(0)
| cc | identifier_name |
parseschema2.py | # -- coding: utf-8 --
# Copyright (c) 2011-2015 Andrew Hankinson, Alastair Porter, and Others
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
if sys.version_info < (2, 7):
raise Exception("requires python 2.7")
from lxml import etree
import os
import shutil
import codecs
import re
from argparse import ArgumentParser
import logging
lg = logging.getLogger('schemaparser')
f = logging.Formatter("%(levelname)s %(asctime)s On Line: %(lineno)d %(message)s")
h = logging.StreamHandler()
h.setFormatter(f)
lg.setLevel(logging.DEBUG)
lg.addHandler(h)
# globals
TEI_NS = {"tei": "http://www.tei-c.org/ns/1.0"}
TEI_RNG_NS = {"tei": "http://www.tei-c.org/ns/1.0", "rng": "http://relaxng.org/ns/structure/1.0"}
NAMESPACES = {'xml': 'http://www.w3.org/XML/1998/namespace',
'xlink': 'http://www.w3.org/1999/xlink'}
class MeiSchema(object):
    """In-memory model of an MEI schema parsed from a compiled ODD file.

    Builds three structures:

    * ``element_structure``: module -> element -> [att classes..., [own atts]]
    * ``attribute_group_structure``: module -> att class -> [attributes]
    * ``inverse_attribute_group_structure``: att class -> module
    """

    def __init__(self, oddfile):
        """Parse the compiled ODD file and build all derived structures."""
        parser = etree.XMLParser(resolve_entities=True)
        self.schema = etree.parse(oddfile, parser)
        # self.customization = etree.parse(customization_file)
        self.active_modules = []  # the modules active in the resulting output
        self.element_structure = {}  # the element structure.
        self.attribute_group_structure = {}  # the attribute group structure
        self.inverse_attribute_group_structure = {}  # inverted, so we can map attgroups to modules
        self.get_elements()
        self.get_attribute_groups()
        self.invert_attribute_group_structure()
        self.set_active_modules()

    def get_elements(self):
        """Collect every tei:elementSpec into element_structure[module][element]."""
        elements = [m for m in self.schema.xpath("//tei:elementSpec", namespaces=TEI_NS)]
        for element in elements:
            modname = element.get("module").split(".")[-1]
            if modname not in self.element_structure:
                self.element_structure[modname] = {}
            element_name = element.get("ident")
            memberships = []
            element_membership = element.xpath("./tei:classes/tei:memberOf", namespaces=TEI_NS)
            for member in element_membership:
                if member.get("key").split(".")[0] != "att":
                    # skip the models that this element might be a member of
                    continue
                self.__get_membership(member, memberships)
            self.element_structure[modname][element_name] = memberships
            # need a way to keep self-defined attributes: they go last, as a
            # nested list, after the attribute-class names.
            selfattributes = []
            attdefs = element.xpath("./tei:attList/tei:attDef", namespaces=TEI_NS)
            if attdefs:
                for attdef in attdefs:
                    if attdef.get("ident") == "id":
                        # "id" attributes are never recorded
                        continue
                    attname = self.__process_att(attdef)
                    selfattributes.append(attname)
            self.element_structure[modname][element_name].append(selfattributes)

    def get_attribute_groups(self):
        """Collect attribute classes into attribute_group_structure[module][group]."""
        attribute_groups = [m for m in self.schema.xpath("//tei:classSpec[@type=$at]", at="atts", namespaces=TEI_NS)]
        for group in attribute_groups:
            group_name = group.get("ident")
            if group_name == "att.id":
                continue
            group_module = group.get("module").split(".")[-1]
            attdefs = group.xpath("./tei:attList/tei:attDef", namespaces=TEI_NS)
            if not attdefs:
                # classes that define no attributes of their own are skipped
                continue
            if group_module not in self.attribute_group_structure:
                self.attribute_group_structure[group_module] = {}
            self.attribute_group_structure[group_module][group_name] = []
            for attdef in attdefs:
                if attdef.get("ident") == "id":
                    continue
                attname = self.__process_att(attdef)
                self.attribute_group_structure[group_module][group_name].append(attname)

    def invert_attribute_group_structure(self):
        """Build the attgroup -> module map from the module -> attgroup map."""
        for module, groups in self.attribute_group_structure.items():
            for attgroup in groups:
                self.inverse_attribute_group_structure[attgroup] = module

    def set_active_modules(self):
        """Record the sorted list of modules that contributed elements."""
        self.active_modules = list(self.element_structure.keys())
        self.active_modules.sort()

    def __process_att(self, attdef):
        """Return the normalized name for one tei:attDef.

        Hyphenated idents become camelCase; namespaced attributes are
        returned as "<namespace-uri>|<name>".
        """
        attdefident = attdef.get("ident")
        if "-" in attdefident:
            f, l = attdefident.split("-")
            attdefident = "{0}{1}".format(f, l.title())
        if attdef.get("ns"):
            attname = "{0}|{1}".format(attdef.get("ns"), attdefident)
        elif ":" in attdefident:
            # prefixed idents are resolved through the module-level NAMESPACES map
            pfx, att = attdefident.split(":")
            attname = "{0}|{1}".format(NAMESPACES[pfx], att)
        else:
            attname = attdefident
        return attname

    def __get_membership(self, member, resarr):
        """Recursively append to resarr the att.* classes reachable from member."""
        member_attgroup = member.xpath("//tei:classSpec[@type=$att][@ident=$nm]", att="atts", nm=member.get("key"), namespaces=TEI_NS)
        if member_attgroup:
            member_attgroup = member_attgroup[0]
        else:
            return
        # only classes that actually define attributes are recorded
        if member_attgroup.xpath("./tei:attList/tei:attDef", namespaces=TEI_NS):
            if member_attgroup.get("ident") == "att.id":
                return
            resarr.append(member_attgroup.get("ident"))
        m2s = member_attgroup.xpath("./tei:classes/tei:memberOf", namespaces=TEI_NS)
        if not m2s:
            return
        for mship in m2s:
            self.__get_membership(mship, resarr)

    def strpatt(self, name):
        """ Returns a version of the string with any att. stripped
        (every occurrence of "att." is removed). """
        return name.replace("att.", "")

    def strpdot(self, name):
        """Return name with every "." removed (e.g. "a.b.c" -> "abc")."""
        return "".join([n for n in name.split(".")])

    def cc(self, name):
        """ Returns a CamelCasedName version of attribute.case.names. """
        return "".join([n[0].upper() + n[1:] for n in name.split(".")])

    def getattdocs(self, aname):
        """ returns the documentation string for element name, or an empty string if there is none."""
        dsc = self.schema.xpath("//tei:attDef[@ident=$name]/tei:desc/text()", name=aname, namespaces=TEI_NS)
        if dsc:
            # raw string pattern; \s already covers \t, so runs of any
            # whitespace are collapsed to a single space
            return re.sub(r'\s+', ' ', dsc[0])  # strip extraneous whitespace
        else:
            return ""

    def geteldocs(self, ename):
        """Return the tei:desc text for the named elementSpec, or ""."""
        dsc = self.schema.xpath("//tei:elementSpec[@ident=$name]/tei:desc/text()", name=ename, namespaces=TEI_NS)
        if dsc:
            return re.sub(r'\s+', ' ', dsc[0])  # strip extraneous whitespace
        else:
            return ""
if __name__ == "__main__":
p = ArgumentParser(usage='%(prog)s [compiled | -sl] [-h] [-o OUTDIR] [-i INCLUDES] [-d] [-l [LANG [LANG ...]]]') #Custom usage message to show user [compiled] should go before all other flags | p.add_argument("-o", "--outdir", default="output", help="output directory")
p.add_argument("-l", "--lang", default=["python"], help="Programming language or languages to output. To output multiple languages at once, list desired languages separated by a space after -l. For example: python parseschema2.py [compiled] -l python cpp", nargs="*")
p.add_argument("-i", "--includes", help="Parse external includes from a given directory")
p.add_argument("-d", "--debugging", help="Run with verbose output", action="store_true")
exclusive_group.add_argument("-sl", "--showlang", help="Show languages and exit.", action="store_true")
args = p.parse_args()
if not args.showlang and not args.compiled:
p.print_usage()
print("error: You must include a compiled ODD file")
sys.exit(1)
avail_langs = ["cpp", "python", "manuscript"]
if not args.lang == "python":
for l_langs in args.lang:
if l_langs.lower() not in avail_langs:
p.print_usage()
print("error: One or more of the languages you have chosen are not supported. To check supported languages use the -sl flag")
sys.exit(1)
if args.showlang:
import langs
print("Available Output Languages")
for l in langs.AVAILABLE_LANGS:
print("\t{0}".format(l))
sys.exit(0)
compiled_odd = args.compiled
mei_source = codecs.open(compiled_odd, 'r', 'utf-8')
# sf = codecs.open(args.source,'r', "utf-8")
# cf = codecs.open(args.customization, 'r', "utf-8")
outdir = args.outdir
if not os.path.exists(args.outdir):
os.mkdir(args.outdir)
schema = MeiSchema(mei_source)
for l_langs in args.lang:
if "cpp" in l_langs.lower():
import langs.cplusplus as cpp
output_directory = os.path.join(outdir, "cpp")
if os.path.exists(output_directory):
lg.debug("Removing old C++ output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
cpp.create(schema, output_directory)
if args.includes:
cpp.parse_includes(output_directory, args.includes)
if "python" in l_langs.lower():
import langs.python as py
output_directory = os.path.join(outdir, "python")
if os.path.exists(output_directory):
lg.debug("Removing old Python output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
py.create(schema, output_directory)
if args.includes:
py.parse_includes(output_directory, args.includes)
if "manuscript" in l_langs.lower():
import langs.manuscript as ms
output_directory = os.path.join(outdir, "manuscript")
if os.path.exists(output_directory):
lg.debug("Removing old Manuscript output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
ms.create(schema, output_directory)
if "java" in args.lang:
import langs.java as java
output_directory = os.path.join(outdir, "java")
if os.path.exists(output_directory):
lg.debug("Removing old Java output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
java.create(schema, output_directory)
if args.includes:
java.parse_includes(output_directory, args.includes)
mei_source.close()
sys.exit(0) | exclusive_group = p.add_mutually_exclusive_group()
exclusive_group.add_argument("compiled", help="A compiled ODD file", nargs="?") # Due to nargs="?", "compiled" will appear as optional and not positional | random_line_split |
parseschema2.py | # -- coding: utf-8 --
# Copyright (c) 2011-2015 Andrew Hankinson, Alastair Porter, and Others
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
if sys.version_info < (2, 7):
raise Exception("requires python 2.7")
from lxml import etree
import os
import shutil
import codecs
import re
from argparse import ArgumentParser
import logging
lg = logging.getLogger('schemaparser')
f = logging.Formatter("%(levelname)s %(asctime)s On Line: %(lineno)d %(message)s")
h = logging.StreamHandler()
h.setFormatter(f)
lg.setLevel(logging.DEBUG)
lg.addHandler(h)
# globals
TEI_NS = {"tei": "http://www.tei-c.org/ns/1.0"}
TEI_RNG_NS = {"tei": "http://www.tei-c.org/ns/1.0", "rng": "http://relaxng.org/ns/structure/1.0"}
NAMESPACES = {'xml': 'http://www.w3.org/XML/1998/namespace',
'xlink': 'http://www.w3.org/1999/xlink'}
class MeiSchema(object):
def __init__(self, oddfile):
parser = etree.XMLParser(resolve_entities=True)
self.schema = etree.parse(oddfile, parser)
# self.customization = etree.parse(customization_file)
self.active_modules = [] # the modules active in the resulting output
self.element_structure = {} # the element structure.
self.attribute_group_structure = {} # the attribute group structure
self.inverse_attribute_group_structure = {} # inverted, so we can map attgroups to modules
self.get_elements()
self.get_attribute_groups()
self.invert_attribute_group_structure()
self.set_active_modules()
def get_elements(self):
elements = [m for m in self.schema.xpath("//tei:elementSpec", namespaces=TEI_NS)]
for element in elements:
modname = element.get("module").split(".")[-1]
if modname not in self.element_structure.keys():
self.element_structure[modname] = {}
element_name = element.get("ident")
memberships = []
element_membership = element.xpath("./tei:classes/tei:memberOf", namespaces=TEI_NS)
for member in element_membership:
if member.get("key").split(".")[0] != "att":
# skip the models that this element might be a member of
continue
self.__get_membership(member, memberships)
# memberships.kesort()
self.element_structure[modname][element_name] = memberships
# need a way to keep self-defined attributes:
selfattributes = []
attdefs = element.xpath("./tei:attList/tei:attDef", namespaces=TEI_NS)
if attdefs:
for attdef in attdefs:
if attdef.get("ident") == "id":
continue
attname = self.__process_att(attdef)
selfattributes.append(attname)
self.element_structure[modname][element_name].append(selfattributes)
def get_attribute_groups(self):
attribute_groups = [m for m in self.schema.xpath("//tei:classSpec[@type=$at]", at="atts", namespaces=TEI_NS)]
for group in attribute_groups:
group_name = group.get("ident")
if group_name == "att.id":
continue
group_module = group.get("module").split(".")[-1]
attdefs = group.xpath("./tei:attList/tei:attDef", namespaces=TEI_NS)
if not attdefs:
continue
if group_module not in self.attribute_group_structure.keys():
self.attribute_group_structure[group_module] = {}
self.attribute_group_structure[group_module][group_name] = []
for attdef in attdefs:
if attdef.get("ident") == "id":
continue
attname = self.__process_att(attdef)
self.attribute_group_structure[group_module][group_name].append(attname)
def invert_attribute_group_structure(self):
for module, groups in self.attribute_group_structure.items():
for attgroup in groups:
self.inverse_attribute_group_structure[attgroup] = module
def set_active_modules(self):
self.active_modules = list(self.element_structure.keys())
self.active_modules.sort()
def __process_att(self, attdef):
attname = ""
attdefident = attdef.get("ident")
if "-" in attdefident:
f, l = attdefident.split("-")
attdefident = "{0}{1}".format(f, l.title())
if attdef.get("ns"):
attname = "{0}|{1}".format(attdef.get("ns"), attdefident)
elif ":" in attdefident:
pfx, att = attdefident.split(":")
attname = "{0}|{1}".format(NAMESPACES[pfx], att)
else:
attname = "{0}".format(attdefident)
return attname
def __get_membership(self, member, resarr):
member_attgroup = member.xpath("//tei:classSpec[@type=$att][@ident=$nm]", att="atts", nm=member.get("key"), namespaces=TEI_NS)
if member_attgroup:
member_attgroup = member_attgroup[0]
else:
return
if member_attgroup.xpath("./tei:attList/tei:attDef", namespaces=TEI_NS):
if member_attgroup.get("ident") == "att.id":
return
resarr.append(member_attgroup.get("ident"))
m2s = member_attgroup.xpath("./tei:classes/tei:memberOf", namespaces=TEI_NS)
if not m2s:
return
for mship in m2s:
self.__get_membership(mship, resarr)
def strpatt(self, name):
""" Returns a version of the string with any leading att. stripped. """
return name.replace("att.", "")
def strpdot(self, name):
return "".join([n for n in name.split(".")])
def cc(self, name):
""" Returns a CamelCasedName version of attribute.case.names.
"""
return "".join([n[0].upper() + n[1:] for n in name.split(".")])
def getattdocs(self, aname):
""" returns the documentation string for element name, or an empty string if there is none."""
dsc = self.schema.xpath("//tei:attDef[@ident=$name]/tei:desc/text()", name=aname, namespaces=TEI_NS)
if dsc:
return re.sub('[\s\t]+', ' ', dsc[0]) # strip extraneous whitespace
else:
return ""
def geteldocs(self, ename):
dsc = self.schema.xpath("//tei:elementSpec[@ident=$name]/tei:desc/text()", name=ename, namespaces=TEI_NS)
if dsc:
return re.sub('[\s\t]+', ' ', dsc[0]) # strip extraneous whitespace
else:
return ""
if __name__ == "__main__":
p = ArgumentParser(usage='%(prog)s [compiled | -sl] [-h] [-o OUTDIR] [-i INCLUDES] [-d] [-l [LANG [LANG ...]]]') #Custom usage message to show user [compiled] should go before all other flags
exclusive_group = p.add_mutually_exclusive_group()
exclusive_group.add_argument("compiled", help="A compiled ODD file", nargs="?") # Due to nargs="?", "compiled" will appear as optional and not positional
p.add_argument("-o", "--outdir", default="output", help="output directory")
p.add_argument("-l", "--lang", default=["python"], help="Programming language or languages to output. To output multiple languages at once, list desired languages separated by a space after -l. For example: python parseschema2.py [compiled] -l python cpp", nargs="*")
p.add_argument("-i", "--includes", help="Parse external includes from a given directory")
p.add_argument("-d", "--debugging", help="Run with verbose output", action="store_true")
exclusive_group.add_argument("-sl", "--showlang", help="Show languages and exit.", action="store_true")
args = p.parse_args()
if not args.showlang and not args.compiled:
p.print_usage()
print("error: You must include a compiled ODD file")
sys.exit(1)
avail_langs = ["cpp", "python", "manuscript"]
if not args.lang == "python":
for l_langs in args.lang:
if l_langs.lower() not in avail_langs:
p.print_usage()
print("error: One or more of the languages you have chosen are not supported. To check supported languages use the -sl flag")
sys.exit(1)
if args.showlang:
import langs
print("Available Output Languages")
for l in langs.AVAILABLE_LANGS:
print("\t{0}".format(l))
sys.exit(0)
compiled_odd = args.compiled
mei_source = codecs.open(compiled_odd, 'r', 'utf-8')
# sf = codecs.open(args.source,'r', "utf-8")
# cf = codecs.open(args.customization, 'r', "utf-8")
outdir = args.outdir
if not os.path.exists(args.outdir):
os.mkdir(args.outdir)
schema = MeiSchema(mei_source)
for l_langs in args.lang:
if "cpp" in l_langs.lower():
import langs.cplusplus as cpp
output_directory = os.path.join(outdir, "cpp")
if os.path.exists(output_directory):
lg.debug("Removing old C++ output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
cpp.create(schema, output_directory)
if args.includes:
cpp.parse_includes(output_directory, args.includes)
if "python" in l_langs.lower():
import langs.python as py
output_directory = os.path.join(outdir, "python")
if os.path.exists(output_directory):
lg.debug("Removing old Python output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
py.create(schema, output_directory)
if args.includes:
py.parse_includes(output_directory, args.includes)
if "manuscript" in l_langs.lower():
import langs.manuscript as ms
output_directory = os.path.join(outdir, "manuscript")
if os.path.exists(output_directory):
|
os.mkdir(output_directory)
ms.create(schema, output_directory)
if "java" in args.lang:
import langs.java as java
output_directory = os.path.join(outdir, "java")
if os.path.exists(output_directory):
lg.debug("Removing old Java output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
java.create(schema, output_directory)
if args.includes:
java.parse_includes(output_directory, args.includes)
mei_source.close()
sys.exit(0)
| lg.debug("Removing old Manuscript output directory")
shutil.rmtree(output_directory) | conditional_block |
parseschema2.py | # -- coding: utf-8 --
# Copyright (c) 2011-2015 Andrew Hankinson, Alastair Porter, and Others
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
if sys.version_info < (2, 7):
raise Exception("requires python 2.7")
from lxml import etree
import os
import shutil
import codecs
import re
from argparse import ArgumentParser
import logging
lg = logging.getLogger('schemaparser')
f = logging.Formatter("%(levelname)s %(asctime)s On Line: %(lineno)d %(message)s")
h = logging.StreamHandler()
h.setFormatter(f)
lg.setLevel(logging.DEBUG)
lg.addHandler(h)
# globals
TEI_NS = {"tei": "http://www.tei-c.org/ns/1.0"}
TEI_RNG_NS = {"tei": "http://www.tei-c.org/ns/1.0", "rng": "http://relaxng.org/ns/structure/1.0"}
NAMESPACES = {'xml': 'http://www.w3.org/XML/1998/namespace',
'xlink': 'http://www.w3.org/1999/xlink'}
class MeiSchema(object):
def __init__(self, oddfile):
parser = etree.XMLParser(resolve_entities=True)
self.schema = etree.parse(oddfile, parser)
# self.customization = etree.parse(customization_file)
self.active_modules = [] # the modules active in the resulting output
self.element_structure = {} # the element structure.
self.attribute_group_structure = {} # the attribute group structure
self.inverse_attribute_group_structure = {} # inverted, so we can map attgroups to modules
self.get_elements()
self.get_attribute_groups()
self.invert_attribute_group_structure()
self.set_active_modules()
def get_elements(self):
elements = [m for m in self.schema.xpath("//tei:elementSpec", namespaces=TEI_NS)]
for element in elements:
modname = element.get("module").split(".")[-1]
if modname not in self.element_structure.keys():
self.element_structure[modname] = {}
element_name = element.get("ident")
memberships = []
element_membership = element.xpath("./tei:classes/tei:memberOf", namespaces=TEI_NS)
for member in element_membership:
if member.get("key").split(".")[0] != "att":
# skip the models that this element might be a member of
continue
self.__get_membership(member, memberships)
# memberships.kesort()
self.element_structure[modname][element_name] = memberships
# need a way to keep self-defined attributes:
selfattributes = []
attdefs = element.xpath("./tei:attList/tei:attDef", namespaces=TEI_NS)
if attdefs:
for attdef in attdefs:
if attdef.get("ident") == "id":
continue
attname = self.__process_att(attdef)
selfattributes.append(attname)
self.element_structure[modname][element_name].append(selfattributes)
def get_attribute_groups(self):
attribute_groups = [m for m in self.schema.xpath("//tei:classSpec[@type=$at]", at="atts", namespaces=TEI_NS)]
for group in attribute_groups:
group_name = group.get("ident")
if group_name == "att.id":
continue
group_module = group.get("module").split(".")[-1]
attdefs = group.xpath("./tei:attList/tei:attDef", namespaces=TEI_NS)
if not attdefs:
continue
if group_module not in self.attribute_group_structure.keys():
self.attribute_group_structure[group_module] = {}
self.attribute_group_structure[group_module][group_name] = []
for attdef in attdefs:
if attdef.get("ident") == "id":
continue
attname = self.__process_att(attdef)
self.attribute_group_structure[group_module][group_name].append(attname)
def invert_attribute_group_structure(self):
for module, groups in self.attribute_group_structure.items():
for attgroup in groups:
self.inverse_attribute_group_structure[attgroup] = module
def set_active_modules(self):
self.active_modules = list(self.element_structure.keys())
self.active_modules.sort()
def __process_att(self, attdef):
attname = ""
attdefident = attdef.get("ident")
if "-" in attdefident:
f, l = attdefident.split("-")
attdefident = "{0}{1}".format(f, l.title())
if attdef.get("ns"):
attname = "{0}|{1}".format(attdef.get("ns"), attdefident)
elif ":" in attdefident:
pfx, att = attdefident.split(":")
attname = "{0}|{1}".format(NAMESPACES[pfx], att)
else:
attname = "{0}".format(attdefident)
return attname
def __get_membership(self, member, resarr):
member_attgroup = member.xpath("//tei:classSpec[@type=$att][@ident=$nm]", att="atts", nm=member.get("key"), namespaces=TEI_NS)
if member_attgroup:
member_attgroup = member_attgroup[0]
else:
return
if member_attgroup.xpath("./tei:attList/tei:attDef", namespaces=TEI_NS):
if member_attgroup.get("ident") == "att.id":
return
resarr.append(member_attgroup.get("ident"))
m2s = member_attgroup.xpath("./tei:classes/tei:memberOf", namespaces=TEI_NS)
if not m2s:
return
for mship in m2s:
self.__get_membership(mship, resarr)
def strpatt(self, name):
""" Returns a version of the string with any leading att. stripped. """
return name.replace("att.", "")
def strpdot(self, name):
return "".join([n for n in name.split(".")])
def cc(self, name):
|
def getattdocs(self, aname):
""" returns the documentation string for element name, or an empty string if there is none."""
dsc = self.schema.xpath("//tei:attDef[@ident=$name]/tei:desc/text()", name=aname, namespaces=TEI_NS)
if dsc:
return re.sub('[\s\t]+', ' ', dsc[0]) # strip extraneous whitespace
else:
return ""
def geteldocs(self, ename):
dsc = self.schema.xpath("//tei:elementSpec[@ident=$name]/tei:desc/text()", name=ename, namespaces=TEI_NS)
if dsc:
return re.sub('[\s\t]+', ' ', dsc[0]) # strip extraneous whitespace
else:
return ""
if __name__ == "__main__":
p = ArgumentParser(usage='%(prog)s [compiled | -sl] [-h] [-o OUTDIR] [-i INCLUDES] [-d] [-l [LANG [LANG ...]]]') #Custom usage message to show user [compiled] should go before all other flags
exclusive_group = p.add_mutually_exclusive_group()
exclusive_group.add_argument("compiled", help="A compiled ODD file", nargs="?") # Due to nargs="?", "compiled" will appear as optional and not positional
p.add_argument("-o", "--outdir", default="output", help="output directory")
p.add_argument("-l", "--lang", default=["python"], help="Programming language or languages to output. To output multiple languages at once, list desired languages separated by a space after -l. For example: python parseschema2.py [compiled] -l python cpp", nargs="*")
p.add_argument("-i", "--includes", help="Parse external includes from a given directory")
p.add_argument("-d", "--debugging", help="Run with verbose output", action="store_true")
exclusive_group.add_argument("-sl", "--showlang", help="Show languages and exit.", action="store_true")
args = p.parse_args()
if not args.showlang and not args.compiled:
p.print_usage()
print("error: You must include a compiled ODD file")
sys.exit(1)
avail_langs = ["cpp", "python", "manuscript"]
if not args.lang == "python":
for l_langs in args.lang:
if l_langs.lower() not in avail_langs:
p.print_usage()
print("error: One or more of the languages you have chosen are not supported. To check supported languages use the -sl flag")
sys.exit(1)
if args.showlang:
import langs
print("Available Output Languages")
for l in langs.AVAILABLE_LANGS:
print("\t{0}".format(l))
sys.exit(0)
compiled_odd = args.compiled
mei_source = codecs.open(compiled_odd, 'r', 'utf-8')
# sf = codecs.open(args.source,'r', "utf-8")
# cf = codecs.open(args.customization, 'r', "utf-8")
outdir = args.outdir
if not os.path.exists(args.outdir):
os.mkdir(args.outdir)
schema = MeiSchema(mei_source)
for l_langs in args.lang:
if "cpp" in l_langs.lower():
import langs.cplusplus as cpp
output_directory = os.path.join(outdir, "cpp")
if os.path.exists(output_directory):
lg.debug("Removing old C++ output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
cpp.create(schema, output_directory)
if args.includes:
cpp.parse_includes(output_directory, args.includes)
if "python" in l_langs.lower():
import langs.python as py
output_directory = os.path.join(outdir, "python")
if os.path.exists(output_directory):
lg.debug("Removing old Python output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
py.create(schema, output_directory)
if args.includes:
py.parse_includes(output_directory, args.includes)
if "manuscript" in l_langs.lower():
import langs.manuscript as ms
output_directory = os.path.join(outdir, "manuscript")
if os.path.exists(output_directory):
lg.debug("Removing old Manuscript output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
ms.create(schema, output_directory)
if "java" in args.lang:
import langs.java as java
output_directory = os.path.join(outdir, "java")
if os.path.exists(output_directory):
lg.debug("Removing old Java output directory")
shutil.rmtree(output_directory)
os.mkdir(output_directory)
java.create(schema, output_directory)
if args.includes:
java.parse_includes(output_directory, args.includes)
mei_source.close()
sys.exit(0)
| """ Returns a CamelCasedName version of attribute.case.names.
"""
return "".join([n[0].upper() + n[1:] for n in name.split(".")]) | identifier_body |
views.py | from rest_framework import generics, status, views
from .serializers import (RegisterSerializer, EmailVerificationSerializer, LoginSerializer,
CustomerSerializer, CustomerSerializerDetail, LogoutSerializer, ResetPasswordSerializer,
SetNewPasswordSerializer, PhoneNumberSerializer, OtpSerializer)
from rest_framework.response import Response
from .models import User, Customer, Admin
from django.db import transaction
from django.contrib.auth import logout
from rest_framework_simplejwt.tokens import RefreshToken
from .utils import email_template, generate_otp
from django.contrib.sites.shortcuts import get_current_site
import jwt
from django.contrib.auth import get_user_model
from django.conf import settings
from django.urls import reverse # takes url name and gives us the path
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.encoding import smart_str, force_str, smart_bytes, DjangoUnicodeDecodeError
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from twilio.rest import Client
from datetime import timedelta
from django.utils import timezone
class RegisterView(generics.GenericAPIView):
"""View for registering new users"""
serializer_class = RegisterSerializer
@transaction.atomic()
def post(self, request):
'''Sends a post request to create a new user'''
user = request.data # gets details passed from the request and assigns it to user
full_name = request.data.get('full_name')
serializer = RegisterSerializer(
data=user) # serializes and validates the data sent in request by passing it to register serializer
serializer.is_valid(raise_exception=True) # confirms that the data in serializer is indeed valid
serializer.save() # creates and saves this data which is user to db
user_data = serializer.data # user data is the data that the serializer has saved
user = User.objects.get(
email=user_data['email']) # initializes a user by fetching it from the db using the users email
token = RefreshToken.for_user(user).access_token # generates and ties a token to the users email passed to it
customer = {"user_id": str(user.id), # creates customer object by accessing User id
"full_name": full_name}
customer_serializer = CustomerSerializer(data=customer) # serializes customer data
customer_serializer.is_valid(raise_exception=True)
customer_serializer.save() # creates and save customer to db
customer_instance = Customer.objects.get(full_name=customer_serializer.data["full_name"])
customer_data = CustomerSerializerDetail(
customer_instance) # uses the customer instance to access all the users attributes
current_site = get_current_site(
request).domain # you want the user to be directed back to your site(this site) when they click the registration link
relativeLink = reverse('verify-email') # takes the url name passed and gives us the path
absolute = 'http://' + current_site + relativeLink + "?token=" + str(
token) # this is the link that will be sent to new user to click on
email_subject = 'Welcome To OgaTailor'
email_body = f'''
Hello {user.username}, Welcome to OgaTailor, we are delighted to have you on board!
<br><br><b>Note: <i>Please click the link below to verify your account.</i> </b>
<br><br><b> <i>{absolute}</i> </b>
<br><br><b>Note: <i>It expires in 10 minutes.</i> </b>'''
email_template(email_subject, user.email, email_body)
return Response(customer_data.data, status=status.HTTP_201_CREATED)
class VerifyEmailView(views.APIView):
serializer_class = EmailVerificationSerializer
token_param_config = openapi.Parameter('token', in_=openapi.IN_QUERY, description='Description',
type=openapi.TYPE_STRING)
@swagger_auto_schema(manual_parameters=[token_param_config])
def get(self, request):
token = request.GET.get('token') # get the token from the user when they hit our view
try:
payload = jwt.decode(token,
settings.SECRET_KEY) # here we are truing to access the informattion encoded in to the link. Functionality comes with jwt
user = User.objects.get(id=payload['user_id']) # here we extract the user from the payload
if not user.is_verified: # check that the user is not already verified so as to reduce the db queries
user.is_verified = True
user.email_verified = True
user.save()
return Response({'email': 'Successfully activated'}, status=status.HTTP_200_OK)
except jwt.ExpiredSignatureError:
return Response({'error': 'This activation link has expired. Please request for a new one.'},
status=status.HTTP_400_BAD_REQUEST)
except jwt.exceptions.DecodeError:
return Response({'error': 'Invalid token, request a new one.'}, status=status.HTTP_400_BAD_REQUEST)
class LoginView(generics.GenericAPIView):
serializer_class = LoginSerializer
def post(self, request):
user = request.data
serializer = self.serializer_class(data=user)
serializer.is_valid(raise_exception=True)
return Response(serializer.data, status.HTTP_200_OK) | serializer_class = LogoutSerializer
def post(self, request):
logout(request)
data = {'Success': 'Logout Successful'}
return Response(data=data, status=status.HTTP_200_OK)
class RequestPasswordEmailView(generics.GenericAPIView):
serializer_class = ResetPasswordSerializer
def post(self, request):
email = request.data['email']
if User.objects.filter(email=email).exists():
user = User.objects.get(email=email)
uidb64 = urlsafe_base64_encode(smart_bytes(user.id))
token = PasswordResetTokenGenerator().make_token(user)
current_site = get_current_site(
request=request).domain # you want the user to be directed back to your site(this site) when they click the registration link
relativeLink = reverse('password-reset-confirm', kwargs={'uidb64': uidb64,
'token': token}) # takes the url name passed and gives us the path
absolute = 'http://' + current_site + relativeLink # this is the link that will be sent to user to click on
email_subject = 'Password Reset'
email_body = f'''
Hello, \n You have requested a password reset!
<br><br><b>Note: <i>Please click the link below to reset your password.</i> </b>
<br><br><b> <i>{absolute}</i> </b>
<br><br><b>Note: <i>If you did not request this change, disregard this email.</i> </b>'''
email_template(email_subject, user.email, email_body)
return Response({'success': "We have sent you a link to reset your password"}, status=status.HTTP_200_OK)
class PasswordTokenCheckView(generics.GenericAPIView):
serializer_class = SetNewPasswordSerializer
def get(self, request, uidb64, token):
# redirect_url = request.GET.get('redirect_url')
try:
id = smart_str(urlsafe_base64_decode(uidb64))
user = User.objects.get(id=id)
if not PasswordResetTokenGenerator().check_token(user, token):
return Response({'error': 'Token is not valid, please request a new one'})
return Response({'success': True, 'message': 'Credentials valid', 'uidb64': uidb64, 'token': token})
except DjangoUnicodeDecodeError as identifier:
if not PasswordResetTokenGenerator().check_token(user):
return Response({'error': 'Token is not valid, please request for a ew one'})
class SetNewPasswordAPIView(generics.GenericAPIView):
serializer_class = SetNewPasswordSerializer
def patch(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
return Response({'success': True, 'message': 'Password reset successful'}, status=status.HTTP_200_OK)
class SendSmsView(generics.GenericAPIView):
serializer_class = PhoneNumberSerializer
otp = None
@transaction.atomic()
def post(self, request, otp=None):
data = request.data
email = data['email']
user = User.objects.get(email=email)
phone_number_valid = PhoneNumberSerializer(data=data)
if not phone_number_valid.is_valid():
return Response({'errors': 'Invalid phone number'})
phone_number = data['phone_number']
otp = self.otp
if otp is None:
otp = generate_otp()
user.otp_code = otp
user.phone_number = phone_number
expiry = timezone.now() + timedelta(minutes=30)
user.otp_code_expiry = expiry
user.save()
try:
client = Client(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN)
message_to_broadcast = f'Your OgaTailor Verification code is {otp}'
client.messages.create(to=phone_number, from_=settings.TWILIO_NUMBER, body=message_to_broadcast)
return Response({'message': 'OTP Sent!', 'otp': otp })
except:
return Response({'errors': 'Having problems sending code'})
class VerifyOtpView(generics.GenericAPIView):
serializer_class = OtpSerializer
def post(self, request):
data = request.data
user = get_user_model().objects.filter(email=data['email'])
if not user.exists():
return Response({'errors': 'You are not registered'})
user = user[0]
if user.otp_code != data['otp_code']:
return Response({'errors': 'Please provide a valid OTP'})
otp_expired = OtpSerializer(data=data)
if not otp_expired:
return Response({'errors': 'OTP provided has expired'})
user.phone_verified = True
user.save()
return Response({'message': 'Phone Verified!'}) |
class LogoutView(generics.GenericAPIView): | random_line_split |
views.py | from rest_framework import generics, status, views
from .serializers import (RegisterSerializer, EmailVerificationSerializer, LoginSerializer,
CustomerSerializer, CustomerSerializerDetail, LogoutSerializer, ResetPasswordSerializer,
SetNewPasswordSerializer, PhoneNumberSerializer, OtpSerializer)
from rest_framework.response import Response
from .models import User, Customer, Admin
from django.db import transaction
from django.contrib.auth import logout
from rest_framework_simplejwt.tokens import RefreshToken
from .utils import email_template, generate_otp
from django.contrib.sites.shortcuts import get_current_site
import jwt
from django.contrib.auth import get_user_model
from django.conf import settings
from django.urls import reverse # takes url name and gives us the path
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.encoding import smart_str, force_str, smart_bytes, DjangoUnicodeDecodeError
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from twilio.rest import Client
from datetime import timedelta
from django.utils import timezone
class RegisterView(generics.GenericAPIView):
"""View for registering new users"""
serializer_class = RegisterSerializer
@transaction.atomic()
def post(self, request):
'''Sends a post request to create a new user'''
user = request.data # gets details passed from the request and assigns it to user
full_name = request.data.get('full_name')
serializer = RegisterSerializer(
data=user) # serializes and validates the data sent in request by passing it to register serializer
serializer.is_valid(raise_exception=True) # confirms that the data in serializer is indeed valid
serializer.save() # creates and saves this data which is user to db
user_data = serializer.data # user data is the data that the serializer has saved
user = User.objects.get(
email=user_data['email']) # initializes a user by fetching it from the db using the users email
token = RefreshToken.for_user(user).access_token # generates and ties a token to the users email passed to it
customer = {"user_id": str(user.id), # creates customer object by accessing User id
"full_name": full_name}
customer_serializer = CustomerSerializer(data=customer) # serializes customer data
customer_serializer.is_valid(raise_exception=True)
customer_serializer.save() # creates and save customer to db
customer_instance = Customer.objects.get(full_name=customer_serializer.data["full_name"])
customer_data = CustomerSerializerDetail(
customer_instance) # uses the customer instance to access all the users attributes
current_site = get_current_site(
request).domain # you want the user to be directed back to your site(this site) when they click the registration link
relativeLink = reverse('verify-email') # takes the url name passed and gives us the path
absolute = 'http://' + current_site + relativeLink + "?token=" + str(
token) # this is the link that will be sent to new user to click on
email_subject = 'Welcome To OgaTailor'
email_body = f'''
Hello {user.username}, Welcome to OgaTailor, we are delighted to have you on board!
<br><br><b>Note: <i>Please click the link below to verify your account.</i> </b>
<br><br><b> <i>{absolute}</i> </b>
<br><br><b>Note: <i>It expires in 10 minutes.</i> </b>'''
email_template(email_subject, user.email, email_body)
return Response(customer_data.data, status=status.HTTP_201_CREATED)
class VerifyEmailView(views.APIView):
    """Activate a user account from the JWT embedded in the emailed verification link."""
    serializer_class = EmailVerificationSerializer
    # Documents the `token` query parameter for the swagger UI.
    token_param_config = openapi.Parameter('token', in_=openapi.IN_QUERY, description='Description',
                                           type=openapi.TYPE_STRING)

    @swagger_auto_schema(manual_parameters=[token_param_config])
    def get(self, request):
        # The token arrives as a query parameter on the link the user clicked.
        token = request.GET.get('token')
        try:
            # BUG FIX: PyJWT >= 2.0 requires an explicit `algorithms` list and
            # raises if it is omitted. HS256 is the default signing algorithm
            # used by simplejwt's RefreshToken — TODO confirm against settings.
            payload = jwt.decode(token, settings.SECRET_KEY, algorithms=['HS256'])
            user = User.objects.get(id=payload['user_id'])
            # Skip the write when the account is already verified to save a DB hit.
            if not user.is_verified:
                user.is_verified = True
                user.email_verified = True
                user.save()
            return Response({'email': 'Successfully activated'}, status=status.HTTP_200_OK)
        except jwt.ExpiredSignatureError:
            return Response({'error': 'This activation link has expired. Please request for a new one.'},
                            status=status.HTTP_400_BAD_REQUEST)
        except jwt.exceptions.DecodeError:
            return Response({'error': 'Invalid token, request a new one.'}, status=status.HTTP_400_BAD_REQUEST)
class LoginView(generics.GenericAPIView):
    """Authenticate a user; credential checking lives entirely in LoginSerializer."""
    serializer_class = LoginSerializer

    def post(self, request):
        # Validation raises (400) on bad credentials; on success the serializer's
        # output data is returned as-is.
        credentials = self.serializer_class(data=request.data)
        credentials.is_valid(raise_exception=True)
        return Response(credentials.data, status.HTTP_200_OK)
class LogoutView(generics.GenericAPIView):
    """End the caller's session via Django's session-based logout()."""
    serializer_class = LogoutSerializer

    def post(self, request):
        logout(request)
        return Response(data={'Success': 'Logout Successful'}, status=status.HTTP_200_OK)
class RequestPasswordEmailView(generics.GenericAPIView):
    """Email a password-reset link to the address supplied in the request body."""
    serializer_class = ResetPasswordSerializer

    def post(self, request):
        # BUG FIX: request.data['email'] raised KeyError (HTTP 500) when the
        # client omitted the field; .get() degrades to the no-such-user path.
        email = request.data.get('email', '')
        if User.objects.filter(email=email).exists():
            user = User.objects.get(email=email)
            uidb64 = urlsafe_base64_encode(smart_bytes(user.id))
            token = PasswordResetTokenGenerator().make_token(user)
            # Build an absolute URL pointing back at this deployment's
            # password-reset-confirm endpoint.
            current_site = get_current_site(request=request).domain
            relativeLink = reverse('password-reset-confirm',
                                   kwargs={'uidb64': uidb64, 'token': token})
            absolute = 'http://' + current_site + relativeLink
            email_subject = 'Password Reset'
            email_body = f'''
Hello, \n You have requested a password reset!
<br><br><b>Note: <i>Please click the link below to reset your password.</i> </b>
<br><br><b> <i>{absolute}</i> </b>
<br><br><b>Note: <i>If you did not request this change, disregard this email.</i> </b>'''
            email_template(email_subject, user.email, email_body)
        # Success is returned whether or not the address exists, so this
        # endpoint cannot be used to enumerate registered emails.
        return Response({'success': "We have sent you a link to reset your password"}, status=status.HTTP_200_OK)
class PasswordTokenCheckView(generics.GenericAPIView):
    """Validate a password-reset uid/token pair before the client submits a new password."""
    serializer_class = SetNewPasswordSerializer

    def get(self, request, uidb64, token):
        # redirect_url = request.GET.get('redirect_url')
        try:
            id = smart_str(urlsafe_base64_decode(uidb64))
            user = User.objects.get(id=id)
            if not PasswordResetTokenGenerator().check_token(user, token):
                # BUG FIX: error responses previously shipped with HTTP 200.
                return Response({'error': 'Token is not valid, please request a new one'},
                                status=status.HTTP_400_BAD_REQUEST)
            return Response({'success': True, 'message': 'Credentials valid', 'uidb64': uidb64, 'token': token})
        except DjangoUnicodeDecodeError:
            # BUG FIX: the original handler re-ran check_token(user) with the
            # `token` argument missing and with `user` possibly unbound,
            # guaranteeing a secondary crash (and a typo, "a ew one"). A bad
            # uidb64 simply means the link is invalid.
            return Response({'error': 'Token is not valid, please request a new one'},
                            status=status.HTTP_400_BAD_REQUEST)
class SetNewPasswordAPIView(generics.GenericAPIView):
    """Apply a new password; all the actual work happens inside SetNewPasswordSerializer."""
    serializer_class = SetNewPasswordSerializer

    def patch(self, request):
        # is_valid(raise_exception=True) both checks the token and, per the
        # serializer, performs the password update.
        payload = self.serializer_class(data=request.data)
        payload.is_valid(raise_exception=True)
        return Response({'success': True, 'message': 'Password reset successful'}, status=status.HTTP_200_OK)
class SendSmsView(generics.GenericAPIView):
    """Generate an OTP, persist it on the user, and text it to them via Twilio."""
    serializer_class = PhoneNumberSerializer
    # Class-level override lets tests inject a fixed code instead of a random one.
    otp = None

    @transaction.atomic()
    def post(self, request, otp=None):
        data = request.data
        email = data['email']
        # NOTE(review): raises User.DoesNotExist (500) for unknown emails —
        # confirm whether callers guarantee a registered address.
        user = User.objects.get(email=email)
        phone_number_valid = PhoneNumberSerializer(data=data)
        if not phone_number_valid.is_valid():
            return Response({'errors': 'Invalid phone number'})
        phone_number = data['phone_number']
        otp = self.otp
        if otp is None:
            otp = generate_otp()
        user.otp_code = otp
        user.phone_number = phone_number
        # Codes are valid for 30 minutes from issue.
        expiry = timezone.now() + timedelta(minutes=30)
        user.otp_code_expiry = expiry
        user.save()
        try:
            client = Client(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN)
            message_to_broadcast = f'Your OgaTailor Verification code is {otp}'
            client.messages.create(to=phone_number, from_=settings.TWILIO_NUMBER, body=message_to_broadcast)
            # NOTE(review): echoing the OTP in the response body defeats SMS
            # verification — confirm this is debug-only before shipping.
            return Response({'message': 'OTP Sent!', 'otp': otp })
        except Exception:
            # BUG FIX: narrowed from a bare `except:`, which also swallowed
            # SystemExit / KeyboardInterrupt.
            return Response({'errors': 'Having problems sending code'})
class VerifyOtpView(generics.GenericAPIView):
    """Mark a user's phone number as verified when the submitted OTP matches and is unexpired."""
    serializer_class = OtpSerializer

    def post(self, request):
        data = request.data
        user = get_user_model().objects.filter(email=data['email'])
        if not user.exists():
            return Response({'errors': 'You are not registered'})
        user = user[0]
        if user.otp_code != data['otp_code']:
            return Response({'errors': 'Please provide a valid OTP'})
        # BUG FIX: the original tested the serializer *instance* for truthiness
        # (`if not otp_expired:`), which is always True, so the expiry branch
        # could never fire. Validation must actually be executed.
        otp_expired = OtpSerializer(data=data)
        if not otp_expired.is_valid():
            return Response({'errors': 'OTP provided has expired'})
        user.phone_verified = True
        user.save()
        return Response({'message': 'Phone Verified!'})
| otp = generate_otp() | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.