file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
client.ts | 'use strict'
import { IncomingMessage } from 'http'
import request from 'request'
import jwt from 'jsonwebtoken'
import createError from 'http-errors'
import { UA } from './ua'
const $setTimeout = setTimeout
const MONGO_REG = /^[0-9a-f]{24}$/i
// Network Errors, exclude 'ETIMEDOUT' and 'ESOCKETTIMEDOUT'
// https://github.com/teambition/tws-auth/issues/15
const RETRIABLE_ERRORS = ['ECONNRESET', 'ENOTFOUND', 'ECONNREFUSED', 'EHOSTUNREACH', 'EPIPE', 'EAI_AGAIN']
const FORWARD_HEADERS = ['x-request-id', 'x-canary']
/**
* Options for request retrying.
*/
export interface RetryOptions {
retryDelay?: number // (default) wait for 2000 ms before trying again
maxAttempts?: number // (default) try 3 times
retryErrorCodes?: string[]
}
/**
* Extra attributes for response.
*/
export interface ExtraResponse {
attempts: number
originalUrl: string
originalMethod: string
}
/**
* Options for request Client.
*/
export interface ClientOptions {
appId: string
appSecrets: string[]
timeout?: number
host?: string
pool?: any
maxSockets?: number
strictSSL?: boolean
time?: boolean
certChain?: Buffer
privateKey?: Buffer
rootCert?: string | Buffer | string[] | Buffer[]
useQuerystring?: boolean
}
export interface Payload { [key: string]: any }
export type RequestOptions = request.CoreOptions & RetryOptions
export type Response = request.Response & ExtraResponse
/**
* Client for teambition web service.
*/
export class Client {
/**
* a retryable request, wrap of https://github.com/request/request.
* When the connection fails with one of ECONNRESET, ENOTFOUND, ESOCKETTIMEDOUT, ETIMEDOUT,
* ECONNREFUSED, EHOSTUNREACH, EPIPE, EAI_AGAIN, the request will automatically be re-attempted as
* these are often recoverable errors and will go away on retry.
* @param options request options.
* @returns a promise with Response.
*/
public static async request (options: RequestOptions & request.UrlOptions): Promise<Response> {
const retryDelay = options.retryDelay != null ? Math.floor(options.retryDelay) : 2000
const maxAttempts = options.maxAttempts != null ? Math.floor(options.maxAttempts) : 3
const retryErrorCodes = Array.isArray(options.retryErrorCodes) ? options.retryErrorCodes : RETRIABLE_ERRORS
// default to `false`
options.followRedirect = options.followRedirect === true
let err = null
let attempts = 0
while (attempts < maxAttempts) {
attempts++
try {
const res = await new Promise<request.Response>((resolve, reject) => {
request(options, (error: any, response: request.Response, _body: any) => {
if (error != null) {
reject(error)
} else {
resolve(response)
}
})
})
return Object.assign(res, {
attempts,
originalUrl: options.url as string,
originalMethod: options.method as string,
})
} catch (e) {
err = e
if (!retryErrorCodes.includes(e.code)) {
break
}
await delay(retryDelay)
}
}
throw Object.assign(err, {
attempts,
originalUrl: options.url,
originalMethod: options.method,
})
}
private _options: ClientOptions
private _host: string
private _headers: Payload
private _query: Payload
private _requestOptions: RequestOptions
constructor (options: ClientOptions & RetryOptions) {
if (!MONGO_REG.test(options.appId)) {
throw new Error(`appId: ${options.appId} is not a valid mongo object id`)
}
if (!Array.isArray(options.appSecrets) || options.appSecrets.length === 0) {
throw new Error(`appSecrets required`)
}
if (typeof options.host !== 'string' || options.host === '') {
throw new Error(`host required`)
}
options.timeout = options.timeout == null ? 3000 : options.timeout
options.pool = options.pool == null ?
{ maxSockets: options.maxSockets == null ? 100 : options.maxSockets } : options.pool
options.strictSSL = options.strictSSL === true
options.retryDelay = options.retryDelay == null ? 2000 : options.retryDelay
options.maxAttempts = options.maxAttempts == null ? 3 : options.maxAttempts
this._options = options
this._host = options.host
this._headers = { 'User-Agent': UA }
this._query = {}
this._requestOptions = {
json: true,
forever: true,
strictSSL: options.strictSSL,
timeout: options.timeout,
cert: options.certChain,
key: options.privateKey,
ca: options.rootCert,
pool: options.pool,
time: options.time,
retryDelay: options.retryDelay,
maxAttempts: options.maxAttempts,
retryErrorCodes: options.retryErrorCodes,
useQuerystring: options.useQuerystring,
} as RequestOptions
}
/**
* @returns User-Agent on the client.
*/
get UA (): string {
const ua = this._headers['User-Agent']
return ua == null ? '' : ua
}
/**
* Set User-Agent to the client.
* @param ua User-Agent string.
*/
set UA (ua: string) {
this._headers['User-Agent'] = ua
}
/**
* @returns host on the client.
*/
get host () {
return this._host
}
/**
* @returns preset headers on the client.
*/
get headers () {
return this._headers
}
/**
* @returns preset query on the client.
*/
get query () {
return this._query
}
/**
* @returns preset request options on the client.
*/
get requestOptions () {
return this._requestOptions
}
/**
* Creates (by Object.create) a **new client** instance with given service methods.
* @param servicePrototype service methods that will be mount to client.
* @param servicehost service host for new client.
* @returns a **new client** with with given service methods.
*/
withService<T> (serviceMethod: T, servicehost: string = ''): this & T {
const srv = Object.assign<this, T>(Object.create(this), serviceMethod)
if (servicehost !== '') {
srv._host = servicehost
}
return srv
}
/**
* Creates (by Object.create) a **new client** instance with given request options.
* @param options request options that will be copy into client.
* @returns a **new client** with with given request options.
*/
withOptions (options: RequestOptions): this {
return Object.assign(Object.create(this), {
_requestOptions: Object.assign({}, this._requestOptions, options),
})
}
/**
* Creates (by Object.create) a **new client** instance with given headers.
* @param headers headers that will be copy into client.
* @returns a **new client** with with given headers.
*/
withHeaders (headers: Payload): this {
return Object.assign(Object.create(this), {
_headers: Object.assign({}, this._headers, headers),
})
}
/**
* Creates (by Object.create) a **new client** instance with headers copy from the request.
* @param req IncomingMessage object that headers read from.
* @param headers headers that will be copy into client.
* @returns a **new client** with with given headers.
*/
forwardHeaders (req: IncomingMessage | any, ...headers: string[]): this {
if (req.req != null && req.req.headers != null) {
req = req.req
}
if (headers.length === 0) {
headers = FORWARD_HEADERS
}
const forwardHeaders: { [key: string]: string | string[] } = {}
for (const header of headers) {
if (req.headers[header] != null) {
forwardHeaders[header] = req.headers[header]
}
}
return this.withHeaders(forwardHeaders)
}
/**
* Creates (by Object.create) a **new client** instance with given query.
* @param query query that will be copy into client.
* @returns a **new client** with with given query.
*/
withQuery (query: Payload): this {
return Object.assign(Object.create(this), {
_query: Object.assign({}, this._query, query),
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Tenant-Id` and `X-Tenant-Type`.
* @param tenantId that will be added to header as `X-Tenant-Id`.
* @param tenantType that will be added to header as `X-Tenant-Type`.
* @returns a **new client** with with given headers.
*/
withTenant (tenantId: string, tenantType = 'organization') {
return this.withHeaders({
'X-Tenant-Id': tenantId,
'X-Tenant-Type': tenantType,
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Operator-ID`.
* @param operatorId that will be added to header as `X-Operator-ID`.
* @returns a **new client** with with given headers.
*/
withOperator (operatorId: string) {
return this.withHeaders({
'X-Operator-ID': operatorId,
})
}
/**
* Creates a JWT token string with given payload and client's appSecrets.
* @param payload Payload to sign, should be an literal object.
* @param options some JWT sign options.
* @returns a token string.
*/
signToken (payload: Payload, options?: jwt.SignOptions) {
return jwt.sign(payload, this._options.appSecrets[0], options)
}
/**
* Creates a periodical changed JWT token string with appId and appSecrets.
* @param payload Payload to sign, should be an literal object.
* @param periodical period in seccond, default to 3600s.
* @param options some JWT sign options.
* @returns a token string.
*/
| (periodical: number = 3600, options?: jwt.SignOptions) {
const iat = Math.floor(Date.now() / (1000 * periodical)) * periodical
const payload = {
iat,
exp: iat + Math.floor(1.1 * periodical),
_appId: this._options.appId,
}
// token change in every hour, optimizing for server cache.
return this.signToken(payload, options)
}
/**
* Decode a JWT token string to literal object payload.
* @param token token to decode.
* @param options some JWT decode options.
* @returns a literal object.
*/
decodeToken (token: string, options?: jwt.DecodeOptions): Payload {
return jwt.decode(token, options) as Payload
}
/**
* Decode and verify a JWT token string to literal object payload.
* if verify failure, it will throw a 401 error (creates by 'http-errors' module)
* @param token token to decode.
* @param options some JWT verify options.
* @returns a literal object.
*/
verifyToken (token: string, options?: jwt.VerifyOptions): Payload {
let error = null
for (const secret of this._options.appSecrets) {
try {
return jwt.verify(token, secret, options) as Payload
} catch (err) {
error = err
}
}
throw createError(401, error)
}
/**
* request with given method, url and data.
* It will genenrate a jwt token by signToken, and set to 'Authorization' header.
* It will merge headers, query and request options that preset into client.
* @param method method to request.
* @param url url to request, it will be resolved with client host.
* @param data data to request.
* @returns a promise with Response
*/
request (method: string, url: string, data?: any) {
// token change in every hour, optimizing for server cache.
const token = this.signAppToken()
const options: RequestOptions & request.UrlOptions = Object.assign({ url: '' }, this._requestOptions)
options.method = method.toUpperCase()
options.url = urlJoin(this._host, url)
options.qs = Object.assign({}, options.qs, this._query)
options.headers =
Object.assign({}, options.headers, this._headers, { Authorization: `Bearer ${token}` })
if (data != null) {
if (options.method === 'GET') {
options.qs = Object.assign(options.qs, data)
} else {
options.body = data
}
}
return Client.request(options).then((resp) => {
if (resp.statusCode === 200 && Number(resp.headers['x-http-status']) > 0) {
resp.statusCode = Number(resp.headers['x-http-status'])
}
return resp
})
}
/**
* request with `GET` method.
* @returns a promise with Response body
*/
get<T> (url: string, data?: any) {
return this.request('GET', url, data).then(assertRes) as Promise<T>
}
/**
* request with `POST` method.
* @returns a promise with Response body
*/
post<T> (url: string, data?: any) {
return this.request('POST', url, data).then(assertRes) as Promise<T>
}
/**
* request with `PUT` method.
* @returns a promise with Response body
*/
put<T> (url: string, data?: any) {
return this.request('PUT', url, data).then(assertRes) as Promise<T>
}
/**
* request with `PATCH` method.
* @returns a promise with Response body
*/
patch<T> (url: string, data?: any) {
return this.request('PATCH', url, data).then(assertRes) as Promise<T>
}
/**
* request with `DELETE` method.
* @returns a promise with Response body
*/
delete<T> (url: string, data?: any) {
return this.request('DELETE', url, data).then(assertRes) as Promise<T>
}
}
/**.
* @returns true if response' statusCode in [200, 300)
*/
export function isSuccess (res: request.RequestResponse) {
return res.statusCode >= 200 && res.statusCode < 300
}
/**.
* @returns a promise that delay with given ms time.
*/
export function delay (ms: number) {
return new Promise((resolve) => $setTimeout(resolve, ms))
}
/**.
* @returns a Response body or throw a error.
*/
export function assertRes<T> (res: Response): T {
if (isSuccess(res) && typeof res.body !== 'string') {
return res.body as T
}
// 追加额外的信息,方便调试
// 注意,不要把调试信息直接返给客户端
const err = createError(res.statusCode, res.statusMessage, {
originalUrl: res.originalUrl,
originalMethod: res.originalMethod,
headers: res.headers,
body: res.body,
elapsedTime: res.elapsedTime == null ? 0 : res.elapsedTime,
timingPhases: res.timingPhases == null ? {} : res.timingPhases,
})
// 标准的 Teambition Web Service 错误响应应该包含 `error` 和 `message` 两个 string 属性
// 其中 error 为错误码,形如 "InvalidPassword", "UserNotFound",客户端可以根据该错误码进行 i18n 错误提示处理
// message 则为英文版的详细错误提示
if (typeof res.body === 'string') {
err.message = res.body
} else if (res.body != null) {
err.name = err.error = res.body.error == null ? err.name : res.body.error
if (res.body.message != null) {
err.message = res.body.message
}
}
throw err
}
// 简单的 url join,未考虑异常输入,这里不能使用 url.resolve,会丢失 path
export function urlJoin (base: string = '', to: string = ''): string {
if (base !== '' && to !== '') {
if (base.endsWith('/')) {
base = base.slice(0, -1)
}
if (!to.startsWith('/')) {
to = '/' + to
}
}
return base + to
}
| signAppToken | identifier_name |
client.ts | 'use strict'
import { IncomingMessage } from 'http'
import request from 'request'
import jwt from 'jsonwebtoken'
import createError from 'http-errors'
import { UA } from './ua'
const $setTimeout = setTimeout
const MONGO_REG = /^[0-9a-f]{24}$/i
// Network Errors, exclude 'ETIMEDOUT' and 'ESOCKETTIMEDOUT'
// https://github.com/teambition/tws-auth/issues/15
const RETRIABLE_ERRORS = ['ECONNRESET', 'ENOTFOUND', 'ECONNREFUSED', 'EHOSTUNREACH', 'EPIPE', 'EAI_AGAIN']
const FORWARD_HEADERS = ['x-request-id', 'x-canary']
/**
* Options for request retrying.
*/
export interface RetryOptions {
retryDelay?: number // (default) wait for 2000 ms before trying again
maxAttempts?: number // (default) try 3 times
retryErrorCodes?: string[]
}
/**
* Extra attributes for response.
*/
export interface ExtraResponse {
attempts: number
originalUrl: string
originalMethod: string
}
/**
* Options for request Client.
*/
export interface ClientOptions {
appId: string
appSecrets: string[]
timeout?: number
host?: string
pool?: any
maxSockets?: number
strictSSL?: boolean
time?: boolean
certChain?: Buffer
privateKey?: Buffer
rootCert?: string | Buffer | string[] | Buffer[]
useQuerystring?: boolean
}
export interface Payload { [key: string]: any }
export type RequestOptions = request.CoreOptions & RetryOptions
export type Response = request.Response & ExtraResponse
/**
* Client for teambition web service.
*/
export class Client {
/**
* a retryable request, wrap of https://github.com/request/request.
* When the connection fails with one of ECONNRESET, ENOTFOUND, ESOCKETTIMEDOUT, ETIMEDOUT,
* ECONNREFUSED, EHOSTUNREACH, EPIPE, EAI_AGAIN, the request will automatically be re-attempted as
* these are often recoverable errors and will go away on retry.
* @param options request options.
* @returns a promise with Response.
*/
public static async request (options: RequestOptions & request.UrlOptions): Promise<Response> {
const retryDelay = options.retryDelay != null ? Math.floor(options.retryDelay) : 2000
const maxAttempts = options.maxAttempts != null ? Math.floor(options.maxAttempts) : 3
const retryErrorCodes = Array.isArray(options.retryErrorCodes) ? options.retryErrorCodes : RETRIABLE_ERRORS
// default to `false`
options.followRedirect = options.followRedirect === true
let err = null
let attempts = 0
while (attempts < maxAttempts) {
attempts++
try {
const res = await new Promise<request.Response>((resolve, reject) => {
request(options, (error: any, response: request.Response, _body: any) => {
if (error != null) {
reject(error)
} else {
resolve(response)
}
})
})
return Object.assign(res, {
attempts,
originalUrl: options.url as string,
originalMethod: options.method as string,
})
} catch (e) {
err = e
if (!retryErrorCodes.includes(e.code)) {
break
}
await delay(retryDelay)
}
}
throw Object.assign(err, {
attempts,
originalUrl: options.url,
originalMethod: options.method,
})
}
private _options: ClientOptions
private _host: string
private _headers: Payload
private _query: Payload
private _requestOptions: RequestOptions
constructor (options: ClientOptions & RetryOptions) {
if (!MONGO_REG.test(options.appId)) {
throw new Error(`appId: ${options.appId} is not a valid mongo object id`)
}
if (!Array.isArray(options.appSecrets) || options.appSecrets.length === 0) {
throw new Error(`appSecrets required`)
}
if (typeof options.host !== 'string' || options.host === '') {
throw new Error(`host required`)
}
options.timeout = options.timeout == null ? 3000 : options.timeout
options.pool = options.pool == null ?
{ maxSockets: options.maxSockets == null ? 100 : options.maxSockets } : options.pool
options.strictSSL = options.strictSSL === true
options.retryDelay = options.retryDelay == null ? 2000 : options.retryDelay
options.maxAttempts = options.maxAttempts == null ? 3 : options.maxAttempts
this._options = options
this._host = options.host
this._headers = { 'User-Agent': UA }
this._query = {}
this._requestOptions = {
json: true,
forever: true,
strictSSL: options.strictSSL,
timeout: options.timeout,
cert: options.certChain,
key: options.privateKey,
ca: options.rootCert,
pool: options.pool,
time: options.time,
retryDelay: options.retryDelay,
maxAttempts: options.maxAttempts,
retryErrorCodes: options.retryErrorCodes,
useQuerystring: options.useQuerystring,
} as RequestOptions
}
/**
* @returns User-Agent on the client.
*/
get UA (): string {
const ua = this._headers['User-Agent']
return ua == null ? '' : ua
}
/**
* Set User-Agent to the client.
* @param ua User-Agent string.
*/
set UA (ua: string) {
this._headers['User-Agent'] = ua
}
/**
* @returns host on the client.
*/
get host () {
return this._host
}
/**
* @returns preset headers on the client.
*/
get headers () {
return this._headers
}
/**
* @returns preset query on the client.
*/
get query () {
return this._query
}
/**
* @returns preset request options on the client.
*/
get requestOptions () {
return this._requestOptions
}
/**
* Creates (by Object.create) a **new client** instance with given service methods.
* @param servicePrototype service methods that will be mount to client.
* @param servicehost service host for new client.
* @returns a **new client** with with given service methods.
*/
withService<T> (serviceMethod: T, servicehost: string = ''): this & T {
const srv = Object.assign<this, T>(Object.create(this), serviceMethod)
if (servicehost !== '') {
srv._host = servicehost
}
return srv
}
/**
* Creates (by Object.create) a **new client** instance with given request options.
* @param options request options that will be copy into client.
* @returns a **new client** with with given request options.
*/
withOptions (options: RequestOptions): this {
return Object.assign(Object.create(this), {
_requestOptions: Object.assign({}, this._requestOptions, options),
})
}
/**
* Creates (by Object.create) a **new client** instance with given headers.
* @param headers headers that will be copy into client.
* @returns a **new client** with with given headers.
*/
withHeaders (headers: Payload): this {
return Object.assign(Object.create(this), {
_headers: Object.assign({}, this._headers, headers),
})
}
/**
* Creates (by Object.create) a **new client** instance with headers copy from the request.
* @param req IncomingMessage object that headers read from.
* @param headers headers that will be copy into client.
* @returns a **new client** with with given headers.
*/
forwardHeaders (req: IncomingMessage | any, ...headers: string[]): this {
if (req.req != null && req.req.headers != null) {
req = req.req
}
if (headers.length === 0) {
headers = FORWARD_HEADERS
}
const forwardHeaders: { [key: string]: string | string[] } = {}
for (const header of headers) {
if (req.headers[header] != null) {
forwardHeaders[header] = req.headers[header]
}
}
return this.withHeaders(forwardHeaders)
}
/**
* Creates (by Object.create) a **new client** instance with given query.
* @param query query that will be copy into client.
* @returns a **new client** with with given query.
*/
withQuery (query: Payload): this {
return Object.assign(Object.create(this), {
_query: Object.assign({}, this._query, query),
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Tenant-Id` and `X-Tenant-Type`.
* @param tenantId that will be added to header as `X-Tenant-Id`.
* @param tenantType that will be added to header as `X-Tenant-Type`.
* @returns a **new client** with with given headers.
*/
withTenant (tenantId: string, tenantType = 'organization') {
return this.withHeaders({
'X-Tenant-Id': tenantId,
'X-Tenant-Type': tenantType,
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Operator-ID`.
* @param operatorId that will be added to header as `X-Operator-ID`.
* @returns a **new client** with with given headers.
*/
withOperator (operatorId: string) {
return this.withHeaders({
'X-Operator-ID': operatorId,
})
}
/**
* Creates a JWT token string with given payload and client's appSecrets.
* @param payload Payload to sign, should be an literal object.
* @param options some JWT sign options.
* @returns a token string.
*/
signToken (payload: Payload, options?: jwt.SignOptions) {
return jwt.sign(payload, this._options.appSecrets[0], options)
}
/**
* Creates a periodical changed JWT token string with appId and appSecrets.
* @param payload Payload to sign, should be an literal object.
* @param periodical period in seccond, default to 3600s.
* @param options some JWT sign options.
* @returns a token string.
*/
signAppToken (periodical: number = 3600, options?: jwt.SignOptions) {
const iat = Math.floor(Date.now() / (1000 * periodical)) * periodical
const payload = {
iat,
exp: iat + Math.floor(1.1 * periodical),
_appId: this._options.appId,
}
// token change in every hour, optimizing for server cache.
return this.signToken(payload, options)
}
/**
* Decode a JWT token string to literal object payload.
* @param token token to decode.
* @param options some JWT decode options.
* @returns a literal object.
*/
decodeToken (token: string, options?: jwt.DecodeOptions): Payload {
return jwt.decode(token, options) as Payload
}
/**
* Decode and verify a JWT token string to literal object payload.
* if verify failure, it will throw a 401 error (creates by 'http-errors' module)
* @param token token to decode.
* @param options some JWT verify options.
* @returns a literal object.
*/
verifyToken (token: string, options?: jwt.VerifyOptions): Payload {
let error = null
for (const secret of this._options.appSecrets) {
try {
return jwt.verify(token, secret, options) as Payload
} catch (err) {
error = err
}
}
throw createError(401, error)
} | * request with given method, url and data.
* It will genenrate a jwt token by signToken, and set to 'Authorization' header.
* It will merge headers, query and request options that preset into client.
* @param method method to request.
* @param url url to request, it will be resolved with client host.
* @param data data to request.
* @returns a promise with Response
*/
request (method: string, url: string, data?: any) {
// token change in every hour, optimizing for server cache.
const token = this.signAppToken()
const options: RequestOptions & request.UrlOptions = Object.assign({ url: '' }, this._requestOptions)
options.method = method.toUpperCase()
options.url = urlJoin(this._host, url)
options.qs = Object.assign({}, options.qs, this._query)
options.headers =
Object.assign({}, options.headers, this._headers, { Authorization: `Bearer ${token}` })
if (data != null) {
if (options.method === 'GET') {
options.qs = Object.assign(options.qs, data)
} else {
options.body = data
}
}
return Client.request(options).then((resp) => {
if (resp.statusCode === 200 && Number(resp.headers['x-http-status']) > 0) {
resp.statusCode = Number(resp.headers['x-http-status'])
}
return resp
})
}
/**
* request with `GET` method.
* @returns a promise with Response body
*/
get<T> (url: string, data?: any) {
return this.request('GET', url, data).then(assertRes) as Promise<T>
}
/**
* request with `POST` method.
* @returns a promise with Response body
*/
post<T> (url: string, data?: any) {
return this.request('POST', url, data).then(assertRes) as Promise<T>
}
/**
* request with `PUT` method.
* @returns a promise with Response body
*/
put<T> (url: string, data?: any) {
return this.request('PUT', url, data).then(assertRes) as Promise<T>
}
/**
* request with `PATCH` method.
* @returns a promise with Response body
*/
patch<T> (url: string, data?: any) {
return this.request('PATCH', url, data).then(assertRes) as Promise<T>
}
/**
* request with `DELETE` method.
* @returns a promise with Response body
*/
delete<T> (url: string, data?: any) {
return this.request('DELETE', url, data).then(assertRes) as Promise<T>
}
}
/**.
* @returns true if response' statusCode in [200, 300)
*/
export function isSuccess (res: request.RequestResponse) {
return res.statusCode >= 200 && res.statusCode < 300
}
/**.
* @returns a promise that delay with given ms time.
*/
export function delay (ms: number) {
return new Promise((resolve) => $setTimeout(resolve, ms))
}
/**.
* @returns a Response body or throw a error.
*/
export function assertRes<T> (res: Response): T {
if (isSuccess(res) && typeof res.body !== 'string') {
return res.body as T
}
// 追加额外的信息,方便调试
// 注意,不要把调试信息直接返给客户端
const err = createError(res.statusCode, res.statusMessage, {
originalUrl: res.originalUrl,
originalMethod: res.originalMethod,
headers: res.headers,
body: res.body,
elapsedTime: res.elapsedTime == null ? 0 : res.elapsedTime,
timingPhases: res.timingPhases == null ? {} : res.timingPhases,
})
// 标准的 Teambition Web Service 错误响应应该包含 `error` 和 `message` 两个 string 属性
// 其中 error 为错误码,形如 "InvalidPassword", "UserNotFound",客户端可以根据该错误码进行 i18n 错误提示处理
// message 则为英文版的详细错误提示
if (typeof res.body === 'string') {
err.message = res.body
} else if (res.body != null) {
err.name = err.error = res.body.error == null ? err.name : res.body.error
if (res.body.message != null) {
err.message = res.body.message
}
}
throw err
}
// 简单的 url join,未考虑异常输入,这里不能使用 url.resolve,会丢失 path
export function urlJoin (base: string = '', to: string = ''): string {
if (base !== '' && to !== '') {
if (base.endsWith('/')) {
base = base.slice(0, -1)
}
if (!to.startsWith('/')) {
to = '/' + to
}
}
return base + to
} |
/** | random_line_split |
client.ts | 'use strict'
import { IncomingMessage } from 'http'
import request from 'request'
import jwt from 'jsonwebtoken'
import createError from 'http-errors'
import { UA } from './ua'
const $setTimeout = setTimeout
const MONGO_REG = /^[0-9a-f]{24}$/i
// Network Errors, exclude 'ETIMEDOUT' and 'ESOCKETTIMEDOUT'
// https://github.com/teambition/tws-auth/issues/15
const RETRIABLE_ERRORS = ['ECONNRESET', 'ENOTFOUND', 'ECONNREFUSED', 'EHOSTUNREACH', 'EPIPE', 'EAI_AGAIN']
const FORWARD_HEADERS = ['x-request-id', 'x-canary']
/**
* Options for request retrying.
*/
export interface RetryOptions {
retryDelay?: number // (default) wait for 2000 ms before trying again
maxAttempts?: number // (default) try 3 times
retryErrorCodes?: string[]
}
/**
* Extra attributes for response.
*/
export interface ExtraResponse {
attempts: number
originalUrl: string
originalMethod: string
}
/**
* Options for request Client.
*/
export interface ClientOptions {
appId: string
appSecrets: string[]
timeout?: number
host?: string
pool?: any
maxSockets?: number
strictSSL?: boolean
time?: boolean
certChain?: Buffer
privateKey?: Buffer
rootCert?: string | Buffer | string[] | Buffer[]
useQuerystring?: boolean
}
export interface Payload { [key: string]: any }
export type RequestOptions = request.CoreOptions & RetryOptions
export type Response = request.Response & ExtraResponse
/**
* Client for teambition web service.
*/
export class Client {
/**
* a retryable request, wrap of https://github.com/request/request.
* When the connection fails with one of ECONNRESET, ENOTFOUND, ESOCKETTIMEDOUT, ETIMEDOUT,
* ECONNREFUSED, EHOSTUNREACH, EPIPE, EAI_AGAIN, the request will automatically be re-attempted as
* these are often recoverable errors and will go away on retry.
* @param options request options.
* @returns a promise with Response.
*/
public static async request (options: RequestOptions & request.UrlOptions): Promise<Response> {
const retryDelay = options.retryDelay != null ? Math.floor(options.retryDelay) : 2000
const maxAttempts = options.maxAttempts != null ? Math.floor(options.maxAttempts) : 3
const retryErrorCodes = Array.isArray(options.retryErrorCodes) ? options.retryErrorCodes : RETRIABLE_ERRORS
// default to `false`
options.followRedirect = options.followRedirect === true
let err = null
let attempts = 0
while (attempts < maxAttempts) {
attempts++
try {
const res = await new Promise<request.Response>((resolve, reject) => {
request(options, (error: any, response: request.Response, _body: any) => {
if (error != null) {
reject(error)
} else {
resolve(response)
}
})
})
return Object.assign(res, {
attempts,
originalUrl: options.url as string,
originalMethod: options.method as string,
})
} catch (e) {
err = e
if (!retryErrorCodes.includes(e.code)) {
break
}
await delay(retryDelay)
}
}
throw Object.assign(err, {
attempts,
originalUrl: options.url,
originalMethod: options.method,
})
}
private _options: ClientOptions
private _host: string
private _headers: Payload
private _query: Payload
private _requestOptions: RequestOptions
constructor (options: ClientOptions & RetryOptions) {
if (!MONGO_REG.test(options.appId)) {
throw new Error(`appId: ${options.appId} is not a valid mongo object id`)
}
if (!Array.isArray(options.appSecrets) || options.appSecrets.length === 0) {
throw new Error(`appSecrets required`)
}
if (typeof options.host !== 'string' || options.host === '') {
throw new Error(`host required`)
}
options.timeout = options.timeout == null ? 3000 : options.timeout
options.pool = options.pool == null ?
{ maxSockets: options.maxSockets == null ? 100 : options.maxSockets } : options.pool
options.strictSSL = options.strictSSL === true
options.retryDelay = options.retryDelay == null ? 2000 : options.retryDelay
options.maxAttempts = options.maxAttempts == null ? 3 : options.maxAttempts
this._options = options
this._host = options.host
this._headers = { 'User-Agent': UA }
this._query = {}
this._requestOptions = {
json: true,
forever: true,
strictSSL: options.strictSSL,
timeout: options.timeout,
cert: options.certChain,
key: options.privateKey,
ca: options.rootCert,
pool: options.pool,
time: options.time,
retryDelay: options.retryDelay,
maxAttempts: options.maxAttempts,
retryErrorCodes: options.retryErrorCodes,
useQuerystring: options.useQuerystring,
} as RequestOptions
}
/**
* @returns User-Agent on the client.
*/
get UA (): string {
const ua = this._headers['User-Agent']
return ua == null ? '' : ua
}
/**
* Set User-Agent to the client.
* @param ua User-Agent string.
*/
set UA (ua: string) {
this._headers['User-Agent'] = ua
}
/**
* @returns host on the client.
*/
get host () {
return this._host
}
/**
* @returns preset headers on the client.
*/
get headers () {
return this._headers
}
/**
* @returns preset query on the client.
*/
get query () {
return this._query
}
/**
* @returns preset request options on the client.
*/
get requestOptions () {
return this._requestOptions
}
/**
* Creates (by Object.create) a **new client** instance with given service methods.
* @param servicePrototype service methods that will be mount to client.
* @param servicehost service host for new client.
* @returns a **new client** with with given service methods.
*/
withService<T> (serviceMethod: T, servicehost: string = ''): this & T {
const srv = Object.assign<this, T>(Object.create(this), serviceMethod)
if (servicehost !== '') {
srv._host = servicehost
}
return srv
}
/**
* Creates (by Object.create) a **new client** instance with given request options.
* @param options request options that will be copy into client.
* @returns a **new client** with with given request options.
*/
withOptions (options: RequestOptions): this {
return Object.assign(Object.create(this), {
_requestOptions: Object.assign({}, this._requestOptions, options),
})
}
/**
* Creates (by Object.create) a **new client** instance with given headers.
* @param headers headers that will be copy into client.
* @returns a **new client** with with given headers.
*/
withHeaders (headers: Payload): this {
return Object.assign(Object.create(this), {
_headers: Object.assign({}, this._headers, headers),
})
}
/**
* Creates (by Object.create) a **new client** instance with headers copy from the request.
* @param req IncomingMessage object that headers read from.
* @param headers headers that will be copy into client.
* @returns a **new client** with with given headers.
*/
forwardHeaders (req: IncomingMessage | any, ...headers: string[]): this {
if (req.req != null && req.req.headers != null) {
req = req.req
}
if (headers.length === 0) |
const forwardHeaders: { [key: string]: string | string[] } = {}
for (const header of headers) {
if (req.headers[header] != null) {
forwardHeaders[header] = req.headers[header]
}
}
return this.withHeaders(forwardHeaders)
}
/**
* Creates (by Object.create) a **new client** instance with given query.
* @param query query that will be copy into client.
* @returns a **new client** with with given query.
*/
withQuery (query: Payload): this {
return Object.assign(Object.create(this), {
_query: Object.assign({}, this._query, query),
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Tenant-Id` and `X-Tenant-Type`.
* @param tenantId that will be added to header as `X-Tenant-Id`.
* @param tenantType that will be added to header as `X-Tenant-Type`.
* @returns a **new client** with with given headers.
*/
withTenant (tenantId: string, tenantType = 'organization') {
return this.withHeaders({
'X-Tenant-Id': tenantId,
'X-Tenant-Type': tenantType,
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Operator-ID`.
* @param operatorId that will be added to header as `X-Operator-ID`.
* @returns a **new client** with with given headers.
*/
withOperator (operatorId: string) {
return this.withHeaders({
'X-Operator-ID': operatorId,
})
}
/**
* Creates a JWT token string with given payload and client's appSecrets.
* @param payload Payload to sign, should be an literal object.
* @param options some JWT sign options.
* @returns a token string.
*/
signToken (payload: Payload, options?: jwt.SignOptions) {
return jwt.sign(payload, this._options.appSecrets[0], options)
}
/**
* Creates a periodical changed JWT token string with appId and appSecrets.
* @param payload Payload to sign, should be an literal object.
* @param periodical period in seccond, default to 3600s.
* @param options some JWT sign options.
* @returns a token string.
*/
signAppToken (periodical: number = 3600, options?: jwt.SignOptions) {
const iat = Math.floor(Date.now() / (1000 * periodical)) * periodical
const payload = {
iat,
exp: iat + Math.floor(1.1 * periodical),
_appId: this._options.appId,
}
// token change in every hour, optimizing for server cache.
return this.signToken(payload, options)
}
/**
* Decode a JWT token string to literal object payload.
* @param token token to decode.
* @param options some JWT decode options.
* @returns a literal object.
*/
decodeToken (token: string, options?: jwt.DecodeOptions): Payload {
return jwt.decode(token, options) as Payload
}
/**
* Decode and verify a JWT token string to literal object payload.
* if verify failure, it will throw a 401 error (creates by 'http-errors' module)
* @param token token to decode.
* @param options some JWT verify options.
* @returns a literal object.
*/
verifyToken (token: string, options?: jwt.VerifyOptions): Payload {
let error = null
for (const secret of this._options.appSecrets) {
try {
return jwt.verify(token, secret, options) as Payload
} catch (err) {
error = err
}
}
throw createError(401, error)
}
/**
* request with given method, url and data.
* It will genenrate a jwt token by signToken, and set to 'Authorization' header.
* It will merge headers, query and request options that preset into client.
* @param method method to request.
* @param url url to request, it will be resolved with client host.
* @param data data to request.
* @returns a promise with Response
*/
request (method: string, url: string, data?: any) {
// token change in every hour, optimizing for server cache.
const token = this.signAppToken()
const options: RequestOptions & request.UrlOptions = Object.assign({ url: '' }, this._requestOptions)
options.method = method.toUpperCase()
options.url = urlJoin(this._host, url)
options.qs = Object.assign({}, options.qs, this._query)
options.headers =
Object.assign({}, options.headers, this._headers, { Authorization: `Bearer ${token}` })
if (data != null) {
if (options.method === 'GET') {
options.qs = Object.assign(options.qs, data)
} else {
options.body = data
}
}
return Client.request(options).then((resp) => {
if (resp.statusCode === 200 && Number(resp.headers['x-http-status']) > 0) {
resp.statusCode = Number(resp.headers['x-http-status'])
}
return resp
})
}
/**
* request with `GET` method.
* @returns a promise with Response body
*/
get<T> (url: string, data?: any) {
return this.request('GET', url, data).then(assertRes) as Promise<T>
}
/**
* request with `POST` method.
* @returns a promise with Response body
*/
post<T> (url: string, data?: any) {
return this.request('POST', url, data).then(assertRes) as Promise<T>
}
/**
* request with `PUT` method.
* @returns a promise with Response body
*/
put<T> (url: string, data?: any) {
return this.request('PUT', url, data).then(assertRes) as Promise<T>
}
/**
* request with `PATCH` method.
* @returns a promise with Response body
*/
patch<T> (url: string, data?: any) {
return this.request('PATCH', url, data).then(assertRes) as Promise<T>
}
/**
* request with `DELETE` method.
* @returns a promise with Response body
*/
delete<T> (url: string, data?: any) {
return this.request('DELETE', url, data).then(assertRes) as Promise<T>
}
}
/**.
* @returns true if response' statusCode in [200, 300)
*/
export function isSuccess (res: request.RequestResponse) {
return res.statusCode >= 200 && res.statusCode < 300
}
/**.
* @returns a promise that delay with given ms time.
*/
export function delay (ms: number) {
return new Promise((resolve) => $setTimeout(resolve, ms))
}
/**.
* @returns a Response body or throw a error.
*/
export function assertRes<T> (res: Response): T {
if (isSuccess(res) && typeof res.body !== 'string') {
return res.body as T
}
// 追加额外的信息,方便调试
// 注意,不要把调试信息直接返给客户端
const err = createError(res.statusCode, res.statusMessage, {
originalUrl: res.originalUrl,
originalMethod: res.originalMethod,
headers: res.headers,
body: res.body,
elapsedTime: res.elapsedTime == null ? 0 : res.elapsedTime,
timingPhases: res.timingPhases == null ? {} : res.timingPhases,
})
// 标准的 Teambition Web Service 错误响应应该包含 `error` 和 `message` 两个 string 属性
// 其中 error 为错误码,形如 "InvalidPassword", "UserNotFound",客户端可以根据该错误码进行 i18n 错误提示处理
// message 则为英文版的详细错误提示
if (typeof res.body === 'string') {
err.message = res.body
} else if (res.body != null) {
err.name = err.error = res.body.error == null ? err.name : res.body.error
if (res.body.message != null) {
err.message = res.body.message
}
}
throw err
}
// 简单的 url join,未考虑异常输入,这里不能使用 url.resolve,会丢失 path
export function urlJoin (base: string = '', to: string = ''): string {
if (base !== '' && to !== '') {
if (base.endsWith('/')) {
base = base.slice(0, -1)
}
if (!to.startsWith('/')) {
to = '/' + to
}
}
return base + to
}
| {
headers = FORWARD_HEADERS
} | conditional_block |
cluster.go | // Package cluster holds the cluster CRD logic and definitions
// A cluster is comprised of a primary service, replica service,
// primary deployment, and replica deployment
package cluster
/*
Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"encoding/json"
"fmt"
"strconv"
"time"
"github.com/crunchydata/postgres-operator/internal/config"
"github.com/crunchydata/postgres-operator/internal/kubeapi"
"github.com/crunchydata/postgres-operator/internal/operator"
"github.com/crunchydata/postgres-operator/internal/operator/backrest"
"github.com/crunchydata/postgres-operator/internal/operator/pvc"
"github.com/crunchydata/postgres-operator/internal/util"
crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1"
"github.com/crunchydata/postgres-operator/pkg/events"
log "github.com/sirupsen/logrus"
apps_v1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// ServiceTemplateFields ...
type ServiceTemplateFields struct {
Name string
ServiceName string
ClusterName string
Port string
PGBadgerPort string
ExporterPort string
ServiceType string
}
// ReplicaSuffix ...
const ReplicaSuffix = "-replica"
// contstants defining the names of the various sidecar containers
const (
collectCCPImage = "crunchy-collect"
pgBadgerCCPImage = "crunchy-pgbadger"
crunchyadmCCPImage = "crunchy-admin"
)
func AddClusterBase(clientset kubeapi.Interface, cl *crv1.Pgcluster, namespace string) {
var err error
if cl.Spec.Status == crv1.CompletedStatus {
errorMsg := "crv1 pgcluster " + cl.Spec.ClusterName + " is already marked complete, will not recreate"
log.Warn(errorMsg)
publishClusterCreateFailure(cl, errorMsg)
return
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cl, namespace, cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], cl.Spec.PrimaryStorage)
if err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
return
}
if err = addClusterCreateMissingService(clientset, cl, namespace); err != nil {
log.Error("error in creating primary service " + err.Error())
publishClusterCreateFailure(cl, err.Error())
return
}
// Create a configMap for the cluster that will be utilized to configure whether or not
// initialization logic should be executed when the postgres-ha container is run. This
// ensures that the original primary in a PG cluster does not attempt to run any initialization
// logic following a restart of the container.
// If the configmap already exists, the cluster creation will continue as this is required
// for certain pgcluster upgrades.
if err = operator.CreatePGHAConfigMap(clientset, cl, namespace); err != nil &&
!kerrors.IsAlreadyExists(err) {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
return
}
if err := annotateBackrestSecret(clientset, cl); err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
}
if err := addClusterDeployments(clientset, cl, namespace,
dataVolume, walVolume, tablespaceVolumes); err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
return
}
// Now scale the repo deployment only to ensure it is initialized prior to the primary DB.
// Once the repo is ready, the primary database deployment will then also be scaled to 1.
clusterInfo, err := ScaleClusterDeployments(clientset, *cl, 1, false, false, true, false)
if err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
}
log.Debugf("Scaled pgBackRest repo deployment %s to 1 to proceed with initializing "+
"cluster %s", clusterInfo.PrimaryDeployment, cl.GetName())
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/status", crv1.CompletedStatus, crv1.PgclusterResourcePlural, cl.Spec.Name, namespace)
if err != nil {
log.Error("error in status patch " + err.Error())
}
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/PrimaryStorage/name", dataVolume.PersistentVolumeClaimName, crv1.PgclusterResourcePlural, cl.Spec.Name, namespace)
if err != nil {
log.Error("error in pvcname patch " + err.Error())
}
//publish create cluster event
//capture the cluster creation event
pgouser := cl.ObjectMeta.Labels[config.LABEL_PGOUSER]
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventCreateClusterFormat{
EventHeader: events.EventHeader{
Namespace: cl.ObjectMeta.Namespace,
Username: pgouser,
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventCreateCluster,
},
Clustername: cl.ObjectMeta.Name,
WorkflowID: cl.ObjectMeta.Labels[config.LABEL_WORKFLOW_ID],
}
err = events.Publish(f)
if err != nil {
log.Error(err.Error())
}
//add replicas if requested
if cl.Spec.Replicas != "" {
replicaCount, err := strconv.Atoi(cl.Spec.Replicas)
if err != nil {
log.Error("error in replicas value " + err.Error())
publishClusterCreateFailure(cl, err.Error())
return
}
//create a CRD for each replica
for i := 0; i < replicaCount; i++ {
spec := crv1.PgreplicaSpec{}
//get the storage config
spec.ReplicaStorage = cl.Spec.ReplicaStorage
spec.UserLabels = cl.Spec.UserLabels
//the replica should not use the same node labels as the primary
spec.UserLabels[config.LABEL_NODE_LABEL_KEY] = ""
spec.UserLabels[config.LABEL_NODE_LABEL_VALUE] = ""
labels := make(map[string]string)
labels[config.LABEL_PG_CLUSTER] = cl.Spec.Name
spec.ClusterName = cl.Spec.Name
uniqueName := util.RandStringBytesRmndr(4)
labels[config.LABEL_NAME] = cl.Spec.Name + "-" + uniqueName
spec.Name = labels[config.LABEL_NAME]
newInstance := &crv1.Pgreplica{
ObjectMeta: metav1.ObjectMeta{
Name: labels[config.LABEL_NAME],
Labels: labels,
},
Spec: spec,
Status: crv1.PgreplicaStatus{
State: crv1.PgreplicaStateCreated,
Message: "Created, not processed yet",
},
}
_, err = clientset.CrunchydataV1().Pgreplicas(namespace).Create(newInstance)
if err != nil {
log.Error(" in creating Pgreplica instance" + err.Error())
publishClusterCreateFailure(cl, err.Error())
}
}
}
}
// AddClusterBootstrap creates the resources needed to bootstrap a new cluster from an existing
// data source. Specifically, this function creates the bootstrap job that will be run to
// bootstrap the cluster, along with supporting resources (e.g. ConfigMaps and volumes).
func AddClusterBootstrap(clientset kubeapi.Interface, cluster *crv1.Pgcluster) error {
namespace := cluster.GetNamespace()
if err := operator.CreatePGHAConfigMap(clientset, cluster, namespace); err != nil &&
!kerrors.IsAlreadyExists(err) {
publishClusterCreateFailure(cluster, err.Error())
return err
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cluster, namespace,
cluster.Annotations[config.ANNOTATION_CURRENT_PRIMARY], cluster.Spec.PrimaryStorage)
if err != nil {
publishClusterCreateFailure(cluster, err.Error())
return err
}
if err := addClusterBootstrapJob(clientset, cluster, namespace, dataVolume,
walVolume, tablespaceVolumes); err != nil && !kerrors.IsAlreadyExists(err) {
publishClusterCreateFailure(cluster, err.Error())
return err
}
patch, err := json.Marshal(map[string]interface{}{
"status": crv1.PgclusterStatus{
State: crv1.PgclusterStateBootstrapping,
Message: "Bootstapping cluster from an existing data source",
},
})
if err == nil {
_, err = clientset.CrunchydataV1().Pgclusters(namespace).Patch(cluster.Name, types.MergePatchType, patch)
}
if err != nil {
return err
}
return nil
}
// AddBootstrapRepo creates a pgBackRest repository and associated service to use when
// bootstrapping a cluster from an existing data source. If an existing repo is detected
// and is being used to bootstrap another cluster, then an error is returned. If an existing
// repo is detected and is not associated with a bootstrap job (but rather an active cluster),
// then no action is taken and the function resturns. Also, in addition to returning an error
// in the event an error is encountered, the function also returns a 'repoCreated' bool that
// specifies whether or not a repo was actually created.
func AddBootstrapRepo(clientset kubernetes.Interface, cluster *crv1.Pgcluster) (repoCreated bool, err error) {
restoreClusterName := cluster.Spec.PGDataSource.RestoreFrom
repoName := fmt.Sprintf(util.BackrestRepoServiceName, restoreClusterName)
found := true
repoDeployment, err := clientset.AppsV1().Deployments(cluster.GetNamespace()).Get(
repoName, metav1.GetOptions{})
if err != nil {
if !kerrors.IsNotFound(err) {
return
}
found = false
}
if !found {
if err = backrest.CreateRepoDeployment(clientset, cluster, false, true, 1); err != nil {
return
}
repoCreated = true
} else if _, ok := repoDeployment.GetLabels()[config.LABEL_PGHA_BOOTSTRAP]; ok {
err = fmt.Errorf("Unable to create bootstrap repo %s to bootstrap cluster %s "+
"(namespace %s) because it is already running to bootstrap another cluster",
repoName, cluster.GetName(), cluster.GetNamespace())
return
}
return
}
// DeleteClusterBase ...
func DeleteClusterBase(clientset kubernetes.Interface, cl *crv1.Pgcluster, namespace string) {
DeleteCluster(clientset, cl, namespace)
//delete any existing configmaps
if err := deleteConfigMaps(clientset, cl.Spec.Name, namespace); err != nil {
log.Error(err)
}
//delete any existing pgtasks ???
//publish delete cluster event
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventDeleteClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: cl.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventDeleteCluster,
},
Clustername: cl.Spec.Name,
}
if err := events.Publish(f); err != nil {
log.Error(err)
}
}
// ScaleBase ...
func ScaleBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) {
if replica.Spec.Status == crv1.CompletedStatus {
log.Warn("crv1 pgreplica " + replica.Spec.Name + " is already marked complete, will not recreate")
return
}
//get the pgcluster CRD to base the replica off of
cluster, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(replica.Spec.ClusterName, metav1.GetOptions{})
if err != nil {
return
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cluster, namespace, replica.Spec.Name, replica.Spec.ReplicaStorage)
if err != nil {
log.Error(err)
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//update the replica CRD pvcname
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/replicastorage/name", dataVolume.PersistentVolumeClaimName, crv1.PgreplicaResourcePlural, replica.Spec.Name, namespace)
if err != nil {
log.Error("error in pvcname patch " + err.Error())
}
//create the replica service if it doesnt exist
if err = scaleReplicaCreateMissingService(clientset, replica, cluster, namespace); err != nil {
log.Error(err)
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//instantiate the replica
if err = scaleReplicaCreateDeployment(clientset, replica, cluster, namespace, dataVolume, walVolume, tablespaceVolumes); err != nil {
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//update the replica CRD status
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/status", crv1.CompletedStatus, crv1.PgreplicaResourcePlural, replica.Spec.Name, namespace)
if err != nil {
log.Error("error in status patch " + err.Error())
}
//publish event for replica creation
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventScaleClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: replica.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventScaleCluster,
},
Clustername: cluster.Spec.UserLabels[config.LABEL_REPLICA_NAME],
Replicaname: cluster.Spec.UserLabels[config.LABEL_PG_CLUSTER],
}
if err = events.Publish(f); err != nil {
log.Error(err.Error())
}
}
// ScaleDownBase ...
func ScaleDownBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) {
//get the pgcluster CRD for this replica
_, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(replica.Spec.ClusterName, metav1.GetOptions{})
if err != nil {
return
}
DeleteReplica(clientset, replica, namespace)
//publish event for scale down
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventScaleDownClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: replica.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventScaleDownCluster,
},
Clustername: replica.Spec.ClusterName,
}
err = events.Publish(f)
if err != nil {
log.Error(err.Error())
return
}
} | // UpdateResources updates the PostgreSQL instance Deployments to reflect the
// update resources (i.e. CPU, memory)
func UpdateResources(clientset kubernetes.Interface, restConfig *rest.Config, cluster *crv1.Pgcluster) error {
// get a list of all of the instance deployments for the cluster
deployments, err := operator.GetInstanceDeployments(clientset, cluster)
if err != nil {
return err
}
// iterate through each PostgreSQL instance deployment and update the
// resource values for the database container
//
// NOTE: a future version (near future) will first try to detect the primary
// so that all the replicas are updated first, and then the primary gets the
// update
for _, deployment := range deployments.Items {
// first, initialize the requests/limits resource to empty Resource Lists
deployment.Spec.Template.Spec.Containers[0].Resources.Requests = v1.ResourceList{}
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = v1.ResourceList{}
// now, simply deep copy the values from the CRD
if cluster.Spec.Resources != nil {
deployment.Spec.Template.Spec.Containers[0].Resources.Requests = cluster.Spec.Resources.DeepCopy()
}
if cluster.Spec.Limits != nil {
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = cluster.Spec.Limits.DeepCopy()
}
// Before applying the update, we want to explicitly stop PostgreSQL on each
// instance. This prevents PostgreSQL from having to boot up in crash
// recovery mode.
//
// If an error is returned, we only issue a warning
if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {
log.Warn(err)
}
// update the deployment with the new values
if _, err := clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil {
return err
}
}
return nil
}
// UpdateTablespaces updates the PostgreSQL instance Deployments to update
// what tablespaces are mounted.
// Though any new tablespaces are present in the CRD, to attempt to do less work
// this function takes a map of the new tablespaces that are being added, so we
// only have to check and create the PVCs that are being mounted at this time
//
// To do this, iterate through the tablespace mount map that is present in the
// new cluster.
func UpdateTablespaces(clientset kubernetes.Interface, restConfig *rest.Config,
cluster *crv1.Pgcluster, newTablespaces map[string]crv1.PgStorageSpec) error {
// first, get a list of all of the instance deployments for the cluster
deployments, err := operator.GetInstanceDeployments(clientset, cluster)
if err != nil {
return err
}
tablespaceVolumes := make([]map[string]operator.StorageResult, len(deployments.Items))
// now we can start creating the new tablespaces! First, create the new
// PVCs. The PVCs are created for each **instance** in the cluster, as every
// instance needs to have a distinct PVC for each tablespace
for i, deployment := range deployments.Items {
tablespaceVolumes[i] = make(map[string]operator.StorageResult)
for tablespaceName, storageSpec := range newTablespaces {
// get the name of the tablespace PVC for that instance
tablespacePVCName := operator.GetTablespacePVCName(deployment.Name, tablespaceName)
log.Debugf("creating tablespace PVC [%s] for [%s]", tablespacePVCName, deployment.Name)
// and now create it! If it errors, we just need to return, which
// potentially leaves things in an inconsistent state, but at this point
// only PVC objects have been created
tablespaceVolumes[i][tablespaceName], err = pvc.CreateIfNotExists(clientset,
storageSpec, tablespacePVCName, cluster.Name, cluster.Namespace)
if err != nil {
return err
}
}
}
// now the fun step: update each deployment with the new volumes
for i, deployment := range deployments.Items {
log.Debugf("attach tablespace volumes to [%s]", deployment.Name)
// iterate through each table space and prepare the Volume and
// VolumeMount clause for each instance
for tablespaceName := range newTablespaces {
// this is the volume to be added for the tablespace
volume := v1.Volume{
Name: operator.GetTablespaceVolumeName(tablespaceName),
VolumeSource: tablespaceVolumes[i][tablespaceName].VolumeSource(),
}
// add the volume to the list of volumes
deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volume)
// now add the volume mount point to that of the database container
volumeMount := v1.VolumeMount{
MountPath: fmt.Sprintf("%s%s", config.VOLUME_TABLESPACE_PATH_PREFIX, tablespaceName),
Name: operator.GetTablespaceVolumeName(tablespaceName),
}
// we can do this as we always know that the "database" container is the
// first container in the list
deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(
deployment.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
// add any supplemental groups specified in storage configuration.
// SecurityContext is always initialized because we use fsGroup.
deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups = append(
deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups,
tablespaceVolumes[i][tablespaceName].SupplementalGroups...)
}
// find the "PGHA_TABLESPACES" value and update it with the new tablespace
// name list
ok := false
for i, envVar := range deployment.Spec.Template.Spec.Containers[0].Env {
// yup, it's an old fashioned linear time lookup
if envVar.Name == "PGHA_TABLESPACES" {
deployment.Spec.Template.Spec.Containers[0].Env[i].Value = operator.GetTablespaceNames(
cluster.Spec.TablespaceMounts)
ok = true
}
}
// if its not found, we need to add it to the env
if !ok {
envVar := v1.EnvVar{
Name: "PGHA_TABLESPACES",
Value: operator.GetTablespaceNames(cluster.Spec.TablespaceMounts),
}
deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, envVar)
}
// Before applying the update, we want to explicitly stop PostgreSQL on each
// instance. This prevents PostgreSQL from having to boot up in crash
// recovery mode.
//
// If an error is returned, we only issue a warning
if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {
log.Warn(err)
}
// finally, update the Deployment. Potential to put things into an
// inconsistent state if any of these updates fail
if _, err := clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil {
return err
}
}
return nil
}
// annotateBackrestSecret annotates the pgBackRest repository secret with relevant cluster
// configuration as needed to support bootstrapping from the repository after the cluster
// has been deleted
func annotateBackrestSecret(clientset kubernetes.Interface, cluster *crv1.Pgcluster) error {
clusterName := cluster.GetName()
namespace := cluster.GetNamespace()
// simple helper that takes two config options, returning the first if populated, and
// if not the returning the second (which also might now be populated)
cfg := func(cl, op string) string {
if cl != "" {
return cl
}
return op
}
cl := cluster.Spec
op := operator.Pgo.Cluster
values := map[string]string{
config.ANNOTATION_PG_PORT: cluster.Spec.Port,
config.ANNOTATION_REPO_PATH: util.GetPGBackRestRepoPath(*cluster),
config.ANNOTATION_S3_BUCKET: cfg(cl.BackrestS3Bucket, op.BackrestS3Bucket),
config.ANNOTATION_S3_ENDPOINT: cfg(cl.BackrestS3Endpoint, op.BackrestS3Endpoint),
config.ANNOTATION_S3_REGION: cfg(cl.BackrestS3Region, op.BackrestS3Region),
config.ANNOTATION_SSHD_PORT: strconv.Itoa(operator.Pgo.Cluster.BackrestPort),
config.ANNOTATION_SUPPLEMENTAL_GROUPS: cluster.Spec.BackrestStorage.SupplementalGroups,
config.ANNOTATION_S3_URI_STYLE: cfg(cl.BackrestS3URIStyle, op.BackrestS3URIStyle),
config.ANNOTATION_S3_VERIFY_TLS: cfg(cl.BackrestS3VerifyTLS, op.BackrestS3VerifyTLS),
}
valuesJSON, err := json.Marshal(values)
if err != nil {
return err
}
secretName := fmt.Sprintf(util.BackrestRepoSecretName, clusterName)
patchString := fmt.Sprintf(`{"metadata":{"annotations":%s}}`, string(valuesJSON))
log.Debugf("About to patch secret %s (namespace %s) using:\n%s", secretName, namespace,
patchString)
if _, err := clientset.CoreV1().Secrets(namespace).Patch(secretName, types.MergePatchType,
[]byte(patchString)); err != nil {
return err
}
return nil
}
func deleteConfigMaps(clientset kubernetes.Interface, clusterName, ns string) error {
label := fmt.Sprintf("pg-cluster=%s", clusterName)
list, err := clientset.CoreV1().ConfigMaps(ns).List(metav1.ListOptions{LabelSelector: label})
if err != nil {
return fmt.Errorf("No configMaps found for selector: %s", label)
}
for _, configmap := range list.Items {
err := clientset.CoreV1().ConfigMaps(ns).Delete(configmap.Name, &metav1.DeleteOptions{})
if err != nil {
return err
}
}
return nil
}
func publishClusterCreateFailure(cl *crv1.Pgcluster, errorMsg string) {
pgouser := cl.ObjectMeta.Labels[config.LABEL_PGOUSER]
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventCreateClusterFailureFormat{
EventHeader: events.EventHeader{
Namespace: cl.ObjectMeta.Namespace,
Username: pgouser,
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventCreateClusterFailure,
},
Clustername: cl.ObjectMeta.Name,
ErrorMessage: errorMsg,
WorkflowID: cl.ObjectMeta.Labels[config.LABEL_WORKFLOW_ID],
}
err := events.Publish(f)
if err != nil {
log.Error(err.Error())
}
}
func publishClusterShutdown(cluster crv1.Pgcluster) error {
clusterName := cluster.Name
//capture the cluster creation event
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventShutdownClusterFormat{
EventHeader: events.EventHeader{
Namespace: cluster.Namespace,
Username: cluster.Spec.UserLabels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventShutdownCluster,
},
Clustername: clusterName,
}
if err := events.Publish(f); err != nil {
log.Error(err.Error())
return err
}
return nil
}
// stopPostgreSQLInstance is a proxy function for the main
// StopPostgreSQLInstance function, as it preps a Deployment to have its
// PostgreSQL instance shut down. This helps to ensure that a PostgreSQL
// instance will launch and not be in crash recovery mode
func stopPostgreSQLInstance(clientset kubernetes.Interface, restConfig *rest.Config, deployment apps_v1.Deployment) error {
// First, attempt to get the PostgreSQL instance Pod attachd to this
// particular deployment
selector := fmt.Sprintf("%s=%s", config.LABEL_DEPLOYMENT_NAME, deployment.Name)
pods, err := clientset.CoreV1().Pods(deployment.Namespace).List(metav1.ListOptions{LabelSelector: selector})
// if there is a bona fide error, return.
// However, if no Pods are found, issue a warning, but do not return an error
// This likely means that PostgreSQL is already shutdown, but hey, it's the
// cloud
if err != nil {
return err
} else if len(pods.Items) == 0 {
log.Infof("not shutting down PostgreSQL instance [%s] as the Pod cannot be found", deployment.Name)
return nil
}
// get the first pod off the items list
pod := pods.Items[0]
// now we can shut down the cluster
if err := util.StopPostgreSQLInstance(clientset, restConfig, &pod, deployment.Name); err != nil {
return err
}
return nil
} | random_line_split | |
cluster.go | // Package cluster holds the cluster CRD logic and definitions
// A cluster is comprised of a primary service, replica service,
// primary deployment, and replica deployment
package cluster
/*
Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"encoding/json"
"fmt"
"strconv"
"time"
"github.com/crunchydata/postgres-operator/internal/config"
"github.com/crunchydata/postgres-operator/internal/kubeapi"
"github.com/crunchydata/postgres-operator/internal/operator"
"github.com/crunchydata/postgres-operator/internal/operator/backrest"
"github.com/crunchydata/postgres-operator/internal/operator/pvc"
"github.com/crunchydata/postgres-operator/internal/util"
crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1"
"github.com/crunchydata/postgres-operator/pkg/events"
log "github.com/sirupsen/logrus"
apps_v1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// ServiceTemplateFields ...
type ServiceTemplateFields struct {
Name string
ServiceName string
ClusterName string
Port string
PGBadgerPort string
ExporterPort string
ServiceType string
}
// ReplicaSuffix ...
const ReplicaSuffix = "-replica"
// contstants defining the names of the various sidecar containers
const (
collectCCPImage = "crunchy-collect"
pgBadgerCCPImage = "crunchy-pgbadger"
crunchyadmCCPImage = "crunchy-admin"
)
func AddClusterBase(clientset kubeapi.Interface, cl *crv1.Pgcluster, namespace string) {
var err error
if cl.Spec.Status == crv1.CompletedStatus {
errorMsg := "crv1 pgcluster " + cl.Spec.ClusterName + " is already marked complete, will not recreate"
log.Warn(errorMsg)
publishClusterCreateFailure(cl, errorMsg)
return
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cl, namespace, cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], cl.Spec.PrimaryStorage)
if err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
return
}
if err = addClusterCreateMissingService(clientset, cl, namespace); err != nil {
log.Error("error in creating primary service " + err.Error())
publishClusterCreateFailure(cl, err.Error())
return
}
// Create a configMap for the cluster that will be utilized to configure whether or not
// initialization logic should be executed when the postgres-ha container is run. This
// ensures that the original primary in a PG cluster does not attempt to run any initialization
// logic following a restart of the container.
// If the configmap already exists, the cluster creation will continue as this is required
// for certain pgcluster upgrades.
if err = operator.CreatePGHAConfigMap(clientset, cl, namespace); err != nil &&
!kerrors.IsAlreadyExists(err) {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
return
}
if err := annotateBackrestSecret(clientset, cl); err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
}
if err := addClusterDeployments(clientset, cl, namespace,
dataVolume, walVolume, tablespaceVolumes); err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
return
}
// Now scale the repo deployment only to ensure it is initialized prior to the primary DB.
// Once the repo is ready, the primary database deployment will then also be scaled to 1.
clusterInfo, err := ScaleClusterDeployments(clientset, *cl, 1, false, false, true, false)
if err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
}
log.Debugf("Scaled pgBackRest repo deployment %s to 1 to proceed with initializing "+
"cluster %s", clusterInfo.PrimaryDeployment, cl.GetName())
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/status", crv1.CompletedStatus, crv1.PgclusterResourcePlural, cl.Spec.Name, namespace)
if err != nil {
log.Error("error in status patch " + err.Error())
}
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/PrimaryStorage/name", dataVolume.PersistentVolumeClaimName, crv1.PgclusterResourcePlural, cl.Spec.Name, namespace)
if err != nil {
log.Error("error in pvcname patch " + err.Error())
}
//publish create cluster event
//capture the cluster creation event
pgouser := cl.ObjectMeta.Labels[config.LABEL_PGOUSER]
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventCreateClusterFormat{
EventHeader: events.EventHeader{
Namespace: cl.ObjectMeta.Namespace,
Username: pgouser,
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventCreateCluster,
},
Clustername: cl.ObjectMeta.Name,
WorkflowID: cl.ObjectMeta.Labels[config.LABEL_WORKFLOW_ID],
}
err = events.Publish(f)
if err != nil {
log.Error(err.Error())
}
//add replicas if requested
if cl.Spec.Replicas != "" {
replicaCount, err := strconv.Atoi(cl.Spec.Replicas)
if err != nil {
log.Error("error in replicas value " + err.Error())
publishClusterCreateFailure(cl, err.Error())
return
}
//create a CRD for each replica
for i := 0; i < replicaCount; i++ {
spec := crv1.PgreplicaSpec{}
//get the storage config
spec.ReplicaStorage = cl.Spec.ReplicaStorage
spec.UserLabels = cl.Spec.UserLabels
//the replica should not use the same node labels as the primary
spec.UserLabels[config.LABEL_NODE_LABEL_KEY] = ""
spec.UserLabels[config.LABEL_NODE_LABEL_VALUE] = ""
labels := make(map[string]string)
labels[config.LABEL_PG_CLUSTER] = cl.Spec.Name
spec.ClusterName = cl.Spec.Name
uniqueName := util.RandStringBytesRmndr(4)
labels[config.LABEL_NAME] = cl.Spec.Name + "-" + uniqueName
spec.Name = labels[config.LABEL_NAME]
newInstance := &crv1.Pgreplica{
ObjectMeta: metav1.ObjectMeta{
Name: labels[config.LABEL_NAME],
Labels: labels,
},
Spec: spec,
Status: crv1.PgreplicaStatus{
State: crv1.PgreplicaStateCreated,
Message: "Created, not processed yet",
},
}
_, err = clientset.CrunchydataV1().Pgreplicas(namespace).Create(newInstance)
if err != nil {
log.Error(" in creating Pgreplica instance" + err.Error())
publishClusterCreateFailure(cl, err.Error())
}
}
}
}
// AddClusterBootstrap creates the resources needed to bootstrap a new cluster from an existing
// data source. Specifically, this function creates the bootstrap job that will be run to
// bootstrap the cluster, along with supporting resources (e.g. ConfigMaps and volumes).
func AddClusterBootstrap(clientset kubeapi.Interface, cluster *crv1.Pgcluster) error {
namespace := cluster.GetNamespace()
if err := operator.CreatePGHAConfigMap(clientset, cluster, namespace); err != nil &&
!kerrors.IsAlreadyExists(err) {
publishClusterCreateFailure(cluster, err.Error())
return err
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cluster, namespace,
cluster.Annotations[config.ANNOTATION_CURRENT_PRIMARY], cluster.Spec.PrimaryStorage)
if err != nil {
publishClusterCreateFailure(cluster, err.Error())
return err
}
if err := addClusterBootstrapJob(clientset, cluster, namespace, dataVolume,
walVolume, tablespaceVolumes); err != nil && !kerrors.IsAlreadyExists(err) {
publishClusterCreateFailure(cluster, err.Error())
return err
}
patch, err := json.Marshal(map[string]interface{}{
"status": crv1.PgclusterStatus{
State: crv1.PgclusterStateBootstrapping,
Message: "Bootstapping cluster from an existing data source",
},
})
if err == nil {
_, err = clientset.CrunchydataV1().Pgclusters(namespace).Patch(cluster.Name, types.MergePatchType, patch)
}
if err != nil {
return err
}
return nil
}
// AddBootstrapRepo creates a pgBackRest repository and associated service to use when
// bootstrapping a cluster from an existing data source. If an existing repo is detected
// and is being used to bootstrap another cluster, then an error is returned. If an existing
// repo is detected and is not associated with a bootstrap job (but rather an active cluster),
// then no action is taken and the function resturns. Also, in addition to returning an error
// in the event an error is encountered, the function also returns a 'repoCreated' bool that
// specifies whether or not a repo was actually created.
func AddBootstrapRepo(clientset kubernetes.Interface, cluster *crv1.Pgcluster) (repoCreated bool, err error) {
restoreClusterName := cluster.Spec.PGDataSource.RestoreFrom
repoName := fmt.Sprintf(util.BackrestRepoServiceName, restoreClusterName)
found := true
repoDeployment, err := clientset.AppsV1().Deployments(cluster.GetNamespace()).Get(
repoName, metav1.GetOptions{})
if err != nil {
if !kerrors.IsNotFound(err) {
return
}
found = false
}
if !found {
if err = backrest.CreateRepoDeployment(clientset, cluster, false, true, 1); err != nil {
return
}
repoCreated = true
} else if _, ok := repoDeployment.GetLabels()[config.LABEL_PGHA_BOOTSTRAP]; ok {
err = fmt.Errorf("Unable to create bootstrap repo %s to bootstrap cluster %s "+
"(namespace %s) because it is already running to bootstrap another cluster",
repoName, cluster.GetName(), cluster.GetNamespace())
return
}
return
}
// DeleteClusterBase ...
func DeleteClusterBase(clientset kubernetes.Interface, cl *crv1.Pgcluster, namespace string) {
DeleteCluster(clientset, cl, namespace)
//delete any existing configmaps
if err := deleteConfigMaps(clientset, cl.Spec.Name, namespace); err != nil {
log.Error(err)
}
//delete any existing pgtasks ???
//publish delete cluster event
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventDeleteClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: cl.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventDeleteCluster,
},
Clustername: cl.Spec.Name,
}
if err := events.Publish(f); err != nil {
log.Error(err)
}
}
// ScaleBase ...
func ScaleBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) {
if replica.Spec.Status == crv1.CompletedStatus {
log.Warn("crv1 pgreplica " + replica.Spec.Name + " is already marked complete, will not recreate")
return
}
//get the pgcluster CRD to base the replica off of
cluster, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(replica.Spec.ClusterName, metav1.GetOptions{})
if err != nil {
return
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cluster, namespace, replica.Spec.Name, replica.Spec.ReplicaStorage)
if err != nil {
log.Error(err)
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//update the replica CRD pvcname
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/replicastorage/name", dataVolume.PersistentVolumeClaimName, crv1.PgreplicaResourcePlural, replica.Spec.Name, namespace)
if err != nil {
log.Error("error in pvcname patch " + err.Error())
}
//create the replica service if it doesnt exist
if err = scaleReplicaCreateMissingService(clientset, replica, cluster, namespace); err != nil {
log.Error(err)
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//instantiate the replica
if err = scaleReplicaCreateDeployment(clientset, replica, cluster, namespace, dataVolume, walVolume, tablespaceVolumes); err != nil {
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//update the replica CRD status
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/status", crv1.CompletedStatus, crv1.PgreplicaResourcePlural, replica.Spec.Name, namespace)
if err != nil {
log.Error("error in status patch " + err.Error())
}
//publish event for replica creation
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventScaleClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: replica.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventScaleCluster,
},
Clustername: cluster.Spec.UserLabels[config.LABEL_REPLICA_NAME],
Replicaname: cluster.Spec.UserLabels[config.LABEL_PG_CLUSTER],
}
if err = events.Publish(f); err != nil {
log.Error(err.Error())
}
}
// ScaleDownBase ...
func ScaleDownBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) {
//get the pgcluster CRD for this replica
_, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(replica.Spec.ClusterName, metav1.GetOptions{})
if err != nil {
return
}
DeleteReplica(clientset, replica, namespace)
//publish event for scale down
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventScaleDownClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: replica.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventScaleDownCluster,
},
Clustername: replica.Spec.ClusterName,
}
err = events.Publish(f)
if err != nil {
log.Error(err.Error())
return
}
}
// UpdateResources updates the PostgreSQL instance Deployments to reflect the
// update resources (i.e. CPU, memory)
func UpdateResources(clientset kubernetes.Interface, restConfig *rest.Config, cluster *crv1.Pgcluster) error {
// get a list of all of the instance deployments for the cluster
deployments, err := operator.GetInstanceDeployments(clientset, cluster)
if err != nil {
return err
}
// iterate through each PostgreSQL instance deployment and update the
// resource values for the database container
//
// NOTE: a future version (near future) will first try to detect the primary
// so that all the replicas are updated first, and then the primary gets the
// update
for _, deployment := range deployments.Items {
// first, initialize the requests/limits resource to empty Resource Lists
deployment.Spec.Template.Spec.Containers[0].Resources.Requests = v1.ResourceList{}
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = v1.ResourceList{}
// now, simply deep copy the values from the CRD
if cluster.Spec.Resources != nil {
deployment.Spec.Template.Spec.Containers[0].Resources.Requests = cluster.Spec.Resources.DeepCopy()
}
if cluster.Spec.Limits != nil {
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = cluster.Spec.Limits.DeepCopy()
}
// Before applying the update, we want to explicitly stop PostgreSQL on each
// instance. This prevents PostgreSQL from having to boot up in crash
// recovery mode.
//
// If an error is returned, we only issue a warning
if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {
log.Warn(err)
}
// update the deployment with the new values
if _, err := clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil {
return err
}
}
return nil
}
// UpdateTablespaces updates the PostgreSQL instance Deployments to update
// what tablespaces are mounted.
// Though any new tablespaces are present in the CRD, to attempt to do less work
// this function takes a map of the new tablespaces that are being added, so we
// only have to check and create the PVCs that are being mounted at this time
//
// To do this, iterate through the tablespace mount map that is present in the
// new cluster.
func UpdateTablespaces(clientset kubernetes.Interface, restConfig *rest.Config,
cluster *crv1.Pgcluster, newTablespaces map[string]crv1.PgStorageSpec) error {
// first, get a list of all of the instance deployments for the cluster
deployments, err := operator.GetInstanceDeployments(clientset, cluster)
if err != nil {
return err
}
tablespaceVolumes := make([]map[string]operator.StorageResult, len(deployments.Items))
// now we can start creating the new tablespaces! First, create the new
// PVCs. The PVCs are created for each **instance** in the cluster, as every
// instance needs to have a distinct PVC for each tablespace
for i, deployment := range deployments.Items {
tablespaceVolumes[i] = make(map[string]operator.StorageResult)
for tablespaceName, storageSpec := range newTablespaces {
// get the name of the tablespace PVC for that instance
tablespacePVCName := operator.GetTablespacePVCName(deployment.Name, tablespaceName)
log.Debugf("creating tablespace PVC [%s] for [%s]", tablespacePVCName, deployment.Name)
// and now create it! If it errors, we just need to return, which
// potentially leaves things in an inconsistent state, but at this point
// only PVC objects have been created
tablespaceVolumes[i][tablespaceName], err = pvc.CreateIfNotExists(clientset,
storageSpec, tablespacePVCName, cluster.Name, cluster.Namespace)
if err != nil {
return err
}
}
}
// now the fun step: update each deployment with the new volumes
for i, deployment := range deployments.Items {
log.Debugf("attach tablespace volumes to [%s]", deployment.Name)
// iterate through each table space and prepare the Volume and
// VolumeMount clause for each instance
for tablespaceName := range newTablespaces {
// this is the volume to be added for the tablespace
volume := v1.Volume{
Name: operator.GetTablespaceVolumeName(tablespaceName),
VolumeSource: tablespaceVolumes[i][tablespaceName].VolumeSource(),
}
// add the volume to the list of volumes
deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volume)
// now add the volume mount point to that of the database container
volumeMount := v1.VolumeMount{
MountPath: fmt.Sprintf("%s%s", config.VOLUME_TABLESPACE_PATH_PREFIX, tablespaceName),
Name: operator.GetTablespaceVolumeName(tablespaceName),
}
// we can do this as we always know that the "database" container is the
// first container in the list
deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(
deployment.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
// add any supplemental groups specified in storage configuration.
// SecurityContext is always initialized because we use fsGroup.
deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups = append(
deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups,
tablespaceVolumes[i][tablespaceName].SupplementalGroups...)
}
// find the "PGHA_TABLESPACES" value and update it with the new tablespace
// name list
ok := false
for i, envVar := range deployment.Spec.Template.Spec.Containers[0].Env {
// yup, it's an old fashioned linear time lookup
if envVar.Name == "PGHA_TABLESPACES" {
deployment.Spec.Template.Spec.Containers[0].Env[i].Value = operator.GetTablespaceNames(
cluster.Spec.TablespaceMounts)
ok = true
}
}
// if its not found, we need to add it to the env
if !ok {
envVar := v1.EnvVar{
Name: "PGHA_TABLESPACES",
Value: operator.GetTablespaceNames(cluster.Spec.TablespaceMounts),
}
deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, envVar)
}
// Before applying the update, we want to explicitly stop PostgreSQL on each
// instance. This prevents PostgreSQL from having to boot up in crash
// recovery mode.
//
// If an error is returned, we only issue a warning
if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {
log.Warn(err)
}
// finally, update the Deployment. Potential to put things into an
// inconsistent state if any of these updates fail
if _, err := clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil {
return err
}
}
return nil
}
// annotateBackrestSecret annotates the pgBackRest repository secret with relevant cluster
// configuration as needed to support bootstrapping from the repository after the cluster
// has been deleted
func annotateBackrestSecret(clientset kubernetes.Interface, cluster *crv1.Pgcluster) error {
clusterName := cluster.GetName()
namespace := cluster.GetNamespace()
// simple helper that takes two config options, returning the first if populated, and
// if not the returning the second (which also might now be populated)
cfg := func(cl, op string) string {
if cl != "" {
return cl
}
return op
}
cl := cluster.Spec
op := operator.Pgo.Cluster
values := map[string]string{
config.ANNOTATION_PG_PORT: cluster.Spec.Port,
config.ANNOTATION_REPO_PATH: util.GetPGBackRestRepoPath(*cluster),
config.ANNOTATION_S3_BUCKET: cfg(cl.BackrestS3Bucket, op.BackrestS3Bucket),
config.ANNOTATION_S3_ENDPOINT: cfg(cl.BackrestS3Endpoint, op.BackrestS3Endpoint),
config.ANNOTATION_S3_REGION: cfg(cl.BackrestS3Region, op.BackrestS3Region),
config.ANNOTATION_SSHD_PORT: strconv.Itoa(operator.Pgo.Cluster.BackrestPort),
config.ANNOTATION_SUPPLEMENTAL_GROUPS: cluster.Spec.BackrestStorage.SupplementalGroups,
config.ANNOTATION_S3_URI_STYLE: cfg(cl.BackrestS3URIStyle, op.BackrestS3URIStyle),
config.ANNOTATION_S3_VERIFY_TLS: cfg(cl.BackrestS3VerifyTLS, op.BackrestS3VerifyTLS),
}
valuesJSON, err := json.Marshal(values)
if err != nil {
return err
}
secretName := fmt.Sprintf(util.BackrestRepoSecretName, clusterName)
patchString := fmt.Sprintf(`{"metadata":{"annotations":%s}}`, string(valuesJSON))
log.Debugf("About to patch secret %s (namespace %s) using:\n%s", secretName, namespace,
patchString)
if _, err := clientset.CoreV1().Secrets(namespace).Patch(secretName, types.MergePatchType,
[]byte(patchString)); err != nil {
return err
}
return nil
}
func deleteConfigMaps(clientset kubernetes.Interface, clusterName, ns string) error {
label := fmt.Sprintf("pg-cluster=%s", clusterName)
list, err := clientset.CoreV1().ConfigMaps(ns).List(metav1.ListOptions{LabelSelector: label})
if err != nil {
return fmt.Errorf("No configMaps found for selector: %s", label)
}
for _, configmap := range list.Items {
err := clientset.CoreV1().ConfigMaps(ns).Delete(configmap.Name, &metav1.DeleteOptions{})
if err != nil {
return err
}
}
return nil
}
func | (cl *crv1.Pgcluster, errorMsg string) {
pgouser := cl.ObjectMeta.Labels[config.LABEL_PGOUSER]
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventCreateClusterFailureFormat{
EventHeader: events.EventHeader{
Namespace: cl.ObjectMeta.Namespace,
Username: pgouser,
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventCreateClusterFailure,
},
Clustername: cl.ObjectMeta.Name,
ErrorMessage: errorMsg,
WorkflowID: cl.ObjectMeta.Labels[config.LABEL_WORKFLOW_ID],
}
err := events.Publish(f)
if err != nil {
log.Error(err.Error())
}
}
func publishClusterShutdown(cluster crv1.Pgcluster) error {
clusterName := cluster.Name
//capture the cluster creation event
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventShutdownClusterFormat{
EventHeader: events.EventHeader{
Namespace: cluster.Namespace,
Username: cluster.Spec.UserLabels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventShutdownCluster,
},
Clustername: clusterName,
}
if err := events.Publish(f); err != nil {
log.Error(err.Error())
return err
}
return nil
}
// stopPostgreSQLInstance is a proxy function for the main
// StopPostgreSQLInstance function, as it preps a Deployment to have its
// PostgreSQL instance shut down. This helps to ensure that a PostgreSQL
// instance will launch and not be in crash recovery mode
func stopPostgreSQLInstance(clientset kubernetes.Interface, restConfig *rest.Config, deployment apps_v1.Deployment) error {
// First, attempt to get the PostgreSQL instance Pod attachd to this
// particular deployment
selector := fmt.Sprintf("%s=%s", config.LABEL_DEPLOYMENT_NAME, deployment.Name)
pods, err := clientset.CoreV1().Pods(deployment.Namespace).List(metav1.ListOptions{LabelSelector: selector})
// if there is a bona fide error, return.
// However, if no Pods are found, issue a warning, but do not return an error
// This likely means that PostgreSQL is already shutdown, but hey, it's the
// cloud
if err != nil {
return err
} else if len(pods.Items) == 0 {
log.Infof("not shutting down PostgreSQL instance [%s] as the Pod cannot be found", deployment.Name)
return nil
}
// get the first pod off the items list
pod := pods.Items[0]
// now we can shut down the cluster
if err := util.StopPostgreSQLInstance(clientset, restConfig, &pod, deployment.Name); err != nil {
return err
}
return nil
}
| publishClusterCreateFailure | identifier_name |
cluster.go | // Package cluster holds the cluster CRD logic and definitions
// A cluster is comprised of a primary service, replica service,
// primary deployment, and replica deployment
package cluster
/*
Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"encoding/json"
"fmt"
"strconv"
"time"
"github.com/crunchydata/postgres-operator/internal/config"
"github.com/crunchydata/postgres-operator/internal/kubeapi"
"github.com/crunchydata/postgres-operator/internal/operator"
"github.com/crunchydata/postgres-operator/internal/operator/backrest"
"github.com/crunchydata/postgres-operator/internal/operator/pvc"
"github.com/crunchydata/postgres-operator/internal/util"
crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1"
"github.com/crunchydata/postgres-operator/pkg/events"
log "github.com/sirupsen/logrus"
apps_v1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// ServiceTemplateFields ...
type ServiceTemplateFields struct {
Name string
ServiceName string
ClusterName string
Port string
PGBadgerPort string
ExporterPort string
ServiceType string
}
// ReplicaSuffix ...
const ReplicaSuffix = "-replica"
// contstants defining the names of the various sidecar containers
const (
collectCCPImage = "crunchy-collect"
pgBadgerCCPImage = "crunchy-pgbadger"
crunchyadmCCPImage = "crunchy-admin"
)
func AddClusterBase(clientset kubeapi.Interface, cl *crv1.Pgcluster, namespace string) {
var err error
if cl.Spec.Status == crv1.CompletedStatus {
errorMsg := "crv1 pgcluster " + cl.Spec.ClusterName + " is already marked complete, will not recreate"
log.Warn(errorMsg)
publishClusterCreateFailure(cl, errorMsg)
return
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cl, namespace, cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], cl.Spec.PrimaryStorage)
if err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
return
}
if err = addClusterCreateMissingService(clientset, cl, namespace); err != nil {
log.Error("error in creating primary service " + err.Error())
publishClusterCreateFailure(cl, err.Error())
return
}
// Create a configMap for the cluster that will be utilized to configure whether or not
// initialization logic should be executed when the postgres-ha container is run. This
// ensures that the original primary in a PG cluster does not attempt to run any initialization
// logic following a restart of the container.
// If the configmap already exists, the cluster creation will continue as this is required
// for certain pgcluster upgrades.
if err = operator.CreatePGHAConfigMap(clientset, cl, namespace); err != nil &&
!kerrors.IsAlreadyExists(err) {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
return
}
if err := annotateBackrestSecret(clientset, cl); err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
}
if err := addClusterDeployments(clientset, cl, namespace,
dataVolume, walVolume, tablespaceVolumes); err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
return
}
// Now scale the repo deployment only to ensure it is initialized prior to the primary DB.
// Once the repo is ready, the primary database deployment will then also be scaled to 1.
clusterInfo, err := ScaleClusterDeployments(clientset, *cl, 1, false, false, true, false)
if err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
}
log.Debugf("Scaled pgBackRest repo deployment %s to 1 to proceed with initializing "+
"cluster %s", clusterInfo.PrimaryDeployment, cl.GetName())
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/status", crv1.CompletedStatus, crv1.PgclusterResourcePlural, cl.Spec.Name, namespace)
if err != nil {
log.Error("error in status patch " + err.Error())
}
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/PrimaryStorage/name", dataVolume.PersistentVolumeClaimName, crv1.PgclusterResourcePlural, cl.Spec.Name, namespace)
if err != nil {
log.Error("error in pvcname patch " + err.Error())
}
//publish create cluster event
//capture the cluster creation event
pgouser := cl.ObjectMeta.Labels[config.LABEL_PGOUSER]
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventCreateClusterFormat{
EventHeader: events.EventHeader{
Namespace: cl.ObjectMeta.Namespace,
Username: pgouser,
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventCreateCluster,
},
Clustername: cl.ObjectMeta.Name,
WorkflowID: cl.ObjectMeta.Labels[config.LABEL_WORKFLOW_ID],
}
err = events.Publish(f)
if err != nil {
log.Error(err.Error())
}
//add replicas if requested
if cl.Spec.Replicas != "" {
replicaCount, err := strconv.Atoi(cl.Spec.Replicas)
if err != nil {
log.Error("error in replicas value " + err.Error())
publishClusterCreateFailure(cl, err.Error())
return
}
//create a CRD for each replica
for i := 0; i < replicaCount; i++ {
spec := crv1.PgreplicaSpec{}
//get the storage config
spec.ReplicaStorage = cl.Spec.ReplicaStorage
spec.UserLabels = cl.Spec.UserLabels
//the replica should not use the same node labels as the primary
spec.UserLabels[config.LABEL_NODE_LABEL_KEY] = ""
spec.UserLabels[config.LABEL_NODE_LABEL_VALUE] = ""
labels := make(map[string]string)
labels[config.LABEL_PG_CLUSTER] = cl.Spec.Name
spec.ClusterName = cl.Spec.Name
uniqueName := util.RandStringBytesRmndr(4)
labels[config.LABEL_NAME] = cl.Spec.Name + "-" + uniqueName
spec.Name = labels[config.LABEL_NAME]
newInstance := &crv1.Pgreplica{
ObjectMeta: metav1.ObjectMeta{
Name: labels[config.LABEL_NAME],
Labels: labels,
},
Spec: spec,
Status: crv1.PgreplicaStatus{
State: crv1.PgreplicaStateCreated,
Message: "Created, not processed yet",
},
}
_, err = clientset.CrunchydataV1().Pgreplicas(namespace).Create(newInstance)
if err != nil {
log.Error(" in creating Pgreplica instance" + err.Error())
publishClusterCreateFailure(cl, err.Error())
}
}
}
}
// AddClusterBootstrap creates the resources needed to bootstrap a new cluster from an existing
// data source. Specifically, this function creates the bootstrap job that will be run to
// bootstrap the cluster, along with supporting resources (e.g. ConfigMaps and volumes).
func AddClusterBootstrap(clientset kubeapi.Interface, cluster *crv1.Pgcluster) error {
namespace := cluster.GetNamespace()
if err := operator.CreatePGHAConfigMap(clientset, cluster, namespace); err != nil &&
!kerrors.IsAlreadyExists(err) {
publishClusterCreateFailure(cluster, err.Error())
return err
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cluster, namespace,
cluster.Annotations[config.ANNOTATION_CURRENT_PRIMARY], cluster.Spec.PrimaryStorage)
if err != nil {
publishClusterCreateFailure(cluster, err.Error())
return err
}
if err := addClusterBootstrapJob(clientset, cluster, namespace, dataVolume,
walVolume, tablespaceVolumes); err != nil && !kerrors.IsAlreadyExists(err) {
publishClusterCreateFailure(cluster, err.Error())
return err
}
patch, err := json.Marshal(map[string]interface{}{
"status": crv1.PgclusterStatus{
State: crv1.PgclusterStateBootstrapping,
Message: "Bootstapping cluster from an existing data source",
},
})
if err == nil {
_, err = clientset.CrunchydataV1().Pgclusters(namespace).Patch(cluster.Name, types.MergePatchType, patch)
}
if err != nil {
return err
}
return nil
}
// AddBootstrapRepo creates a pgBackRest repository and associated service to use when
// bootstrapping a cluster from an existing data source. If an existing repo is detected
// and is being used to bootstrap another cluster, then an error is returned. If an existing
// repo is detected and is not associated with a bootstrap job (but rather an active cluster),
// then no action is taken and the function resturns. Also, in addition to returning an error
// in the event an error is encountered, the function also returns a 'repoCreated' bool that
// specifies whether or not a repo was actually created.
func AddBootstrapRepo(clientset kubernetes.Interface, cluster *crv1.Pgcluster) (repoCreated bool, err error) {
restoreClusterName := cluster.Spec.PGDataSource.RestoreFrom
repoName := fmt.Sprintf(util.BackrestRepoServiceName, restoreClusterName)
found := true
repoDeployment, err := clientset.AppsV1().Deployments(cluster.GetNamespace()).Get(
repoName, metav1.GetOptions{})
if err != nil {
if !kerrors.IsNotFound(err) {
return
}
found = false
}
if !found {
if err = backrest.CreateRepoDeployment(clientset, cluster, false, true, 1); err != nil {
return
}
repoCreated = true
} else if _, ok := repoDeployment.GetLabels()[config.LABEL_PGHA_BOOTSTRAP]; ok {
err = fmt.Errorf("Unable to create bootstrap repo %s to bootstrap cluster %s "+
"(namespace %s) because it is already running to bootstrap another cluster",
repoName, cluster.GetName(), cluster.GetNamespace())
return
}
return
}
// DeleteClusterBase ...
func DeleteClusterBase(clientset kubernetes.Interface, cl *crv1.Pgcluster, namespace string) {
DeleteCluster(clientset, cl, namespace)
//delete any existing configmaps
if err := deleteConfigMaps(clientset, cl.Spec.Name, namespace); err != nil {
log.Error(err)
}
//delete any existing pgtasks ???
//publish delete cluster event
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventDeleteClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: cl.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventDeleteCluster,
},
Clustername: cl.Spec.Name,
}
if err := events.Publish(f); err != nil {
log.Error(err)
}
}
// ScaleBase ...
func ScaleBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) {
if replica.Spec.Status == crv1.CompletedStatus {
log.Warn("crv1 pgreplica " + replica.Spec.Name + " is already marked complete, will not recreate")
return
}
//get the pgcluster CRD to base the replica off of
cluster, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(replica.Spec.ClusterName, metav1.GetOptions{})
if err != nil {
return
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cluster, namespace, replica.Spec.Name, replica.Spec.ReplicaStorage)
if err != nil {
log.Error(err)
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//update the replica CRD pvcname
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/replicastorage/name", dataVolume.PersistentVolumeClaimName, crv1.PgreplicaResourcePlural, replica.Spec.Name, namespace)
if err != nil {
log.Error("error in pvcname patch " + err.Error())
}
//create the replica service if it doesnt exist
if err = scaleReplicaCreateMissingService(clientset, replica, cluster, namespace); err != nil {
log.Error(err)
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//instantiate the replica
if err = scaleReplicaCreateDeployment(clientset, replica, cluster, namespace, dataVolume, walVolume, tablespaceVolumes); err != nil {
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//update the replica CRD status
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/status", crv1.CompletedStatus, crv1.PgreplicaResourcePlural, replica.Spec.Name, namespace)
if err != nil {
log.Error("error in status patch " + err.Error())
}
//publish event for replica creation
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventScaleClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: replica.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventScaleCluster,
},
Clustername: cluster.Spec.UserLabels[config.LABEL_REPLICA_NAME],
Replicaname: cluster.Spec.UserLabels[config.LABEL_PG_CLUSTER],
}
if err = events.Publish(f); err != nil {
log.Error(err.Error())
}
}
// ScaleDownBase ...
func ScaleDownBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) {
//get the pgcluster CRD for this replica
_, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(replica.Spec.ClusterName, metav1.GetOptions{})
if err != nil {
return
}
DeleteReplica(clientset, replica, namespace)
//publish event for scale down
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventScaleDownClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: replica.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventScaleDownCluster,
},
Clustername: replica.Spec.ClusterName,
}
err = events.Publish(f)
if err != nil {
log.Error(err.Error())
return
}
}
// UpdateResources updates the PostgreSQL instance Deployments to reflect the
// update resources (i.e. CPU, memory)
func UpdateResources(clientset kubernetes.Interface, restConfig *rest.Config, cluster *crv1.Pgcluster) error {
// get a list of all of the instance deployments for the cluster
deployments, err := operator.GetInstanceDeployments(clientset, cluster)
if err != nil {
return err
}
// iterate through each PostgreSQL instance deployment and update the
// resource values for the database container
//
// NOTE: a future version (near future) will first try to detect the primary
// so that all the replicas are updated first, and then the primary gets the
// update
for _, deployment := range deployments.Items {
// first, initialize the requests/limits resource to empty Resource Lists
deployment.Spec.Template.Spec.Containers[0].Resources.Requests = v1.ResourceList{}
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = v1.ResourceList{}
// now, simply deep copy the values from the CRD
if cluster.Spec.Resources != nil {
deployment.Spec.Template.Spec.Containers[0].Resources.Requests = cluster.Spec.Resources.DeepCopy()
}
if cluster.Spec.Limits != nil {
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = cluster.Spec.Limits.DeepCopy()
}
// Before applying the update, we want to explicitly stop PostgreSQL on each
// instance. This prevents PostgreSQL from having to boot up in crash
// recovery mode.
//
// If an error is returned, we only issue a warning
if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {
log.Warn(err)
}
// update the deployment with the new values
if _, err := clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil {
return err
}
}
return nil
}
// UpdateTablespaces updates the PostgreSQL instance Deployments to update
// what tablespaces are mounted.
// Though any new tablespaces are present in the CRD, to attempt to do less work
// this function takes a map of the new tablespaces that are being added, so we
// only have to check and create the PVCs that are being mounted at this time
//
// To do this, iterate through the tablespace mount map that is present in the
// new cluster.
func UpdateTablespaces(clientset kubernetes.Interface, restConfig *rest.Config,
cluster *crv1.Pgcluster, newTablespaces map[string]crv1.PgStorageSpec) error {
// first, get a list of all of the instance deployments for the cluster
deployments, err := operator.GetInstanceDeployments(clientset, cluster)
if err != nil {
return err
}
tablespaceVolumes := make([]map[string]operator.StorageResult, len(deployments.Items))
// now we can start creating the new tablespaces! First, create the new
// PVCs. The PVCs are created for each **instance** in the cluster, as every
// instance needs to have a distinct PVC for each tablespace
for i, deployment := range deployments.Items {
tablespaceVolumes[i] = make(map[string]operator.StorageResult)
for tablespaceName, storageSpec := range newTablespaces {
// get the name of the tablespace PVC for that instance
tablespacePVCName := operator.GetTablespacePVCName(deployment.Name, tablespaceName)
log.Debugf("creating tablespace PVC [%s] for [%s]", tablespacePVCName, deployment.Name)
// and now create it! If it errors, we just need to return, which
// potentially leaves things in an inconsistent state, but at this point
// only PVC objects have been created
tablespaceVolumes[i][tablespaceName], err = pvc.CreateIfNotExists(clientset,
storageSpec, tablespacePVCName, cluster.Name, cluster.Namespace)
if err != nil {
return err
}
}
}
// now the fun step: update each deployment with the new volumes
for i, deployment := range deployments.Items {
log.Debugf("attach tablespace volumes to [%s]", deployment.Name)
// iterate through each table space and prepare the Volume and
// VolumeMount clause for each instance
for tablespaceName := range newTablespaces {
// this is the volume to be added for the tablespace
volume := v1.Volume{
Name: operator.GetTablespaceVolumeName(tablespaceName),
VolumeSource: tablespaceVolumes[i][tablespaceName].VolumeSource(),
}
// add the volume to the list of volumes
deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volume)
// now add the volume mount point to that of the database container
volumeMount := v1.VolumeMount{
MountPath: fmt.Sprintf("%s%s", config.VOLUME_TABLESPACE_PATH_PREFIX, tablespaceName),
Name: operator.GetTablespaceVolumeName(tablespaceName),
}
// we can do this as we always know that the "database" container is the
// first container in the list
deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(
deployment.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
// add any supplemental groups specified in storage configuration.
// SecurityContext is always initialized because we use fsGroup.
deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups = append(
deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups,
tablespaceVolumes[i][tablespaceName].SupplementalGroups...)
}
// find the "PGHA_TABLESPACES" value and update it with the new tablespace
// name list
ok := false
for i, envVar := range deployment.Spec.Template.Spec.Containers[0].Env {
// yup, it's an old fashioned linear time lookup
if envVar.Name == "PGHA_TABLESPACES" {
deployment.Spec.Template.Spec.Containers[0].Env[i].Value = operator.GetTablespaceNames(
cluster.Spec.TablespaceMounts)
ok = true
}
}
// if its not found, we need to add it to the env
if !ok {
envVar := v1.EnvVar{
Name: "PGHA_TABLESPACES",
Value: operator.GetTablespaceNames(cluster.Spec.TablespaceMounts),
}
deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, envVar)
}
// Before applying the update, we want to explicitly stop PostgreSQL on each
// instance. This prevents PostgreSQL from having to boot up in crash
// recovery mode.
//
// If an error is returned, we only issue a warning
if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {
log.Warn(err)
}
// finally, update the Deployment. Potential to put things into an
// inconsistent state if any of these updates fail
if _, err := clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil {
return err
}
}
return nil
}
// annotateBackrestSecret annotates the pgBackRest repository secret with relevant cluster
// configuration as needed to support bootstrapping from the repository after the cluster
// has been deleted
func annotateBackrestSecret(clientset kubernetes.Interface, cluster *crv1.Pgcluster) error {
clusterName := cluster.GetName()
namespace := cluster.GetNamespace()
// simple helper that takes two config options, returning the first if populated, and
// if not the returning the second (which also might now be populated)
cfg := func(cl, op string) string {
if cl != "" {
return cl
}
return op
}
cl := cluster.Spec
op := operator.Pgo.Cluster
values := map[string]string{
config.ANNOTATION_PG_PORT: cluster.Spec.Port,
config.ANNOTATION_REPO_PATH: util.GetPGBackRestRepoPath(*cluster),
config.ANNOTATION_S3_BUCKET: cfg(cl.BackrestS3Bucket, op.BackrestS3Bucket),
config.ANNOTATION_S3_ENDPOINT: cfg(cl.BackrestS3Endpoint, op.BackrestS3Endpoint),
config.ANNOTATION_S3_REGION: cfg(cl.BackrestS3Region, op.BackrestS3Region),
config.ANNOTATION_SSHD_PORT: strconv.Itoa(operator.Pgo.Cluster.BackrestPort),
config.ANNOTATION_SUPPLEMENTAL_GROUPS: cluster.Spec.BackrestStorage.SupplementalGroups,
config.ANNOTATION_S3_URI_STYLE: cfg(cl.BackrestS3URIStyle, op.BackrestS3URIStyle),
config.ANNOTATION_S3_VERIFY_TLS: cfg(cl.BackrestS3VerifyTLS, op.BackrestS3VerifyTLS),
}
valuesJSON, err := json.Marshal(values)
if err != nil {
return err
}
secretName := fmt.Sprintf(util.BackrestRepoSecretName, clusterName)
patchString := fmt.Sprintf(`{"metadata":{"annotations":%s}}`, string(valuesJSON))
log.Debugf("About to patch secret %s (namespace %s) using:\n%s", secretName, namespace,
patchString)
if _, err := clientset.CoreV1().Secrets(namespace).Patch(secretName, types.MergePatchType,
[]byte(patchString)); err != nil {
return err
}
return nil
}
func deleteConfigMaps(clientset kubernetes.Interface, clusterName, ns string) error {
label := fmt.Sprintf("pg-cluster=%s", clusterName)
list, err := clientset.CoreV1().ConfigMaps(ns).List(metav1.ListOptions{LabelSelector: label})
if err != nil {
return fmt.Errorf("No configMaps found for selector: %s", label)
}
for _, configmap := range list.Items {
err := clientset.CoreV1().ConfigMaps(ns).Delete(configmap.Name, &metav1.DeleteOptions{})
if err != nil {
return err
}
}
return nil
}
func publishClusterCreateFailure(cl *crv1.Pgcluster, errorMsg string) {
pgouser := cl.ObjectMeta.Labels[config.LABEL_PGOUSER]
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventCreateClusterFailureFormat{
EventHeader: events.EventHeader{
Namespace: cl.ObjectMeta.Namespace,
Username: pgouser,
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventCreateClusterFailure,
},
Clustername: cl.ObjectMeta.Name,
ErrorMessage: errorMsg,
WorkflowID: cl.ObjectMeta.Labels[config.LABEL_WORKFLOW_ID],
}
err := events.Publish(f)
if err != nil {
log.Error(err.Error())
}
}
func publishClusterShutdown(cluster crv1.Pgcluster) error |
// stopPostgreSQLInstance is a proxy function for the main
// StopPostgreSQLInstance function, as it preps a Deployment to have its
// PostgreSQL instance shut down. This helps to ensure that a PostgreSQL
// instance will launch and not be in crash recovery mode
func stopPostgreSQLInstance(clientset kubernetes.Interface, restConfig *rest.Config, deployment apps_v1.Deployment) error {
// First, attempt to get the PostgreSQL instance Pod attachd to this
// particular deployment
selector := fmt.Sprintf("%s=%s", config.LABEL_DEPLOYMENT_NAME, deployment.Name)
pods, err := clientset.CoreV1().Pods(deployment.Namespace).List(metav1.ListOptions{LabelSelector: selector})
// if there is a bona fide error, return.
// However, if no Pods are found, issue a warning, but do not return an error
// This likely means that PostgreSQL is already shutdown, but hey, it's the
// cloud
if err != nil {
return err
} else if len(pods.Items) == 0 {
log.Infof("not shutting down PostgreSQL instance [%s] as the Pod cannot be found", deployment.Name)
return nil
}
// get the first pod off the items list
pod := pods.Items[0]
// now we can shut down the cluster
if err := util.StopPostgreSQLInstance(clientset, restConfig, &pod, deployment.Name); err != nil {
return err
}
return nil
}
| {
clusterName := cluster.Name
//capture the cluster creation event
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventShutdownClusterFormat{
EventHeader: events.EventHeader{
Namespace: cluster.Namespace,
Username: cluster.Spec.UserLabels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventShutdownCluster,
},
Clustername: clusterName,
}
if err := events.Publish(f); err != nil {
log.Error(err.Error())
return err
}
return nil
} | identifier_body |
cluster.go | // Package cluster holds the cluster CRD logic and definitions
// A cluster is comprised of a primary service, replica service,
// primary deployment, and replica deployment
package cluster
/*
Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"encoding/json"
"fmt"
"strconv"
"time"
"github.com/crunchydata/postgres-operator/internal/config"
"github.com/crunchydata/postgres-operator/internal/kubeapi"
"github.com/crunchydata/postgres-operator/internal/operator"
"github.com/crunchydata/postgres-operator/internal/operator/backrest"
"github.com/crunchydata/postgres-operator/internal/operator/pvc"
"github.com/crunchydata/postgres-operator/internal/util"
crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1"
"github.com/crunchydata/postgres-operator/pkg/events"
log "github.com/sirupsen/logrus"
apps_v1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// ServiceTemplateFields ...
type ServiceTemplateFields struct {
Name string
ServiceName string
ClusterName string
Port string
PGBadgerPort string
ExporterPort string
ServiceType string
}
// ReplicaSuffix ...
const ReplicaSuffix = "-replica"
// contstants defining the names of the various sidecar containers
const (
collectCCPImage = "crunchy-collect"
pgBadgerCCPImage = "crunchy-pgbadger"
crunchyadmCCPImage = "crunchy-admin"
)
func AddClusterBase(clientset kubeapi.Interface, cl *crv1.Pgcluster, namespace string) {
var err error
if cl.Spec.Status == crv1.CompletedStatus {
errorMsg := "crv1 pgcluster " + cl.Spec.ClusterName + " is already marked complete, will not recreate"
log.Warn(errorMsg)
publishClusterCreateFailure(cl, errorMsg)
return
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cl, namespace, cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], cl.Spec.PrimaryStorage)
if err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
return
}
if err = addClusterCreateMissingService(clientset, cl, namespace); err != nil {
log.Error("error in creating primary service " + err.Error())
publishClusterCreateFailure(cl, err.Error())
return
}
// Create a configMap for the cluster that will be utilized to configure whether or not
// initialization logic should be executed when the postgres-ha container is run. This
// ensures that the original primary in a PG cluster does not attempt to run any initialization
// logic following a restart of the container.
// If the configmap already exists, the cluster creation will continue as this is required
// for certain pgcluster upgrades.
if err = operator.CreatePGHAConfigMap(clientset, cl, namespace); err != nil &&
!kerrors.IsAlreadyExists(err) {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
return
}
if err := annotateBackrestSecret(clientset, cl); err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
}
if err := addClusterDeployments(clientset, cl, namespace,
dataVolume, walVolume, tablespaceVolumes); err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
return
}
// Now scale the repo deployment only to ensure it is initialized prior to the primary DB.
// Once the repo is ready, the primary database deployment will then also be scaled to 1.
clusterInfo, err := ScaleClusterDeployments(clientset, *cl, 1, false, false, true, false)
if err != nil {
log.Error(err)
publishClusterCreateFailure(cl, err.Error())
}
log.Debugf("Scaled pgBackRest repo deployment %s to 1 to proceed with initializing "+
"cluster %s", clusterInfo.PrimaryDeployment, cl.GetName())
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/status", crv1.CompletedStatus, crv1.PgclusterResourcePlural, cl.Spec.Name, namespace)
if err != nil {
log.Error("error in status patch " + err.Error())
}
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/PrimaryStorage/name", dataVolume.PersistentVolumeClaimName, crv1.PgclusterResourcePlural, cl.Spec.Name, namespace)
if err != nil {
log.Error("error in pvcname patch " + err.Error())
}
//publish create cluster event
//capture the cluster creation event
pgouser := cl.ObjectMeta.Labels[config.LABEL_PGOUSER]
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventCreateClusterFormat{
EventHeader: events.EventHeader{
Namespace: cl.ObjectMeta.Namespace,
Username: pgouser,
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventCreateCluster,
},
Clustername: cl.ObjectMeta.Name,
WorkflowID: cl.ObjectMeta.Labels[config.LABEL_WORKFLOW_ID],
}
err = events.Publish(f)
if err != nil {
log.Error(err.Error())
}
//add replicas if requested
if cl.Spec.Replicas != "" {
replicaCount, err := strconv.Atoi(cl.Spec.Replicas)
if err != nil {
log.Error("error in replicas value " + err.Error())
publishClusterCreateFailure(cl, err.Error())
return
}
//create a CRD for each replica
for i := 0; i < replicaCount; i++ {
spec := crv1.PgreplicaSpec{}
//get the storage config
spec.ReplicaStorage = cl.Spec.ReplicaStorage
spec.UserLabels = cl.Spec.UserLabels
//the replica should not use the same node labels as the primary
spec.UserLabels[config.LABEL_NODE_LABEL_KEY] = ""
spec.UserLabels[config.LABEL_NODE_LABEL_VALUE] = ""
labels := make(map[string]string)
labels[config.LABEL_PG_CLUSTER] = cl.Spec.Name
spec.ClusterName = cl.Spec.Name
uniqueName := util.RandStringBytesRmndr(4)
labels[config.LABEL_NAME] = cl.Spec.Name + "-" + uniqueName
spec.Name = labels[config.LABEL_NAME]
newInstance := &crv1.Pgreplica{
ObjectMeta: metav1.ObjectMeta{
Name: labels[config.LABEL_NAME],
Labels: labels,
},
Spec: spec,
Status: crv1.PgreplicaStatus{
State: crv1.PgreplicaStateCreated,
Message: "Created, not processed yet",
},
}
_, err = clientset.CrunchydataV1().Pgreplicas(namespace).Create(newInstance)
if err != nil {
log.Error(" in creating Pgreplica instance" + err.Error())
publishClusterCreateFailure(cl, err.Error())
}
}
}
}
// AddClusterBootstrap creates the resources needed to bootstrap a new cluster from an existing
// data source. Specifically, this function creates the bootstrap job that will be run to
// bootstrap the cluster, along with supporting resources (e.g. ConfigMaps and volumes).
func AddClusterBootstrap(clientset kubeapi.Interface, cluster *crv1.Pgcluster) error {
namespace := cluster.GetNamespace()
if err := operator.CreatePGHAConfigMap(clientset, cluster, namespace); err != nil &&
!kerrors.IsAlreadyExists(err) {
publishClusterCreateFailure(cluster, err.Error())
return err
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cluster, namespace,
cluster.Annotations[config.ANNOTATION_CURRENT_PRIMARY], cluster.Spec.PrimaryStorage)
if err != nil {
publishClusterCreateFailure(cluster, err.Error())
return err
}
if err := addClusterBootstrapJob(clientset, cluster, namespace, dataVolume,
walVolume, tablespaceVolumes); err != nil && !kerrors.IsAlreadyExists(err) {
publishClusterCreateFailure(cluster, err.Error())
return err
}
patch, err := json.Marshal(map[string]interface{}{
"status": crv1.PgclusterStatus{
State: crv1.PgclusterStateBootstrapping,
Message: "Bootstapping cluster from an existing data source",
},
})
if err == nil {
_, err = clientset.CrunchydataV1().Pgclusters(namespace).Patch(cluster.Name, types.MergePatchType, patch)
}
if err != nil {
return err
}
return nil
}
// AddBootstrapRepo creates a pgBackRest repository and associated service to use when
// bootstrapping a cluster from an existing data source. If an existing repo is detected
// and is being used to bootstrap another cluster, then an error is returned. If an existing
// repo is detected and is not associated with a bootstrap job (but rather an active cluster),
// then no action is taken and the function resturns. Also, in addition to returning an error
// in the event an error is encountered, the function also returns a 'repoCreated' bool that
// specifies whether or not a repo was actually created.
func AddBootstrapRepo(clientset kubernetes.Interface, cluster *crv1.Pgcluster) (repoCreated bool, err error) {
restoreClusterName := cluster.Spec.PGDataSource.RestoreFrom
repoName := fmt.Sprintf(util.BackrestRepoServiceName, restoreClusterName)
found := true
repoDeployment, err := clientset.AppsV1().Deployments(cluster.GetNamespace()).Get(
repoName, metav1.GetOptions{})
if err != nil {
if !kerrors.IsNotFound(err) {
return
}
found = false
}
if !found {
if err = backrest.CreateRepoDeployment(clientset, cluster, false, true, 1); err != nil {
return
}
repoCreated = true
} else if _, ok := repoDeployment.GetLabels()[config.LABEL_PGHA_BOOTSTRAP]; ok {
err = fmt.Errorf("Unable to create bootstrap repo %s to bootstrap cluster %s "+
"(namespace %s) because it is already running to bootstrap another cluster",
repoName, cluster.GetName(), cluster.GetNamespace())
return
}
return
}
// DeleteClusterBase ...
func DeleteClusterBase(clientset kubernetes.Interface, cl *crv1.Pgcluster, namespace string) {
DeleteCluster(clientset, cl, namespace)
//delete any existing configmaps
if err := deleteConfigMaps(clientset, cl.Spec.Name, namespace); err != nil {
log.Error(err)
}
//delete any existing pgtasks ???
//publish delete cluster event
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventDeleteClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: cl.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventDeleteCluster,
},
Clustername: cl.Spec.Name,
}
if err := events.Publish(f); err != nil {
log.Error(err)
}
}
// ScaleBase ...
func ScaleBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) {
if replica.Spec.Status == crv1.CompletedStatus {
log.Warn("crv1 pgreplica " + replica.Spec.Name + " is already marked complete, will not recreate")
return
}
//get the pgcluster CRD to base the replica off of
cluster, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(replica.Spec.ClusterName, metav1.GetOptions{})
if err != nil |
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cluster, namespace, replica.Spec.Name, replica.Spec.ReplicaStorage)
if err != nil {
log.Error(err)
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//update the replica CRD pvcname
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/replicastorage/name", dataVolume.PersistentVolumeClaimName, crv1.PgreplicaResourcePlural, replica.Spec.Name, namespace)
if err != nil {
log.Error("error in pvcname patch " + err.Error())
}
//create the replica service if it doesnt exist
if err = scaleReplicaCreateMissingService(clientset, replica, cluster, namespace); err != nil {
log.Error(err)
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//instantiate the replica
if err = scaleReplicaCreateDeployment(clientset, replica, cluster, namespace, dataVolume, walVolume, tablespaceVolumes); err != nil {
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//update the replica CRD status
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/status", crv1.CompletedStatus, crv1.PgreplicaResourcePlural, replica.Spec.Name, namespace)
if err != nil {
log.Error("error in status patch " + err.Error())
}
//publish event for replica creation
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventScaleClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: replica.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventScaleCluster,
},
Clustername: cluster.Spec.UserLabels[config.LABEL_REPLICA_NAME],
Replicaname: cluster.Spec.UserLabels[config.LABEL_PG_CLUSTER],
}
if err = events.Publish(f); err != nil {
log.Error(err.Error())
}
}
// ScaleDownBase ...
func ScaleDownBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) {
//get the pgcluster CRD for this replica
_, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(replica.Spec.ClusterName, metav1.GetOptions{})
if err != nil {
return
}
DeleteReplica(clientset, replica, namespace)
//publish event for scale down
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventScaleDownClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: replica.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventScaleDownCluster,
},
Clustername: replica.Spec.ClusterName,
}
err = events.Publish(f)
if err != nil {
log.Error(err.Error())
return
}
}
// UpdateResources updates the PostgreSQL instance Deployments to reflect the
// update resources (i.e. CPU, memory)
func UpdateResources(clientset kubernetes.Interface, restConfig *rest.Config, cluster *crv1.Pgcluster) error {
// get a list of all of the instance deployments for the cluster
deployments, err := operator.GetInstanceDeployments(clientset, cluster)
if err != nil {
return err
}
// iterate through each PostgreSQL instance deployment and update the
// resource values for the database container
//
// NOTE: a future version (near future) will first try to detect the primary
// so that all the replicas are updated first, and then the primary gets the
// update
for _, deployment := range deployments.Items {
// first, initialize the requests/limits resource to empty Resource Lists
deployment.Spec.Template.Spec.Containers[0].Resources.Requests = v1.ResourceList{}
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = v1.ResourceList{}
// now, simply deep copy the values from the CRD
if cluster.Spec.Resources != nil {
deployment.Spec.Template.Spec.Containers[0].Resources.Requests = cluster.Spec.Resources.DeepCopy()
}
if cluster.Spec.Limits != nil {
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = cluster.Spec.Limits.DeepCopy()
}
// Before applying the update, we want to explicitly stop PostgreSQL on each
// instance. This prevents PostgreSQL from having to boot up in crash
// recovery mode.
//
// If an error is returned, we only issue a warning
if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {
log.Warn(err)
}
// update the deployment with the new values
if _, err := clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil {
return err
}
}
return nil
}
// UpdateTablespaces updates the PostgreSQL instance Deployments to update
// what tablespaces are mounted.
// Though any new tablespaces are present in the CRD, to attempt to do less work
// this function takes a map of the new tablespaces that are being added, so we
// only have to check and create the PVCs that are being mounted at this time
//
// To do this, iterate through the tablespace mount map that is present in the
// new cluster.
func UpdateTablespaces(clientset kubernetes.Interface, restConfig *rest.Config,
cluster *crv1.Pgcluster, newTablespaces map[string]crv1.PgStorageSpec) error {
// first, get a list of all of the instance deployments for the cluster
deployments, err := operator.GetInstanceDeployments(clientset, cluster)
if err != nil {
return err
}
tablespaceVolumes := make([]map[string]operator.StorageResult, len(deployments.Items))
// now we can start creating the new tablespaces! First, create the new
// PVCs. The PVCs are created for each **instance** in the cluster, as every
// instance needs to have a distinct PVC for each tablespace
for i, deployment := range deployments.Items {
tablespaceVolumes[i] = make(map[string]operator.StorageResult)
for tablespaceName, storageSpec := range newTablespaces {
// get the name of the tablespace PVC for that instance
tablespacePVCName := operator.GetTablespacePVCName(deployment.Name, tablespaceName)
log.Debugf("creating tablespace PVC [%s] for [%s]", tablespacePVCName, deployment.Name)
// and now create it! If it errors, we just need to return, which
// potentially leaves things in an inconsistent state, but at this point
// only PVC objects have been created
tablespaceVolumes[i][tablespaceName], err = pvc.CreateIfNotExists(clientset,
storageSpec, tablespacePVCName, cluster.Name, cluster.Namespace)
if err != nil {
return err
}
}
}
// now the fun step: update each deployment with the new volumes
for i, deployment := range deployments.Items {
log.Debugf("attach tablespace volumes to [%s]", deployment.Name)
// iterate through each table space and prepare the Volume and
// VolumeMount clause for each instance
for tablespaceName := range newTablespaces {
// this is the volume to be added for the tablespace
volume := v1.Volume{
Name: operator.GetTablespaceVolumeName(tablespaceName),
VolumeSource: tablespaceVolumes[i][tablespaceName].VolumeSource(),
}
// add the volume to the list of volumes
deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volume)
// now add the volume mount point to that of the database container
volumeMount := v1.VolumeMount{
MountPath: fmt.Sprintf("%s%s", config.VOLUME_TABLESPACE_PATH_PREFIX, tablespaceName),
Name: operator.GetTablespaceVolumeName(tablespaceName),
}
// we can do this as we always know that the "database" container is the
// first container in the list
deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(
deployment.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
// add any supplemental groups specified in storage configuration.
// SecurityContext is always initialized because we use fsGroup.
deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups = append(
deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups,
tablespaceVolumes[i][tablespaceName].SupplementalGroups...)
}
// find the "PGHA_TABLESPACES" value and update it with the new tablespace
// name list
ok := false
for i, envVar := range deployment.Spec.Template.Spec.Containers[0].Env {
// yup, it's an old fashioned linear time lookup
if envVar.Name == "PGHA_TABLESPACES" {
deployment.Spec.Template.Spec.Containers[0].Env[i].Value = operator.GetTablespaceNames(
cluster.Spec.TablespaceMounts)
ok = true
}
}
// if its not found, we need to add it to the env
if !ok {
envVar := v1.EnvVar{
Name: "PGHA_TABLESPACES",
Value: operator.GetTablespaceNames(cluster.Spec.TablespaceMounts),
}
deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, envVar)
}
// Before applying the update, we want to explicitly stop PostgreSQL on each
// instance. This prevents PostgreSQL from having to boot up in crash
// recovery mode.
//
// If an error is returned, we only issue a warning
if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {
log.Warn(err)
}
// finally, update the Deployment. Potential to put things into an
// inconsistent state if any of these updates fail
if _, err := clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil {
return err
}
}
return nil
}
// annotateBackrestSecret annotates the pgBackRest repository secret with relevant cluster
// configuration as needed to support bootstrapping from the repository after the cluster
// has been deleted
func annotateBackrestSecret(clientset kubernetes.Interface, cluster *crv1.Pgcluster) error {
clusterName := cluster.GetName()
namespace := cluster.GetNamespace()
// simple helper that takes two config options, returning the first if populated, and
// if not the returning the second (which also might now be populated)
cfg := func(cl, op string) string {
if cl != "" {
return cl
}
return op
}
cl := cluster.Spec
op := operator.Pgo.Cluster
values := map[string]string{
config.ANNOTATION_PG_PORT: cluster.Spec.Port,
config.ANNOTATION_REPO_PATH: util.GetPGBackRestRepoPath(*cluster),
config.ANNOTATION_S3_BUCKET: cfg(cl.BackrestS3Bucket, op.BackrestS3Bucket),
config.ANNOTATION_S3_ENDPOINT: cfg(cl.BackrestS3Endpoint, op.BackrestS3Endpoint),
config.ANNOTATION_S3_REGION: cfg(cl.BackrestS3Region, op.BackrestS3Region),
config.ANNOTATION_SSHD_PORT: strconv.Itoa(operator.Pgo.Cluster.BackrestPort),
config.ANNOTATION_SUPPLEMENTAL_GROUPS: cluster.Spec.BackrestStorage.SupplementalGroups,
config.ANNOTATION_S3_URI_STYLE: cfg(cl.BackrestS3URIStyle, op.BackrestS3URIStyle),
config.ANNOTATION_S3_VERIFY_TLS: cfg(cl.BackrestS3VerifyTLS, op.BackrestS3VerifyTLS),
}
valuesJSON, err := json.Marshal(values)
if err != nil {
return err
}
secretName := fmt.Sprintf(util.BackrestRepoSecretName, clusterName)
patchString := fmt.Sprintf(`{"metadata":{"annotations":%s}}`, string(valuesJSON))
log.Debugf("About to patch secret %s (namespace %s) using:\n%s", secretName, namespace,
patchString)
if _, err := clientset.CoreV1().Secrets(namespace).Patch(secretName, types.MergePatchType,
[]byte(patchString)); err != nil {
return err
}
return nil
}
func deleteConfigMaps(clientset kubernetes.Interface, clusterName, ns string) error {
label := fmt.Sprintf("pg-cluster=%s", clusterName)
list, err := clientset.CoreV1().ConfigMaps(ns).List(metav1.ListOptions{LabelSelector: label})
if err != nil {
return fmt.Errorf("No configMaps found for selector: %s", label)
}
for _, configmap := range list.Items {
err := clientset.CoreV1().ConfigMaps(ns).Delete(configmap.Name, &metav1.DeleteOptions{})
if err != nil {
return err
}
}
return nil
}
func publishClusterCreateFailure(cl *crv1.Pgcluster, errorMsg string) {
pgouser := cl.ObjectMeta.Labels[config.LABEL_PGOUSER]
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventCreateClusterFailureFormat{
EventHeader: events.EventHeader{
Namespace: cl.ObjectMeta.Namespace,
Username: pgouser,
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventCreateClusterFailure,
},
Clustername: cl.ObjectMeta.Name,
ErrorMessage: errorMsg,
WorkflowID: cl.ObjectMeta.Labels[config.LABEL_WORKFLOW_ID],
}
err := events.Publish(f)
if err != nil {
log.Error(err.Error())
}
}
func publishClusterShutdown(cluster crv1.Pgcluster) error {
clusterName := cluster.Name
//capture the cluster creation event
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventShutdownClusterFormat{
EventHeader: events.EventHeader{
Namespace: cluster.Namespace,
Username: cluster.Spec.UserLabels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventShutdownCluster,
},
Clustername: clusterName,
}
if err := events.Publish(f); err != nil {
log.Error(err.Error())
return err
}
return nil
}
// stopPostgreSQLInstance is a proxy function for the main
// StopPostgreSQLInstance function, as it preps a Deployment to have its
// PostgreSQL instance shut down. This helps to ensure that a PostgreSQL
// instance will launch and not be in crash recovery mode
func stopPostgreSQLInstance(clientset kubernetes.Interface, restConfig *rest.Config, deployment apps_v1.Deployment) error {
// First, attempt to get the PostgreSQL instance Pod attachd to this
// particular deployment
selector := fmt.Sprintf("%s=%s", config.LABEL_DEPLOYMENT_NAME, deployment.Name)
pods, err := clientset.CoreV1().Pods(deployment.Namespace).List(metav1.ListOptions{LabelSelector: selector})
// if there is a bona fide error, return.
// However, if no Pods are found, issue a warning, but do not return an error
// This likely means that PostgreSQL is already shutdown, but hey, it's the
// cloud
if err != nil {
return err
} else if len(pods.Items) == 0 {
log.Infof("not shutting down PostgreSQL instance [%s] as the Pod cannot be found", deployment.Name)
return nil
}
// get the first pod off the items list
pod := pods.Items[0]
// now we can shut down the cluster
if err := util.StopPostgreSQLInstance(clientset, restConfig, &pod, deployment.Name); err != nil {
return err
}
return nil
}
| {
return
} | conditional_block |
train.py | # standard modules
import sys
assert sys.version_info >= (3, 5), "Python 3.5 or greater required"
import argparse
import os
import time
import json
from packaging import version
import logging
logger = logging.getLogger('train_ganomaly')
debug = logger.debug
info = logger.info
warning = logger.warning
error = logger.error
critical = logger.critical
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# external modules
import tensorflow as tf
import numpy as np
# package modules
from datasets.mvtec_ad import get_labeled_dataset
from datasets.common import get_dataset
from models.ganomaly import GANomaly
from models.cae import CAE
from models.cnae import CNAE
from models.cvae import CVAE
from utils.callbacks import ADModelEvaluator
from utils.datasets import create_anomaly_dataset
default_args = {
# training params
'epochs': 1,
'batch_size': 64,
'learning_rate': 0.0002,
'early_stopping_patience': 100,
'reduce_lr_patience': 0,
# tf.data piepline params
'dataset_name': 'mnist',
'cache_path': None,
'abnormal_class': 2, # only valid for mnist, fashion_mnist, cifar10, cifar100 and stl10
'image_size': 32,
'image_channels': 0, # only valid for MVTec AD
'buffer_size': 1000,
'shuffle': True,
'prefetch': True,
'random_flip': False,
'random_crop': False,
'random_brightness': False,
'repeat_dataset': None,
# model params
'model_name': 'ganomaly',
'latent_size': 100,
'intermediate_size': 0, # only valid for cvae
'n_filters': 64,
'n_extra_layers': 0,
'w_adv': 1, # only valid for GANomaly
'w_rec': 50, # only valid for GANomaly
'w_enc': 1, # only valid for GANomaly
# debugging params
'train_steps': None,
'eval_steps': None,
'log_level': 'info',
'debug': False,
# input/output dir params
'data_dir': './trainig/data',
'model_dir': './trainig/model',
'output_data_dir': './trainig/output'
}
def build_model(args) -> tf.keras.Model:
image_shape = (args.image_size, args.image_size, args.image_channels)
def build_default(model_class, **kwargs):
return model_class(
input_shape=image_shape,
latent_size=args.latent_size,
n_filters=args.n_filters,
n_extra_layers=args.n_extra_layers,
**kwargs
)
def compile_default(model):
model.compile(
optimizer=tf.keras.optimizers.Adam(
learning_rate=args.learning_rate),
loss=tf.keras.losses.MeanSquaredError(),
metrics=[
tf.keras.losses.MeanAbsoluteError(),
tf.keras.losses.BinaryCrossentropy()
]
)
return model
def build_ganomaly():
model = build_default(GANomaly)
model.compile(
loss={
'adv': tf.keras.losses.MeanSquaredError(),
'rec': tf.keras.losses.MeanAbsoluteError(),
'enc': tf.keras.losses.MeanSquaredError(),
'dis': tf.keras.losses.BinaryCrossentropy()
},
loss_weights={
'adv': args.w_adv,
'rec': args.w_rec,
'enc': args.w_enc
},
optimizer={
'gen': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999),
'dis': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999)
}
)
return model
def switcher_default():
warning("Unknown model_name, using 'ganomaly' as default!")
return build_ganomaly()
switcher = {
'ganomaly': build_ganomaly,
'cae': lambda: compile_default(build_default(CAE)),
'cnae': lambda: compile_default(build_default(CNAE)),
'cvae': lambda: compile_default(build_default(CVAE,
intermediate_size=args.intermediate_size))
}
model = switcher.get(args.model_name, switcher_default)()
model.build((None, *image_shape))
return model
def get_prepared_datasets(args):
# get dataset by name with simple try an error
try:
train_ds = get_labeled_dataset(
category=args.dataset_name, split='train', image_channels=args.image_channels, binary_labels=True)
test_ds = get_labeled_dataset(
category=args.dataset_name, split='test', image_channels=args.image_channels, binary_labels=True)
args.image_channels = 3 if args.image_channels == 0 else args.image_channels
except ValueError:
try:
(train_images, train_labels), (test_images, test_labels) = create_anomaly_dataset(
dataset=get_dataset(args.dataset_name), abnormal_class=args.abnormal_class)
args.dataset_name += str(args.abnormal_class)
train_ds = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels))
test_ds = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels))
args.image_channels = train_images.shape[-1]
except ValueError:
raise ValueError(
"{} isn't a valid dataset".format(args.dataset_name))
def resize_image(image, label):
image = tf.image.resize(image, (args.image_size, args.image_size))
return image, label
train_ds = train_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test_ds = test_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.cache_path:
cache_dir = os.path.join(args.cache_path, 'tfdata_cache_{}_{}_{}'.format(
args.dataset_name, args.image_size, args.image_channels))
os.makedirs(cache_dir, exist_ok=True)
train_ds = train_ds.cache(os.path.join(cache_dir, 'train'))
test_ds = test_ds.cache(os.path.join(cache_dir, 'test'))
if args.repeat_dataset:
train_ds = train_ds.repeat(args.repeat_dataset)
if args.random_flip or args.random_crop or args.random_brightness: | if args.random_flip:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if args.random_crop:
image_shape = (args.image_size, args.image_size,
args.image_channels)
image = tf.image.resize_with_crop_or_pad(
image, image_shape[-3] + 6, image_shape[-2] + 6)
image = tf.image.random_crop(image, size=image_shape)
if args.random_brightness:
image = tf.image.random_brightness(image, max_delta=0.5)
image = tf.clip_by_value(image, 0, 1)
return image, label
train_ds = train_ds.map(
augment_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.shuffle:
train_ds = train_ds.shuffle(args.buffer_size)
if args.prefetch:
train_ds = train_ds.prefetch(args.buffer_size)
test_ds = test_ds.prefetch(args.buffer_size)
return train_ds, test_ds
def main(args):
train_ds, test_ds = get_prepared_datasets(args)
train_count = tf.data.experimental.cardinality(train_ds).numpy()
test_count = tf.data.experimental.cardinality(test_ds).numpy()
info("dataset: train_count: {}, test_count: {}".format(train_count, test_count))
model = build_model(args)
model.summary(print_fn=info)
#model.net_gen.summary(print_fn=info) # TODO call it from summary() of GANomaly
#model.net_dis.summary(print_fn=info)
#model.load_weights('./no/valid/path')
adme = ADModelEvaluator(
test_count=test_count if args.eval_steps is None else args.eval_steps * args.batch_size,
model_dir=args.sm_model_dir or args.model_dir,
early_stopping_patience=args.early_stopping_patience,
reduce_lr_patience=args.reduce_lr_patience
)
results = model.fit(
x=train_ds.batch(args.batch_size),
validation_data=test_ds.batch(args.batch_size),
callbacks=[adme],
epochs=args.epochs,
steps_per_epoch=args.train_steps,
validation_steps=args.eval_steps,
verbose=2
)
# remove the useless per image losses and labels and add test results
del results.history['val_losses']
del results.history['val_labels']
results.history['val_auroc'] = adme.test_results
# https://stackoverflow.com/questions/23613426/write-dictionary-of-lists-to-a-csv-file
info("results: {}".format(json.dumps(
results.history, indent=4, sort_keys=True, default=str)))
critical("END OF SCRIPT REACHED")
def parse_args():
"""
https://docs.python.org/3.6/library/argparse.html
https://sagemaker.readthedocs.io/en/stable/using_tf.html#prepare-a-script-mode-training-script
https://github.com/aws/sagemaker-containers#list-of-provided-environment-variables-by-sagemaker-containers
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str2logging(v):
return {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}.get(v, logging.INFO)
def str2posint(v):
v = int(v)
return v if v > 0 else None
parser = argparse.ArgumentParser()
# training params
parser.add_argument('--epochs', type=int, default=default_args['epochs'])
parser.add_argument('--batch_size', type=int,
default=default_args['batch_size'])
parser.add_argument('--learning_rate', type=float,
default=default_args['learning_rate'])
parser.add_argument('--early_stopping_patience', '--early_stopping', type=int,
default=default_args['early_stopping_patience'])
parser.add_argument('--reduce_lr_patience', type=int,
default=default_args['reduce_lr_patience'])
# tf.data piepline options
parser.add_argument('--dataset_name', type=str,
default=default_args['dataset_name'])
parser.add_argument('--cache_path', type=str,
default=default_args['cache_path'])
parser.add_argument('--abnormal_class', type=int,
default=default_args['abnormal_class'])
parser.add_argument('--image_size', type=int,
default=default_args['image_size'])
parser.add_argument('--image_channels', type=int,
default=default_args['image_channels'])
parser.add_argument('--buffer_size', type=int,
default=default_args['buffer_size'])
parser.add_argument('--shuffle', type=str2bool, nargs='?',
const=True, default=default_args['shuffle'])
parser.add_argument('--prefetch', type=str2bool, nargs='?',
const=True, default=default_args['prefetch'])
parser.add_argument('--random_flip', type=str2bool, nargs='?',
const=True, default=default_args['random_flip'])
parser.add_argument('--random_crop', type=str2bool, nargs='?',
const=True, default=default_args['random_crop'])
parser.add_argument('--random_brightness', type=str2bool, nargs='?',
const=True, default=default_args['random_brightness'])
parser.add_argument('--repeat_dataset', type=str2posint,
default=default_args['repeat_dataset'])
# model params
parser.add_argument('--model_name', type=str,
default=default_args['model_name'])
parser.add_argument('--latent_size', type=int,
default=default_args['latent_size'])
parser.add_argument('--intermediate_size', type=int,
default=default_args['intermediate_size'])
parser.add_argument('--n_filters', type=int,
default=default_args['n_filters'])
parser.add_argument('--n_extra_layers', type=int,
default=default_args['n_extra_layers'])
parser.add_argument('--w_adv', type=int,
default=default_args['w_adv'])
parser.add_argument('--w_rec', type=int,
default=default_args['w_rec'])
parser.add_argument('--w_enc', type=int,
default=default_args['w_enc'])
# debugging params
parser.add_argument('--train_steps', type=str2posint,
default=default_args['train_steps'])
parser.add_argument('--eval_steps', type=str2posint,
default=default_args['eval_steps'])
parser.add_argument('--log_level', type=str2logging,
default=default_args['log_level'])
parser.add_argument('--debug', type=str2bool, nargs='?',
const=True, default=default_args['debug'])
# input/output dir params
parser.add_argument('--data_dir', type=str,
default=os.environ.get('SM_CHANNEL_DATA_DIR') or default_args['data_dir'])
parser.add_argument('--model_dir', type=str,
default=default_args['model_dir'])
parser.add_argument('--sm_model_dir', type=str,
default=os.environ.get('SM_MODEL_DIR'))
parser.add_argument('--output_data_dir', type=str,
default=os.environ.get('SM_OUTPUT_DATA_DIR') or default_args['output_data_dir'])
return parser.parse_known_args()
if __name__ == '__main__':
args, unknown = parse_args()
# setup logging
logging.basicConfig(stream=sys.stdout, # SageMaker doesn't log the default stderr
level=args.log_level,
# https://docs.python.org/3.8/library/logging.html#logrecord-attributes
format='[%(asctime)s | %(name)s | %(levelname)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
# print info about script params and env values
debug('Know args: {}'.format(args))
if unknown:
debug('Unknown args: {}'.format(unknown))
sm_env_vals = ['{}="{}"'.format(env, val)
for env, val in os.environ.items() if env.startswith('SM_')]
if sm_env_vals:
debug('ENV: {}'.format(', '.join(sm_env_vals)))
# use eager execution for debugging
if args.debug:
assert version.parse('2.3') <= version.parse(tf.version.VERSION), "Tensorflow 2.3 or geater required"
tf.config.run_functions_eagerly(True)
main(args) | def augment_image(image, label): | random_line_split |
train.py | # standard modules
import sys
assert sys.version_info >= (3, 5), "Python 3.5 or greater required"
import argparse
import os
import time
import json
from packaging import version
import logging
logger = logging.getLogger('train_ganomaly')
debug = logger.debug
info = logger.info
warning = logger.warning
error = logger.error
critical = logger.critical
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# external modules
import tensorflow as tf
import numpy as np
# package modules
from datasets.mvtec_ad import get_labeled_dataset
from datasets.common import get_dataset
from models.ganomaly import GANomaly
from models.cae import CAE
from models.cnae import CNAE
from models.cvae import CVAE
from utils.callbacks import ADModelEvaluator
from utils.datasets import create_anomaly_dataset
default_args = {
# training params
'epochs': 1,
'batch_size': 64,
'learning_rate': 0.0002,
'early_stopping_patience': 100,
'reduce_lr_patience': 0,
# tf.data piepline params
'dataset_name': 'mnist',
'cache_path': None,
'abnormal_class': 2, # only valid for mnist, fashion_mnist, cifar10, cifar100 and stl10
'image_size': 32,
'image_channels': 0, # only valid for MVTec AD
'buffer_size': 1000,
'shuffle': True,
'prefetch': True,
'random_flip': False,
'random_crop': False,
'random_brightness': False,
'repeat_dataset': None,
# model params
'model_name': 'ganomaly',
'latent_size': 100,
'intermediate_size': 0, # only valid for cvae
'n_filters': 64,
'n_extra_layers': 0,
'w_adv': 1, # only valid for GANomaly
'w_rec': 50, # only valid for GANomaly
'w_enc': 1, # only valid for GANomaly
# debugging params
'train_steps': None,
'eval_steps': None,
'log_level': 'info',
'debug': False,
# input/output dir params
'data_dir': './trainig/data',
'model_dir': './trainig/model',
'output_data_dir': './trainig/output'
}
def build_model(args) -> tf.keras.Model:
image_shape = (args.image_size, args.image_size, args.image_channels)
def build_default(model_class, **kwargs):
return model_class(
input_shape=image_shape,
latent_size=args.latent_size,
n_filters=args.n_filters,
n_extra_layers=args.n_extra_layers,
**kwargs
)
def compile_default(model):
model.compile(
optimizer=tf.keras.optimizers.Adam(
learning_rate=args.learning_rate),
loss=tf.keras.losses.MeanSquaredError(),
metrics=[
tf.keras.losses.MeanAbsoluteError(),
tf.keras.losses.BinaryCrossentropy()
]
)
return model
def build_ganomaly():
model = build_default(GANomaly)
model.compile(
loss={
'adv': tf.keras.losses.MeanSquaredError(),
'rec': tf.keras.losses.MeanAbsoluteError(),
'enc': tf.keras.losses.MeanSquaredError(),
'dis': tf.keras.losses.BinaryCrossentropy()
},
loss_weights={
'adv': args.w_adv,
'rec': args.w_rec,
'enc': args.w_enc
},
optimizer={
'gen': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999),
'dis': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999)
}
)
return model
def switcher_default():
warning("Unknown model_name, using 'ganomaly' as default!")
return build_ganomaly()
switcher = {
'ganomaly': build_ganomaly,
'cae': lambda: compile_default(build_default(CAE)),
'cnae': lambda: compile_default(build_default(CNAE)),
'cvae': lambda: compile_default(build_default(CVAE,
intermediate_size=args.intermediate_size))
}
model = switcher.get(args.model_name, switcher_default)()
model.build((None, *image_shape))
return model
def get_prepared_datasets(args):
# get dataset by name with simple try an error
try:
train_ds = get_labeled_dataset(
category=args.dataset_name, split='train', image_channels=args.image_channels, binary_labels=True)
test_ds = get_labeled_dataset(
category=args.dataset_name, split='test', image_channels=args.image_channels, binary_labels=True)
args.image_channels = 3 if args.image_channels == 0 else args.image_channels
except ValueError:
try:
(train_images, train_labels), (test_images, test_labels) = create_anomaly_dataset(
dataset=get_dataset(args.dataset_name), abnormal_class=args.abnormal_class)
args.dataset_name += str(args.abnormal_class)
train_ds = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels))
test_ds = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels))
args.image_channels = train_images.shape[-1]
except ValueError:
raise ValueError(
"{} isn't a valid dataset".format(args.dataset_name))
def resize_image(image, label):
image = tf.image.resize(image, (args.image_size, args.image_size))
return image, label
train_ds = train_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test_ds = test_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.cache_path:
cache_dir = os.path.join(args.cache_path, 'tfdata_cache_{}_{}_{}'.format(
args.dataset_name, args.image_size, args.image_channels))
os.makedirs(cache_dir, exist_ok=True)
train_ds = train_ds.cache(os.path.join(cache_dir, 'train'))
test_ds = test_ds.cache(os.path.join(cache_dir, 'test'))
if args.repeat_dataset:
train_ds = train_ds.repeat(args.repeat_dataset)
if args.random_flip or args.random_crop or args.random_brightness:
def augment_image(image, label):
if args.random_flip:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if args.random_crop:
|
if args.random_brightness:
image = tf.image.random_brightness(image, max_delta=0.5)
image = tf.clip_by_value(image, 0, 1)
return image, label
train_ds = train_ds.map(
augment_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.shuffle:
train_ds = train_ds.shuffle(args.buffer_size)
if args.prefetch:
train_ds = train_ds.prefetch(args.buffer_size)
test_ds = test_ds.prefetch(args.buffer_size)
return train_ds, test_ds
def main(args):
train_ds, test_ds = get_prepared_datasets(args)
train_count = tf.data.experimental.cardinality(train_ds).numpy()
test_count = tf.data.experimental.cardinality(test_ds).numpy()
info("dataset: train_count: {}, test_count: {}".format(train_count, test_count))
model = build_model(args)
model.summary(print_fn=info)
#model.net_gen.summary(print_fn=info) # TODO call it from summary() of GANomaly
#model.net_dis.summary(print_fn=info)
#model.load_weights('./no/valid/path')
adme = ADModelEvaluator(
test_count=test_count if args.eval_steps is None else args.eval_steps * args.batch_size,
model_dir=args.sm_model_dir or args.model_dir,
early_stopping_patience=args.early_stopping_patience,
reduce_lr_patience=args.reduce_lr_patience
)
results = model.fit(
x=train_ds.batch(args.batch_size),
validation_data=test_ds.batch(args.batch_size),
callbacks=[adme],
epochs=args.epochs,
steps_per_epoch=args.train_steps,
validation_steps=args.eval_steps,
verbose=2
)
# remove the useless per image losses and labels and add test results
del results.history['val_losses']
del results.history['val_labels']
results.history['val_auroc'] = adme.test_results
# https://stackoverflow.com/questions/23613426/write-dictionary-of-lists-to-a-csv-file
info("results: {}".format(json.dumps(
results.history, indent=4, sort_keys=True, default=str)))
critical("END OF SCRIPT REACHED")
def parse_args():
"""
https://docs.python.org/3.6/library/argparse.html
https://sagemaker.readthedocs.io/en/stable/using_tf.html#prepare-a-script-mode-training-script
https://github.com/aws/sagemaker-containers#list-of-provided-environment-variables-by-sagemaker-containers
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str2logging(v):
return {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}.get(v, logging.INFO)
def str2posint(v):
v = int(v)
return v if v > 0 else None
parser = argparse.ArgumentParser()
# training params
parser.add_argument('--epochs', type=int, default=default_args['epochs'])
parser.add_argument('--batch_size', type=int,
default=default_args['batch_size'])
parser.add_argument('--learning_rate', type=float,
default=default_args['learning_rate'])
parser.add_argument('--early_stopping_patience', '--early_stopping', type=int,
default=default_args['early_stopping_patience'])
parser.add_argument('--reduce_lr_patience', type=int,
default=default_args['reduce_lr_patience'])
# tf.data piepline options
parser.add_argument('--dataset_name', type=str,
default=default_args['dataset_name'])
parser.add_argument('--cache_path', type=str,
default=default_args['cache_path'])
parser.add_argument('--abnormal_class', type=int,
default=default_args['abnormal_class'])
parser.add_argument('--image_size', type=int,
default=default_args['image_size'])
parser.add_argument('--image_channels', type=int,
default=default_args['image_channels'])
parser.add_argument('--buffer_size', type=int,
default=default_args['buffer_size'])
parser.add_argument('--shuffle', type=str2bool, nargs='?',
const=True, default=default_args['shuffle'])
parser.add_argument('--prefetch', type=str2bool, nargs='?',
const=True, default=default_args['prefetch'])
parser.add_argument('--random_flip', type=str2bool, nargs='?',
const=True, default=default_args['random_flip'])
parser.add_argument('--random_crop', type=str2bool, nargs='?',
const=True, default=default_args['random_crop'])
parser.add_argument('--random_brightness', type=str2bool, nargs='?',
const=True, default=default_args['random_brightness'])
parser.add_argument('--repeat_dataset', type=str2posint,
default=default_args['repeat_dataset'])
# model params
parser.add_argument('--model_name', type=str,
default=default_args['model_name'])
parser.add_argument('--latent_size', type=int,
default=default_args['latent_size'])
parser.add_argument('--intermediate_size', type=int,
default=default_args['intermediate_size'])
parser.add_argument('--n_filters', type=int,
default=default_args['n_filters'])
parser.add_argument('--n_extra_layers', type=int,
default=default_args['n_extra_layers'])
parser.add_argument('--w_adv', type=int,
default=default_args['w_adv'])
parser.add_argument('--w_rec', type=int,
default=default_args['w_rec'])
parser.add_argument('--w_enc', type=int,
default=default_args['w_enc'])
# debugging params
parser.add_argument('--train_steps', type=str2posint,
default=default_args['train_steps'])
parser.add_argument('--eval_steps', type=str2posint,
default=default_args['eval_steps'])
parser.add_argument('--log_level', type=str2logging,
default=default_args['log_level'])
parser.add_argument('--debug', type=str2bool, nargs='?',
const=True, default=default_args['debug'])
# input/output dir params
parser.add_argument('--data_dir', type=str,
default=os.environ.get('SM_CHANNEL_DATA_DIR') or default_args['data_dir'])
parser.add_argument('--model_dir', type=str,
default=default_args['model_dir'])
parser.add_argument('--sm_model_dir', type=str,
default=os.environ.get('SM_MODEL_DIR'))
parser.add_argument('--output_data_dir', type=str,
default=os.environ.get('SM_OUTPUT_DATA_DIR') or default_args['output_data_dir'])
return parser.parse_known_args()
if __name__ == '__main__':
args, unknown = parse_args()
# setup logging
logging.basicConfig(stream=sys.stdout, # SageMaker doesn't log the default stderr
level=args.log_level,
# https://docs.python.org/3.8/library/logging.html#logrecord-attributes
format='[%(asctime)s | %(name)s | %(levelname)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
# print info about script params and env values
debug('Know args: {}'.format(args))
if unknown:
debug('Unknown args: {}'.format(unknown))
sm_env_vals = ['{}="{}"'.format(env, val)
for env, val in os.environ.items() if env.startswith('SM_')]
if sm_env_vals:
debug('ENV: {}'.format(', '.join(sm_env_vals)))
# use eager execution for debugging
if args.debug:
assert version.parse('2.3') <= version.parse(tf.version.VERSION), "Tensorflow 2.3 or geater required"
tf.config.run_functions_eagerly(True)
main(args)
| image_shape = (args.image_size, args.image_size,
args.image_channels)
image = tf.image.resize_with_crop_or_pad(
image, image_shape[-3] + 6, image_shape[-2] + 6)
image = tf.image.random_crop(image, size=image_shape) | conditional_block |
train.py | # standard modules
import sys
assert sys.version_info >= (3, 5), "Python 3.5 or greater required"
import argparse
import os
import time
import json
from packaging import version
import logging
logger = logging.getLogger('train_ganomaly')
debug = logger.debug
info = logger.info
warning = logger.warning
error = logger.error
critical = logger.critical
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# external modules
import tensorflow as tf
import numpy as np
# package modules
from datasets.mvtec_ad import get_labeled_dataset
from datasets.common import get_dataset
from models.ganomaly import GANomaly
from models.cae import CAE
from models.cnae import CNAE
from models.cvae import CVAE
from utils.callbacks import ADModelEvaluator
from utils.datasets import create_anomaly_dataset
default_args = {
# training params
'epochs': 1,
'batch_size': 64,
'learning_rate': 0.0002,
'early_stopping_patience': 100,
'reduce_lr_patience': 0,
# tf.data piepline params
'dataset_name': 'mnist',
'cache_path': None,
'abnormal_class': 2, # only valid for mnist, fashion_mnist, cifar10, cifar100 and stl10
'image_size': 32,
'image_channels': 0, # only valid for MVTec AD
'buffer_size': 1000,
'shuffle': True,
'prefetch': True,
'random_flip': False,
'random_crop': False,
'random_brightness': False,
'repeat_dataset': None,
# model params
'model_name': 'ganomaly',
'latent_size': 100,
'intermediate_size': 0, # only valid for cvae
'n_filters': 64,
'n_extra_layers': 0,
'w_adv': 1, # only valid for GANomaly
'w_rec': 50, # only valid for GANomaly
'w_enc': 1, # only valid for GANomaly
# debugging params
'train_steps': None,
'eval_steps': None,
'log_level': 'info',
'debug': False,
# input/output dir params
'data_dir': './trainig/data',
'model_dir': './trainig/model',
'output_data_dir': './trainig/output'
}
def build_model(args) -> tf.keras.Model:
image_shape = (args.image_size, args.image_size, args.image_channels)
def build_default(model_class, **kwargs):
|
def compile_default(model):
model.compile(
optimizer=tf.keras.optimizers.Adam(
learning_rate=args.learning_rate),
loss=tf.keras.losses.MeanSquaredError(),
metrics=[
tf.keras.losses.MeanAbsoluteError(),
tf.keras.losses.BinaryCrossentropy()
]
)
return model
def build_ganomaly():
model = build_default(GANomaly)
model.compile(
loss={
'adv': tf.keras.losses.MeanSquaredError(),
'rec': tf.keras.losses.MeanAbsoluteError(),
'enc': tf.keras.losses.MeanSquaredError(),
'dis': tf.keras.losses.BinaryCrossentropy()
},
loss_weights={
'adv': args.w_adv,
'rec': args.w_rec,
'enc': args.w_enc
},
optimizer={
'gen': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999),
'dis': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999)
}
)
return model
def switcher_default():
warning("Unknown model_name, using 'ganomaly' as default!")
return build_ganomaly()
switcher = {
'ganomaly': build_ganomaly,
'cae': lambda: compile_default(build_default(CAE)),
'cnae': lambda: compile_default(build_default(CNAE)),
'cvae': lambda: compile_default(build_default(CVAE,
intermediate_size=args.intermediate_size))
}
model = switcher.get(args.model_name, switcher_default)()
model.build((None, *image_shape))
return model
def get_prepared_datasets(args):
# get dataset by name with simple try an error
try:
train_ds = get_labeled_dataset(
category=args.dataset_name, split='train', image_channels=args.image_channels, binary_labels=True)
test_ds = get_labeled_dataset(
category=args.dataset_name, split='test', image_channels=args.image_channels, binary_labels=True)
args.image_channels = 3 if args.image_channels == 0 else args.image_channels
except ValueError:
try:
(train_images, train_labels), (test_images, test_labels) = create_anomaly_dataset(
dataset=get_dataset(args.dataset_name), abnormal_class=args.abnormal_class)
args.dataset_name += str(args.abnormal_class)
train_ds = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels))
test_ds = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels))
args.image_channels = train_images.shape[-1]
except ValueError:
raise ValueError(
"{} isn't a valid dataset".format(args.dataset_name))
def resize_image(image, label):
image = tf.image.resize(image, (args.image_size, args.image_size))
return image, label
train_ds = train_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test_ds = test_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.cache_path:
cache_dir = os.path.join(args.cache_path, 'tfdata_cache_{}_{}_{}'.format(
args.dataset_name, args.image_size, args.image_channels))
os.makedirs(cache_dir, exist_ok=True)
train_ds = train_ds.cache(os.path.join(cache_dir, 'train'))
test_ds = test_ds.cache(os.path.join(cache_dir, 'test'))
if args.repeat_dataset:
train_ds = train_ds.repeat(args.repeat_dataset)
if args.random_flip or args.random_crop or args.random_brightness:
def augment_image(image, label):
if args.random_flip:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if args.random_crop:
image_shape = (args.image_size, args.image_size,
args.image_channels)
image = tf.image.resize_with_crop_or_pad(
image, image_shape[-3] + 6, image_shape[-2] + 6)
image = tf.image.random_crop(image, size=image_shape)
if args.random_brightness:
image = tf.image.random_brightness(image, max_delta=0.5)
image = tf.clip_by_value(image, 0, 1)
return image, label
train_ds = train_ds.map(
augment_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.shuffle:
train_ds = train_ds.shuffle(args.buffer_size)
if args.prefetch:
train_ds = train_ds.prefetch(args.buffer_size)
test_ds = test_ds.prefetch(args.buffer_size)
return train_ds, test_ds
def main(args):
train_ds, test_ds = get_prepared_datasets(args)
train_count = tf.data.experimental.cardinality(train_ds).numpy()
test_count = tf.data.experimental.cardinality(test_ds).numpy()
info("dataset: train_count: {}, test_count: {}".format(train_count, test_count))
model = build_model(args)
model.summary(print_fn=info)
#model.net_gen.summary(print_fn=info) # TODO call it from summary() of GANomaly
#model.net_dis.summary(print_fn=info)
#model.load_weights('./no/valid/path')
adme = ADModelEvaluator(
test_count=test_count if args.eval_steps is None else args.eval_steps * args.batch_size,
model_dir=args.sm_model_dir or args.model_dir,
early_stopping_patience=args.early_stopping_patience,
reduce_lr_patience=args.reduce_lr_patience
)
results = model.fit(
x=train_ds.batch(args.batch_size),
validation_data=test_ds.batch(args.batch_size),
callbacks=[adme],
epochs=args.epochs,
steps_per_epoch=args.train_steps,
validation_steps=args.eval_steps,
verbose=2
)
# remove the useless per image losses and labels and add test results
del results.history['val_losses']
del results.history['val_labels']
results.history['val_auroc'] = adme.test_results
# https://stackoverflow.com/questions/23613426/write-dictionary-of-lists-to-a-csv-file
info("results: {}".format(json.dumps(
results.history, indent=4, sort_keys=True, default=str)))
critical("END OF SCRIPT REACHED")
def parse_args():
"""
https://docs.python.org/3.6/library/argparse.html
https://sagemaker.readthedocs.io/en/stable/using_tf.html#prepare-a-script-mode-training-script
https://github.com/aws/sagemaker-containers#list-of-provided-environment-variables-by-sagemaker-containers
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str2logging(v):
return {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}.get(v, logging.INFO)
def str2posint(v):
v = int(v)
return v if v > 0 else None
parser = argparse.ArgumentParser()
# training params
parser.add_argument('--epochs', type=int, default=default_args['epochs'])
parser.add_argument('--batch_size', type=int,
default=default_args['batch_size'])
parser.add_argument('--learning_rate', type=float,
default=default_args['learning_rate'])
parser.add_argument('--early_stopping_patience', '--early_stopping', type=int,
default=default_args['early_stopping_patience'])
parser.add_argument('--reduce_lr_patience', type=int,
default=default_args['reduce_lr_patience'])
# tf.data piepline options
parser.add_argument('--dataset_name', type=str,
default=default_args['dataset_name'])
parser.add_argument('--cache_path', type=str,
default=default_args['cache_path'])
parser.add_argument('--abnormal_class', type=int,
default=default_args['abnormal_class'])
parser.add_argument('--image_size', type=int,
default=default_args['image_size'])
parser.add_argument('--image_channels', type=int,
default=default_args['image_channels'])
parser.add_argument('--buffer_size', type=int,
default=default_args['buffer_size'])
parser.add_argument('--shuffle', type=str2bool, nargs='?',
const=True, default=default_args['shuffle'])
parser.add_argument('--prefetch', type=str2bool, nargs='?',
const=True, default=default_args['prefetch'])
parser.add_argument('--random_flip', type=str2bool, nargs='?',
const=True, default=default_args['random_flip'])
parser.add_argument('--random_crop', type=str2bool, nargs='?',
const=True, default=default_args['random_crop'])
parser.add_argument('--random_brightness', type=str2bool, nargs='?',
const=True, default=default_args['random_brightness'])
parser.add_argument('--repeat_dataset', type=str2posint,
default=default_args['repeat_dataset'])
# model params
parser.add_argument('--model_name', type=str,
default=default_args['model_name'])
parser.add_argument('--latent_size', type=int,
default=default_args['latent_size'])
parser.add_argument('--intermediate_size', type=int,
default=default_args['intermediate_size'])
parser.add_argument('--n_filters', type=int,
default=default_args['n_filters'])
parser.add_argument('--n_extra_layers', type=int,
default=default_args['n_extra_layers'])
parser.add_argument('--w_adv', type=int,
default=default_args['w_adv'])
parser.add_argument('--w_rec', type=int,
default=default_args['w_rec'])
parser.add_argument('--w_enc', type=int,
default=default_args['w_enc'])
# debugging params
parser.add_argument('--train_steps', type=str2posint,
default=default_args['train_steps'])
parser.add_argument('--eval_steps', type=str2posint,
default=default_args['eval_steps'])
parser.add_argument('--log_level', type=str2logging,
default=default_args['log_level'])
parser.add_argument('--debug', type=str2bool, nargs='?',
const=True, default=default_args['debug'])
# input/output dir params
parser.add_argument('--data_dir', type=str,
default=os.environ.get('SM_CHANNEL_DATA_DIR') or default_args['data_dir'])
parser.add_argument('--model_dir', type=str,
default=default_args['model_dir'])
parser.add_argument('--sm_model_dir', type=str,
default=os.environ.get('SM_MODEL_DIR'))
parser.add_argument('--output_data_dir', type=str,
default=os.environ.get('SM_OUTPUT_DATA_DIR') or default_args['output_data_dir'])
return parser.parse_known_args()
if __name__ == '__main__':
args, unknown = parse_args()
# setup logging
logging.basicConfig(stream=sys.stdout, # SageMaker doesn't log the default stderr
level=args.log_level,
# https://docs.python.org/3.8/library/logging.html#logrecord-attributes
format='[%(asctime)s | %(name)s | %(levelname)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
# print info about script params and env values
debug('Know args: {}'.format(args))
if unknown:
debug('Unknown args: {}'.format(unknown))
sm_env_vals = ['{}="{}"'.format(env, val)
for env, val in os.environ.items() if env.startswith('SM_')]
if sm_env_vals:
debug('ENV: {}'.format(', '.join(sm_env_vals)))
# use eager execution for debugging
if args.debug:
assert version.parse('2.3') <= version.parse(tf.version.VERSION), "Tensorflow 2.3 or geater required"
tf.config.run_functions_eagerly(True)
main(args)
| return model_class(
input_shape=image_shape,
latent_size=args.latent_size,
n_filters=args.n_filters,
n_extra_layers=args.n_extra_layers,
**kwargs
) | identifier_body |
train.py | # standard modules
import sys
assert sys.version_info >= (3, 5), "Python 3.5 or greater required"
import argparse
import os
import time
import json
from packaging import version
import logging
logger = logging.getLogger('train_ganomaly')
debug = logger.debug
info = logger.info
warning = logger.warning
error = logger.error
critical = logger.critical
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# external modules
import tensorflow as tf
import numpy as np
# package modules
from datasets.mvtec_ad import get_labeled_dataset
from datasets.common import get_dataset
from models.ganomaly import GANomaly
from models.cae import CAE
from models.cnae import CNAE
from models.cvae import CVAE
from utils.callbacks import ADModelEvaluator
from utils.datasets import create_anomaly_dataset
default_args = {
# training params
'epochs': 1,
'batch_size': 64,
'learning_rate': 0.0002,
'early_stopping_patience': 100,
'reduce_lr_patience': 0,
# tf.data piepline params
'dataset_name': 'mnist',
'cache_path': None,
'abnormal_class': 2, # only valid for mnist, fashion_mnist, cifar10, cifar100 and stl10
'image_size': 32,
'image_channels': 0, # only valid for MVTec AD
'buffer_size': 1000,
'shuffle': True,
'prefetch': True,
'random_flip': False,
'random_crop': False,
'random_brightness': False,
'repeat_dataset': None,
# model params
'model_name': 'ganomaly',
'latent_size': 100,
'intermediate_size': 0, # only valid for cvae
'n_filters': 64,
'n_extra_layers': 0,
'w_adv': 1, # only valid for GANomaly
'w_rec': 50, # only valid for GANomaly
'w_enc': 1, # only valid for GANomaly
# debugging params
'train_steps': None,
'eval_steps': None,
'log_level': 'info',
'debug': False,
# input/output dir params
'data_dir': './trainig/data',
'model_dir': './trainig/model',
'output_data_dir': './trainig/output'
}
def | (args) -> tf.keras.Model:
image_shape = (args.image_size, args.image_size, args.image_channels)
def build_default(model_class, **kwargs):
return model_class(
input_shape=image_shape,
latent_size=args.latent_size,
n_filters=args.n_filters,
n_extra_layers=args.n_extra_layers,
**kwargs
)
def compile_default(model):
model.compile(
optimizer=tf.keras.optimizers.Adam(
learning_rate=args.learning_rate),
loss=tf.keras.losses.MeanSquaredError(),
metrics=[
tf.keras.losses.MeanAbsoluteError(),
tf.keras.losses.BinaryCrossentropy()
]
)
return model
def build_ganomaly():
model = build_default(GANomaly)
model.compile(
loss={
'adv': tf.keras.losses.MeanSquaredError(),
'rec': tf.keras.losses.MeanAbsoluteError(),
'enc': tf.keras.losses.MeanSquaredError(),
'dis': tf.keras.losses.BinaryCrossentropy()
},
loss_weights={
'adv': args.w_adv,
'rec': args.w_rec,
'enc': args.w_enc
},
optimizer={
'gen': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999),
'dis': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999)
}
)
return model
def switcher_default():
warning("Unknown model_name, using 'ganomaly' as default!")
return build_ganomaly()
switcher = {
'ganomaly': build_ganomaly,
'cae': lambda: compile_default(build_default(CAE)),
'cnae': lambda: compile_default(build_default(CNAE)),
'cvae': lambda: compile_default(build_default(CVAE,
intermediate_size=args.intermediate_size))
}
model = switcher.get(args.model_name, switcher_default)()
model.build((None, *image_shape))
return model
def get_prepared_datasets(args):
# get dataset by name with simple try an error
try:
train_ds = get_labeled_dataset(
category=args.dataset_name, split='train', image_channels=args.image_channels, binary_labels=True)
test_ds = get_labeled_dataset(
category=args.dataset_name, split='test', image_channels=args.image_channels, binary_labels=True)
args.image_channels = 3 if args.image_channels == 0 else args.image_channels
except ValueError:
try:
(train_images, train_labels), (test_images, test_labels) = create_anomaly_dataset(
dataset=get_dataset(args.dataset_name), abnormal_class=args.abnormal_class)
args.dataset_name += str(args.abnormal_class)
train_ds = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels))
test_ds = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels))
args.image_channels = train_images.shape[-1]
except ValueError:
raise ValueError(
"{} isn't a valid dataset".format(args.dataset_name))
def resize_image(image, label):
image = tf.image.resize(image, (args.image_size, args.image_size))
return image, label
train_ds = train_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test_ds = test_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.cache_path:
cache_dir = os.path.join(args.cache_path, 'tfdata_cache_{}_{}_{}'.format(
args.dataset_name, args.image_size, args.image_channels))
os.makedirs(cache_dir, exist_ok=True)
train_ds = train_ds.cache(os.path.join(cache_dir, 'train'))
test_ds = test_ds.cache(os.path.join(cache_dir, 'test'))
if args.repeat_dataset:
train_ds = train_ds.repeat(args.repeat_dataset)
if args.random_flip or args.random_crop or args.random_brightness:
def augment_image(image, label):
if args.random_flip:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if args.random_crop:
image_shape = (args.image_size, args.image_size,
args.image_channels)
image = tf.image.resize_with_crop_or_pad(
image, image_shape[-3] + 6, image_shape[-2] + 6)
image = tf.image.random_crop(image, size=image_shape)
if args.random_brightness:
image = tf.image.random_brightness(image, max_delta=0.5)
image = tf.clip_by_value(image, 0, 1)
return image, label
train_ds = train_ds.map(
augment_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.shuffle:
train_ds = train_ds.shuffle(args.buffer_size)
if args.prefetch:
train_ds = train_ds.prefetch(args.buffer_size)
test_ds = test_ds.prefetch(args.buffer_size)
return train_ds, test_ds
def main(args):
train_ds, test_ds = get_prepared_datasets(args)
train_count = tf.data.experimental.cardinality(train_ds).numpy()
test_count = tf.data.experimental.cardinality(test_ds).numpy()
info("dataset: train_count: {}, test_count: {}".format(train_count, test_count))
model = build_model(args)
model.summary(print_fn=info)
#model.net_gen.summary(print_fn=info) # TODO call it from summary() of GANomaly
#model.net_dis.summary(print_fn=info)
#model.load_weights('./no/valid/path')
adme = ADModelEvaluator(
test_count=test_count if args.eval_steps is None else args.eval_steps * args.batch_size,
model_dir=args.sm_model_dir or args.model_dir,
early_stopping_patience=args.early_stopping_patience,
reduce_lr_patience=args.reduce_lr_patience
)
results = model.fit(
x=train_ds.batch(args.batch_size),
validation_data=test_ds.batch(args.batch_size),
callbacks=[adme],
epochs=args.epochs,
steps_per_epoch=args.train_steps,
validation_steps=args.eval_steps,
verbose=2
)
# remove the useless per image losses and labels and add test results
del results.history['val_losses']
del results.history['val_labels']
results.history['val_auroc'] = adme.test_results
# https://stackoverflow.com/questions/23613426/write-dictionary-of-lists-to-a-csv-file
info("results: {}".format(json.dumps(
results.history, indent=4, sort_keys=True, default=str)))
critical("END OF SCRIPT REACHED")
def parse_args():
"""
https://docs.python.org/3.6/library/argparse.html
https://sagemaker.readthedocs.io/en/stable/using_tf.html#prepare-a-script-mode-training-script
https://github.com/aws/sagemaker-containers#list-of-provided-environment-variables-by-sagemaker-containers
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str2logging(v):
return {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}.get(v, logging.INFO)
def str2posint(v):
v = int(v)
return v if v > 0 else None
parser = argparse.ArgumentParser()
# training params
parser.add_argument('--epochs', type=int, default=default_args['epochs'])
parser.add_argument('--batch_size', type=int,
default=default_args['batch_size'])
parser.add_argument('--learning_rate', type=float,
default=default_args['learning_rate'])
parser.add_argument('--early_stopping_patience', '--early_stopping', type=int,
default=default_args['early_stopping_patience'])
parser.add_argument('--reduce_lr_patience', type=int,
default=default_args['reduce_lr_patience'])
# tf.data piepline options
parser.add_argument('--dataset_name', type=str,
default=default_args['dataset_name'])
parser.add_argument('--cache_path', type=str,
default=default_args['cache_path'])
parser.add_argument('--abnormal_class', type=int,
default=default_args['abnormal_class'])
parser.add_argument('--image_size', type=int,
default=default_args['image_size'])
parser.add_argument('--image_channels', type=int,
default=default_args['image_channels'])
parser.add_argument('--buffer_size', type=int,
default=default_args['buffer_size'])
parser.add_argument('--shuffle', type=str2bool, nargs='?',
const=True, default=default_args['shuffle'])
parser.add_argument('--prefetch', type=str2bool, nargs='?',
const=True, default=default_args['prefetch'])
parser.add_argument('--random_flip', type=str2bool, nargs='?',
const=True, default=default_args['random_flip'])
parser.add_argument('--random_crop', type=str2bool, nargs='?',
const=True, default=default_args['random_crop'])
parser.add_argument('--random_brightness', type=str2bool, nargs='?',
const=True, default=default_args['random_brightness'])
parser.add_argument('--repeat_dataset', type=str2posint,
default=default_args['repeat_dataset'])
# model params
parser.add_argument('--model_name', type=str,
default=default_args['model_name'])
parser.add_argument('--latent_size', type=int,
default=default_args['latent_size'])
parser.add_argument('--intermediate_size', type=int,
default=default_args['intermediate_size'])
parser.add_argument('--n_filters', type=int,
default=default_args['n_filters'])
parser.add_argument('--n_extra_layers', type=int,
default=default_args['n_extra_layers'])
parser.add_argument('--w_adv', type=int,
default=default_args['w_adv'])
parser.add_argument('--w_rec', type=int,
default=default_args['w_rec'])
parser.add_argument('--w_enc', type=int,
default=default_args['w_enc'])
# debugging params
parser.add_argument('--train_steps', type=str2posint,
default=default_args['train_steps'])
parser.add_argument('--eval_steps', type=str2posint,
default=default_args['eval_steps'])
parser.add_argument('--log_level', type=str2logging,
default=default_args['log_level'])
parser.add_argument('--debug', type=str2bool, nargs='?',
const=True, default=default_args['debug'])
# input/output dir params
parser.add_argument('--data_dir', type=str,
default=os.environ.get('SM_CHANNEL_DATA_DIR') or default_args['data_dir'])
parser.add_argument('--model_dir', type=str,
default=default_args['model_dir'])
parser.add_argument('--sm_model_dir', type=str,
default=os.environ.get('SM_MODEL_DIR'))
parser.add_argument('--output_data_dir', type=str,
default=os.environ.get('SM_OUTPUT_DATA_DIR') or default_args['output_data_dir'])
return parser.parse_known_args()
if __name__ == '__main__':
args, unknown = parse_args()
# setup logging
logging.basicConfig(stream=sys.stdout, # SageMaker doesn't log the default stderr
level=args.log_level,
# https://docs.python.org/3.8/library/logging.html#logrecord-attributes
format='[%(asctime)s | %(name)s | %(levelname)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
# print info about script params and env values
debug('Know args: {}'.format(args))
if unknown:
debug('Unknown args: {}'.format(unknown))
sm_env_vals = ['{}="{}"'.format(env, val)
for env, val in os.environ.items() if env.startswith('SM_')]
if sm_env_vals:
debug('ENV: {}'.format(', '.join(sm_env_vals)))
# use eager execution for debugging
if args.debug:
assert version.parse('2.3') <= version.parse(tf.version.VERSION), "Tensorflow 2.3 or geater required"
tf.config.run_functions_eagerly(True)
main(args)
| build_model | identifier_name |
lib.rs | /*!
This crate provides a robust regular expression parser.
This crate defines two primary types:
* [`Ast`](ast::Ast) is the abstract syntax of a regular expression.
An abstract syntax corresponds to a *structured representation* of the
concrete syntax of a regular expression, where the concrete syntax is the
pattern string itself (e.g., `foo(bar)+`). Given some abstract syntax, it
can be converted back to the original concrete syntax (modulo some details,
like whitespace). To a first approximation, the abstract syntax is complex
and difficult to analyze.
* [`Hir`](hir::Hir) is the high-level intermediate representation
("HIR" or "high-level IR" for short) of regular expression. It corresponds to
an intermediate state of a regular expression that sits between the abstract
syntax and the low level compiled opcodes that are eventually responsible for
executing a regular expression search. Given some high-level IR, it is not
possible to produce the original concrete syntax (although it is possible to
produce an equivalent concrete syntax, but it will likely scarcely resemble
the original pattern). To a first approximation, the high-level IR is simple
and easy to analyze.
These two types come with conversion routines:
* An [`ast::parse::Parser`] converts concrete syntax (a `&str`) to an
[`Ast`](ast::Ast).
* A [`hir::translate::Translator`] converts an [`Ast`](ast::Ast) to a
[`Hir`](hir::Hir).
As a convenience, the above two conversion routines are combined into one via
the top-level [`Parser`] type. This `Parser` will first convert your pattern to
an `Ast` and then convert the `Ast` to an `Hir`. It's also exposed as top-level
[`parse`] free function.
# Example
This example shows how to parse a pattern string into its HIR:
```
use regex_syntax::{hir::Hir, parse};
let hir = parse("a|b")?;
assert_eq!(hir, Hir::alternation(vec![
Hir::literal("a".as_bytes()),
Hir::literal("b".as_bytes()),
]));
# Ok::<(), Box<dyn std::error::Error>>(())
```
# Concrete syntax supported
The concrete syntax is documented as part of the public API of the
[`regex` crate](https://docs.rs/regex/%2A/regex/#syntax).
# Input safety
A key feature of this library is that it is safe to use with end user facing
input. This plays a significant role in the internal implementation. In
particular:
1. Parsers provide a `nest_limit` option that permits callers to control how
deeply nested a regular expression is allowed to be. This makes it possible
to do case analysis over an `Ast` or an `Hir` using recursion without
worrying about stack overflow.
2. Since relying on a particular stack size is brittle, this crate goes to
great lengths to ensure that all interactions with both the `Ast` and the
`Hir` do not use recursion. Namely, they use constant stack space and heap
space proportional to the size of the original pattern string (in bytes).
This includes the type's corresponding destructors. (One exception to this
is literal extraction, but this will eventually get fixed.)
# Error reporting
The `Display` implementations on all `Error` types exposed in this library
provide nice human readable errors that are suitable for showing to end users
in a monospace font.
# Literal extraction
This crate provides limited support for [literal extraction from `Hir`
values](hir::literal). Be warned that literal extraction uses recursion, and
therefore, stack size proportional to the size of the `Hir`.
The purpose of literal extraction is to speed up searches. That is, if you
know a regular expression must match a prefix or suffix literal, then it is
often quicker to search for instances of that literal, and then confirm or deny
the match using the full regular expression engine. These optimizations are
done automatically in the `regex` crate.
# Crate features
An important feature provided by this crate is its Unicode support. This
includes things like case folding, boolean properties, general categories,
scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`.
However, a downside of this support is that it requires bundling several
Unicode data tables that are substantial in size.
A fair number of use cases do not require full Unicode support. For this
reason, this crate exposes a number of features to control which Unicode
data is available.
If a regular expression attempts to use a Unicode feature that is not available
because the corresponding crate feature was disabled, then translating that
regular expression to an `Hir` will return an error. (It is still possible
construct an `Ast` for such a regular expression, since Unicode data is not
used until translation to an `Hir`.) Stated differently, enabling or disabling
any of the features below can only add or subtract from the total set of valid
regular expressions. Enabling or disabling a feature will never modify the
match semantics of a regular expression.
The following features are available:
* **std** -
Enables support for the standard library. This feature is enabled by default.
When disabled, only `core` and `alloc` are used. Otherwise, enabling `std`
generally just enables `std::error::Error` trait impls for the various error
types.
* **unicode** -
Enables all Unicode features. This feature is enabled by default, and will
always cover all Unicode features, even if more are added in the future.
* **unicode-age** -
Provide the data for the
[Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age).
This makes it possible to use classes like `\p{Age:6.0}` to refer to all
codepoints first introduced in Unicode 6.0
* **unicode-bool** -
Provide the data for numerous Unicode boolean properties. The full list
is not included here, but contains properties like `Alphabetic`, `Emoji`,
`Lowercase`, `Math`, `Uppercase` and `White_Space`.
* **unicode-case** -
Provide the data for case insensitive matching using
[Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches).
* **unicode-gencat** -
Provide the data for
[Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values).
This includes, but is not limited to, `Decimal_Number`, `Letter`,
`Math_Symbol`, `Number` and `Punctuation`.
* **unicode-perl** -
Provide the data for supporting the Unicode-aware Perl character classes,
corresponding to `\w`, `\s` and `\d`. This is also necessary for using
Unicode-aware word boundary assertions. Note that if this feature is
disabled, the `\s` and `\d` character classes are still available if the
`unicode-bool` and `unicode-gencat` features are enabled, respectively.
* **unicode-script** -
Provide the data for
[Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/).
This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`,
`Latin` and `Thai`.
* **unicode-segment** -
Provide the data necessary to provide the properties used to implement the
[Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/).
This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and
`\p{sb=ATerm}`.
* **arbitrary** -
Enabling this feature introduces a public dependency on the
[`arbitrary`](https://crates.io/crates/arbitrary)
crate. Namely, it implements the `Arbitrary` trait from that crate for the
[`Ast`](crate::ast::Ast) type. This feature is disabled by default.
*/
#![no_std]
#![forbid(unsafe_code)]
#![deny(missing_docs, rustdoc::broken_intra_doc_links)]
#![warn(missing_debug_implementations)]
// MSRV(1.62): Allow unused warnings. Needed for the 'allow' below,
// since the warning is no longer triggered in newer Rust releases.
// Once the 'allow(mutable_borrow_reservation_conflict)' can be
// removed, we can remove the 'allow(renamed_and_removed_lints)' too.
#![allow(renamed_and_removed_lints)]
// MSRV(1.62): This gets triggered on Rust <1.62, and since our MSRV
// is Rust 1.60 at the time of writing, a warning is displayed. But
// the lang team decided the code pattern flagged by this warning is
// OK, so the warning is innocuous. We can remove this explicit allow
// once we get to a Rust release where the warning is no longer
// triggered. I believe that's Rust 1.62.
#![allow(mutable_borrow_reservation_conflict)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(any(test, feature = "std"))]
extern crate std;
extern crate alloc;
pub use crate::{
error::Error,
parser::{parse, Parser, ParserBuilder},
unicode::UnicodeWordError,
};
use alloc::string::String;
pub mod ast;
mod debug;
mod either;
mod error;
pub mod hir;
mod parser;
mod rank;
mod unicode;
mod unicode_tables;
pub mod utf8;
/// Escapes all regular expression meta characters in `text`.
///
/// The string returned may be safely used as a literal in a regular
/// expression.
pub fn escape(text: &str) -> String {
let mut quoted = String::new();
escape_into(text, &mut quoted);
quoted
}
/// Escapes all meta characters in `text` and writes the result into `buf`.
///
/// This will append escape characters into the given buffer. The characters
/// that are appended are safe to use as a literal in a regular expression.
pub fn escape_into(text: &str, buf: &mut String) {
buf.reserve(text.len());
for c in text.chars() {
if is_meta_character(c) {
buf.push('\\');
}
buf.push(c);
}
}
/// Returns true if the given character has significance in a regex.
///
/// Generally speaking, these are the only characters which _must_ be escaped
/// in order to match their literal meaning. For example, to match a literal
/// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For
/// example, `-` is treated as a meta character because of its significance
/// for writing ranges inside of character classes, but the regex `-` will
/// match a literal `-` because `-` has no special meaning outside of character
/// classes.
///
/// In order to determine whether a character may be escaped at all, the
/// [`is_escapeable_character`] routine should be used. The difference between
/// `is_meta_character` and `is_escapeable_character` is that the latter will
/// return true for some characters that are _not_ meta characters. For
/// example, `%` and `\%` both match a literal `%` in all contexts. In other
/// words, `is_escapeable_character` includes "superfluous" escapes.
///
/// Note that the set of characters for which this function returns `true` or
/// `false` is fixed and won't change in a semver compatible release. (In this
/// case, "semver compatible release" actually refers to the `regex` crate
/// itself, since reducing or expanding the set of meta characters would be a
/// breaking change for not just `regex-syntax` but also `regex` itself.)
///
/// # Example
///
/// ```
/// use regex_syntax::is_meta_character;
///
/// assert!(is_meta_character('?'));
/// assert!(is_meta_character('-'));
/// assert!(is_meta_character('&'));
/// assert!(is_meta_character('#'));
///
/// assert!(!is_meta_character('%'));
/// assert!(!is_meta_character('/'));
/// assert!(!is_meta_character('!'));
/// assert!(!is_meta_character('"'));
/// assert!(!is_meta_character('e'));
/// ```
pub fn is_meta_character(c: char) -> bool {
match c {
'\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{'
| '}' | '^' | '$' | '#' | '&' | '-' | '~' => true,
_ => false,
}
}
/// Returns true if the given character can be escaped in a regex.
///
/// This returns true in all cases that `is_meta_character` returns true, but
/// also returns true in some cases where `is_meta_character` returns false.
/// For example, `%` is not a meta character, but it is escapeable. That is,
/// `%` and `\%` both match a literal `%` in all contexts.
///
/// The purpose of this routine is to provide knowledge about what characters
/// may be escaped. Namely, most regex engines permit "superfluous" escapes
/// where characters without any special significance may be escaped even
/// though there is no actual _need_ to do so.
///
/// This will return false for some characters. For example, `e` is not
/// escapeable. Therefore, `\e` will either result in a parse error (which is
/// true today), or it could backwards compatibly evolve into a new construct
/// with its own meaning. Indeed, that is the purpose of banning _some_
/// superfluous escapes: it provides a way to evolve the syntax in a compatible
/// manner.
///
/// # Example
///
/// ```
/// use regex_syntax::is_escapeable_character;
///
/// assert!(is_escapeable_character('?'));
/// assert!(is_escapeable_character('-'));
/// assert!(is_escapeable_character('&'));
/// assert!(is_escapeable_character('#'));
/// assert!(is_escapeable_character('%'));
/// assert!(is_escapeable_character('/'));
/// assert!(is_escapeable_character('!'));
/// assert!(is_escapeable_character('"'));
///
/// assert!(!is_escapeable_character('e'));
/// ```
pub fn is_escapeable_character(c: char) -> bool {
// Certainly escapeable if it's a meta character.
if is_meta_character(c) {
return true;
}
// Any character that isn't ASCII is definitely not escapeable. There's
// no real need to allow things like \☃ right?
if !c.is_ascii() {
return false;
}
// Otherwise, we basically say that everything is escapeable unless it's a
// letter or digit. Things like \3 are either octal (when enabled) or an
// error, and we should keep it that way. Otherwise, letters are reserved
// for adding new syntax in a backwards compatible way.
match c {
'0'..='9' | 'A'..='Z' | 'a'..='z' => false,
// While not currently supported, we keep these as not escapeable to
// give us some flexibility with respect to supporting the \< and
// \> word boundary assertions in the future. By rejecting them as
// escapeable, \< and \> will result in a parse error. Thus, we can
// turn them into something else in the future without it being a
// backwards incompatible change.
'<' | '>' => false,
_ => true,
}
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Panics
///
/// If the `unicode-perl` feature is not enabled, then this function
/// panics. For this reason, it is recommended that callers use
/// [`try_is_word_character`] instead.
pub fn is_word_character(c: char) -> bool {
try_is_word_character(c).expect("unicode-perl feature must be enabled")
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Errors
///
/// If the `unicode-perl` feature is not enabled, then this function always
/// returns an error.
pub fn try_is_word_character(
c: char,
) -> core::result::Result<bool, UnicodeWordError> {
| /// Returns true if and only if the given character is an ASCII word character.
///
/// An ASCII word character is defined by the following character class:
/// `[_0-9a-zA-Z]'.
pub fn is_word_byte(c: u8) -> bool {
match c {
b'_' | b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' => true,
_ => false,
}
}
#[cfg(test)]
mod tests {
use alloc::string::ToString;
use super::*;
#[test]
fn escape_meta() {
assert_eq!(
escape(r"\.+*?()|[]{}^$#&-~"),
r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~".to_string()
);
}
#[test]
fn word_byte() {
assert!(is_word_byte(b'a'));
assert!(!is_word_byte(b'-'));
}
#[test]
#[cfg(feature = "unicode-perl")]
fn word_char() {
assert!(is_word_character('a'), "ASCII");
assert!(is_word_character('à'), "Latin-1");
assert!(is_word_character('β'), "Greek");
assert!(is_word_character('\u{11011}'), "Brahmi (Unicode 6.0)");
assert!(is_word_character('\u{11611}'), "Modi (Unicode 7.0)");
assert!(is_word_character('\u{11711}'), "Ahom (Unicode 8.0)");
assert!(is_word_character('\u{17828}'), "Tangut (Unicode 9.0)");
assert!(is_word_character('\u{1B1B1}'), "Nushu (Unicode 10.0)");
assert!(is_word_character('\u{16E40}'), "Medefaidrin (Unicode 11.0)");
assert!(!is_word_character('-'));
assert!(!is_word_character('☃'));
}
#[test]
#[should_panic]
#[cfg(not(feature = "unicode-perl"))]
fn word_char_disabled_panic() {
assert!(is_word_character('a'));
}
#[test]
#[cfg(not(feature = "unicode-perl"))]
fn word_char_disabled_error() {
assert!(try_is_word_character('a').is_err());
}
}
| unicode::is_word_character(c)
}
| identifier_body |
lib.rs | /*!
This crate provides a robust regular expression parser.
This crate defines two primary types:
* [`Ast`](ast::Ast) is the abstract syntax of a regular expression.
An abstract syntax corresponds to a *structured representation* of the
concrete syntax of a regular expression, where the concrete syntax is the
pattern string itself (e.g., `foo(bar)+`). Given some abstract syntax, it
can be converted back to the original concrete syntax (modulo some details,
like whitespace). To a first approximation, the abstract syntax is complex
and difficult to analyze.
* [`Hir`](hir::Hir) is the high-level intermediate representation
("HIR" or "high-level IR" for short) of regular expression. It corresponds to
an intermediate state of a regular expression that sits between the abstract
syntax and the low level compiled opcodes that are eventually responsible for
executing a regular expression search. Given some high-level IR, it is not
possible to produce the original concrete syntax (although it is possible to
produce an equivalent concrete syntax, but it will likely scarcely resemble
the original pattern). To a first approximation, the high-level IR is simple
and easy to analyze.
These two types come with conversion routines:
* An [`ast::parse::Parser`] converts concrete syntax (a `&str`) to an
[`Ast`](ast::Ast).
* A [`hir::translate::Translator`] converts an [`Ast`](ast::Ast) to a
[`Hir`](hir::Hir).
As a convenience, the above two conversion routines are combined into one via
the top-level [`Parser`] type. This `Parser` will first convert your pattern to
an `Ast` and then convert the `Ast` to an `Hir`. It's also exposed as top-level
[`parse`] free function.
# Example
This example shows how to parse a pattern string into its HIR:
```
use regex_syntax::{hir::Hir, parse};
let hir = parse("a|b")?;
assert_eq!(hir, Hir::alternation(vec![
Hir::literal("a".as_bytes()),
Hir::literal("b".as_bytes()),
]));
# Ok::<(), Box<dyn std::error::Error>>(())
```
# Concrete syntax supported
The concrete syntax is documented as part of the public API of the
[`regex` crate](https://docs.rs/regex/%2A/regex/#syntax).
# Input safety
A key feature of this library is that it is safe to use with end user facing
input. This plays a significant role in the internal implementation. In
particular:
1. Parsers provide a `nest_limit` option that permits callers to control how
deeply nested a regular expression is allowed to be. This makes it possible
to do case analysis over an `Ast` or an `Hir` using recursion without
worrying about stack overflow.
2. Since relying on a particular stack size is brittle, this crate goes to
great lengths to ensure that all interactions with both the `Ast` and the
`Hir` do not use recursion. Namely, they use constant stack space and heap
space proportional to the size of the original pattern string (in bytes).
This includes the type's corresponding destructors. (One exception to this
is literal extraction, but this will eventually get fixed.)
# Error reporting
The `Display` implementations on all `Error` types exposed in this library
provide nice human readable errors that are suitable for showing to end users
in a monospace font.
# Literal extraction
This crate provides limited support for [literal extraction from `Hir`
values](hir::literal). Be warned that literal extraction uses recursion, and
therefore, stack size proportional to the size of the `Hir`.
The purpose of literal extraction is to speed up searches. That is, if you
know a regular expression must match a prefix or suffix literal, then it is
often quicker to search for instances of that literal, and then confirm or deny
the match using the full regular expression engine. These optimizations are
done automatically in the `regex` crate.
# Crate features
An important feature provided by this crate is its Unicode support. This
includes things like case folding, boolean properties, general categories,
scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`.
However, a downside of this support is that it requires bundling several
Unicode data tables that are substantial in size.
A fair number of use cases do not require full Unicode support. For this
reason, this crate exposes a number of features to control which Unicode
data is available.
If a regular expression attempts to use a Unicode feature that is not available
because the corresponding crate feature was disabled, then translating that
regular expression to an `Hir` will return an error. (It is still possible
construct an `Ast` for such a regular expression, since Unicode data is not
used until translation to an `Hir`.) Stated differently, enabling or disabling
any of the features below can only add or subtract from the total set of valid
regular expressions. Enabling or disabling a feature will never modify the
match semantics of a regular expression.
The following features are available:
* **std** -
Enables support for the standard library. This feature is enabled by default.
When disabled, only `core` and `alloc` are used. Otherwise, enabling `std`
generally just enables `std::error::Error` trait impls for the various error
types.
* **unicode** -
Enables all Unicode features. This feature is enabled by default, and will
always cover all Unicode features, even if more are added in the future.
* **unicode-age** -
Provide the data for the
[Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age).
This makes it possible to use classes like `\p{Age:6.0}` to refer to all
codepoints first introduced in Unicode 6.0
* **unicode-bool** -
Provide the data for numerous Unicode boolean properties. The full list
is not included here, but contains properties like `Alphabetic`, `Emoji`,
`Lowercase`, `Math`, `Uppercase` and `White_Space`.
* **unicode-case** -
Provide the data for case insensitive matching using
[Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches).
* **unicode-gencat** -
Provide the data for
[Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values).
This includes, but is not limited to, `Decimal_Number`, `Letter`,
`Math_Symbol`, `Number` and `Punctuation`.
* **unicode-perl** -
Provide the data for supporting the Unicode-aware Perl character classes,
corresponding to `\w`, `\s` and `\d`. This is also necessary for using
Unicode-aware word boundary assertions. Note that if this feature is
disabled, the `\s` and `\d` character classes are still available if the
`unicode-bool` and `unicode-gencat` features are enabled, respectively.
* **unicode-script** -
Provide the data for
[Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/).
This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`,
`Latin` and `Thai`.
* **unicode-segment** -
Provide the data necessary to provide the properties used to implement the
[Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/).
This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and
`\p{sb=ATerm}`.
* **arbitrary** -
Enabling this feature introduces a public dependency on the
[`arbitrary`](https://crates.io/crates/arbitrary)
crate. Namely, it implements the `Arbitrary` trait from that crate for the
[`Ast`](crate::ast::Ast) type. This feature is disabled by default.
*/
#![no_std]
#![forbid(unsafe_code)]
#![deny(missing_docs, rustdoc::broken_intra_doc_links)]
#![warn(missing_debug_implementations)]
// MSRV(1.62): Allow unused warnings. Needed for the 'allow' below,
// since the warning is no longer triggered in newer Rust releases.
// Once the 'allow(mutable_borrow_reservation_conflict)' can be
// removed, we can remove the 'allow(renamed_and_removed_lints)' too.
#![allow(renamed_and_removed_lints)]
// MSRV(1.62): This gets triggered on Rust <1.62, and since our MSRV
// is Rust 1.60 at the time of writing, a warning is displayed. But
// the lang team decided the code pattern flagged by this warning is
// OK, so the warning is innocuous. We can remove this explicit allow
// once we get to a Rust release where the warning is no longer
// triggered. I believe that's Rust 1.62.
#![allow(mutable_borrow_reservation_conflict)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(any(test, feature = "std"))]
extern crate std;
extern crate alloc;
pub use crate::{
error::Error,
parser::{parse, Parser, ParserBuilder},
unicode::UnicodeWordError,
};
use alloc::string::String;
pub mod ast;
mod debug;
mod either;
mod error;
pub mod hir;
mod parser;
mod rank;
mod unicode;
mod unicode_tables;
pub mod utf8;
/// Escapes all regular expression meta characters in `text`.
///
/// The string returned may be safely used as a literal in a regular
/// expression.
pub fn escape(text: &str) -> String {
let mut quoted = String::new();
escape_into(text, &mut quoted);
quoted
}
/// Escapes all meta characters in `text` and writes the result into `buf`.
///
/// This will append escape characters into the given buffer. The characters
/// that are appended are safe to use as a literal in a regular expression.
pub fn escape_into(text: &str, buf: &mut String) {
buf.reserve(text.len());
for c in text.chars() {
if is_meta_character(c) {
buf.push('\\');
}
buf.push(c);
}
}
/// Returns true if the given character has significance in a regex.
///
/// Generally speaking, these are the only characters which _must_ be escaped
/// in order to match their literal meaning. For example, to match a literal
/// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For
/// example, `-` is treated as a meta character because of its significance
/// for writing ranges inside of character classes, but the regex `-` will
/// match a literal `-` because `-` has no special meaning outside of character
/// classes.
///
/// In order to determine whether a character may be escaped at all, the
/// [`is_escapeable_character`] routine should be used. The difference between
/// `is_meta_character` and `is_escapeable_character` is that the latter will
/// return true for some characters that are _not_ meta characters. For
/// example, `%` and `\%` both match a literal `%` in all contexts. In other
/// words, `is_escapeable_character` includes "superfluous" escapes.
///
/// Note that the set of characters for which this function returns `true` or
/// `false` is fixed and won't change in a semver compatible release. (In this
/// case, "semver compatible release" actually refers to the `regex` crate
/// itself, since reducing or expanding the set of meta characters would be a
/// breaking change for not just `regex-syntax` but also `regex` itself.)
///
/// # Example
///
/// ```
/// use regex_syntax::is_meta_character;
///
/// assert!(is_meta_character('?'));
/// assert!(is_meta_character('-'));
/// assert!(is_meta_character('&'));
/// assert!(is_meta_character('#'));
///
/// assert!(!is_meta_character('%'));
/// assert!(!is_meta_character('/'));
/// assert!(!is_meta_character('!'));
/// assert!(!is_meta_character('"'));
/// assert!(!is_meta_character('e'));
/// ```
pub fn | (c: char) -> bool {
match c {
'\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{'
| '}' | '^' | '$' | '#' | '&' | '-' | '~' => true,
_ => false,
}
}
/// Returns true if the given character can be escaped in a regex.
///
/// This returns true in all cases that `is_meta_character` returns true, but
/// also returns true in some cases where `is_meta_character` returns false.
/// For example, `%` is not a meta character, but it is escapeable. That is,
/// `%` and `\%` both match a literal `%` in all contexts.
///
/// The purpose of this routine is to provide knowledge about what characters
/// may be escaped. Namely, most regex engines permit "superfluous" escapes
/// where characters without any special significance may be escaped even
/// though there is no actual _need_ to do so.
///
/// This will return false for some characters. For example, `e` is not
/// escapeable. Therefore, `\e` will either result in a parse error (which is
/// true today), or it could backwards compatibly evolve into a new construct
/// with its own meaning. Indeed, that is the purpose of banning _some_
/// superfluous escapes: it provides a way to evolve the syntax in a compatible
/// manner.
///
/// # Example
///
/// ```
/// use regex_syntax::is_escapeable_character;
///
/// assert!(is_escapeable_character('?'));
/// assert!(is_escapeable_character('-'));
/// assert!(is_escapeable_character('&'));
/// assert!(is_escapeable_character('#'));
/// assert!(is_escapeable_character('%'));
/// assert!(is_escapeable_character('/'));
/// assert!(is_escapeable_character('!'));
/// assert!(is_escapeable_character('"'));
///
/// assert!(!is_escapeable_character('e'));
/// ```
pub fn is_escapeable_character(c: char) -> bool {
// Certainly escapeable if it's a meta character.
if is_meta_character(c) {
return true;
}
// Any character that isn't ASCII is definitely not escapeable. There's
// no real need to allow things like \☃ right?
if !c.is_ascii() {
return false;
}
// Otherwise, we basically say that everything is escapeable unless it's a
// letter or digit. Things like \3 are either octal (when enabled) or an
// error, and we should keep it that way. Otherwise, letters are reserved
// for adding new syntax in a backwards compatible way.
match c {
'0'..='9' | 'A'..='Z' | 'a'..='z' => false,
// While not currently supported, we keep these as not escapeable to
// give us some flexibility with respect to supporting the \< and
// \> word boundary assertions in the future. By rejecting them as
// escapeable, \< and \> will result in a parse error. Thus, we can
// turn them into something else in the future without it being a
// backwards incompatible change.
'<' | '>' => false,
_ => true,
}
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Panics
///
/// If the `unicode-perl` feature is not enabled, then this function
/// panics. For this reason, it is recommended that callers use
/// [`try_is_word_character`] instead.
pub fn is_word_character(c: char) -> bool {
try_is_word_character(c).expect("unicode-perl feature must be enabled")
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Errors
///
/// If the `unicode-perl` feature is not enabled, then this function always
/// returns an error.
pub fn try_is_word_character(
c: char,
) -> core::result::Result<bool, UnicodeWordError> {
unicode::is_word_character(c)
}
/// Returns true if and only if the given character is an ASCII word character.
///
/// An ASCII word character is defined by the following character class:
/// `[_0-9a-zA-Z]'.
pub fn is_word_byte(c: u8) -> bool {
match c {
b'_' | b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' => true,
_ => false,
}
}
#[cfg(test)]
mod tests {
use alloc::string::ToString;
use super::*;
#[test]
fn escape_meta() {
assert_eq!(
escape(r"\.+*?()|[]{}^$#&-~"),
r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~".to_string()
);
}
#[test]
fn word_byte() {
assert!(is_word_byte(b'a'));
assert!(!is_word_byte(b'-'));
}
#[test]
#[cfg(feature = "unicode-perl")]
fn word_char() {
assert!(is_word_character('a'), "ASCII");
assert!(is_word_character('à'), "Latin-1");
assert!(is_word_character('β'), "Greek");
assert!(is_word_character('\u{11011}'), "Brahmi (Unicode 6.0)");
assert!(is_word_character('\u{11611}'), "Modi (Unicode 7.0)");
assert!(is_word_character('\u{11711}'), "Ahom (Unicode 8.0)");
assert!(is_word_character('\u{17828}'), "Tangut (Unicode 9.0)");
assert!(is_word_character('\u{1B1B1}'), "Nushu (Unicode 10.0)");
assert!(is_word_character('\u{16E40}'), "Medefaidrin (Unicode 11.0)");
assert!(!is_word_character('-'));
assert!(!is_word_character('☃'));
}
#[test]
#[should_panic]
#[cfg(not(feature = "unicode-perl"))]
fn word_char_disabled_panic() {
assert!(is_word_character('a'));
}
#[test]
#[cfg(not(feature = "unicode-perl"))]
fn word_char_disabled_error() {
assert!(try_is_word_character('a').is_err());
}
}
| is_meta_character | identifier_name |
lib.rs | /*!
This crate provides a robust regular expression parser.
This crate defines two primary types:
* [`Ast`](ast::Ast) is the abstract syntax of a regular expression.
An abstract syntax corresponds to a *structured representation* of the
concrete syntax of a regular expression, where the concrete syntax is the
pattern string itself (e.g., `foo(bar)+`). Given some abstract syntax, it
can be converted back to the original concrete syntax (modulo some details,
like whitespace). To a first approximation, the abstract syntax is complex
and difficult to analyze.
* [`Hir`](hir::Hir) is the high-level intermediate representation
("HIR" or "high-level IR" for short) of regular expression. It corresponds to
an intermediate state of a regular expression that sits between the abstract
syntax and the low level compiled opcodes that are eventually responsible for
executing a regular expression search. Given some high-level IR, it is not
possible to produce the original concrete syntax (although it is possible to
produce an equivalent concrete syntax, but it will likely scarcely resemble
the original pattern). To a first approximation, the high-level IR is simple
and easy to analyze.
These two types come with conversion routines:
* An [`ast::parse::Parser`] converts concrete syntax (a `&str`) to an
[`Ast`](ast::Ast).
* A [`hir::translate::Translator`] converts an [`Ast`](ast::Ast) to a
[`Hir`](hir::Hir).
As a convenience, the above two conversion routines are combined into one via
the top-level [`Parser`] type. This `Parser` will first convert your pattern to
an `Ast` and then convert the `Ast` to an `Hir`. It's also exposed as top-level
[`parse`] free function.
# Example
This example shows how to parse a pattern string into its HIR:
```
use regex_syntax::{hir::Hir, parse};
let hir = parse("a|b")?;
assert_eq!(hir, Hir::alternation(vec![
Hir::literal("a".as_bytes()),
Hir::literal("b".as_bytes()),
]));
# Ok::<(), Box<dyn std::error::Error>>(())
```
# Concrete syntax supported
The concrete syntax is documented as part of the public API of the
[`regex` crate](https://docs.rs/regex/%2A/regex/#syntax).
# Input safety
A key feature of this library is that it is safe to use with end user facing
input. This plays a significant role in the internal implementation. In
particular:
1. Parsers provide a `nest_limit` option that permits callers to control how
deeply nested a regular expression is allowed to be. This makes it possible
to do case analysis over an `Ast` or an `Hir` using recursion without
worrying about stack overflow.
2. Since relying on a particular stack size is brittle, this crate goes to
great lengths to ensure that all interactions with both the `Ast` and the
`Hir` do not use recursion. Namely, they use constant stack space and heap
space proportional to the size of the original pattern string (in bytes).
This includes the type's corresponding destructors. (One exception to this
is literal extraction, but this will eventually get fixed.)
# Error reporting
The `Display` implementations on all `Error` types exposed in this library
provide nice human readable errors that are suitable for showing to end users
in a monospace font.
# Literal extraction
This crate provides limited support for [literal extraction from `Hir`
values](hir::literal). Be warned that literal extraction uses recursion, and
therefore, stack size proportional to the size of the `Hir`.
The purpose of literal extraction is to speed up searches. That is, if you
know a regular expression must match a prefix or suffix literal, then it is
often quicker to search for instances of that literal, and then confirm or deny
the match using the full regular expression engine. These optimizations are
done automatically in the `regex` crate.
# Crate features
An important feature provided by this crate is its Unicode support. This
includes things like case folding, boolean properties, general categories,
scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`.
However, a downside of this support is that it requires bundling several
Unicode data tables that are substantial in size.
A fair number of use cases do not require full Unicode support. For this
reason, this crate exposes a number of features to control which Unicode
data is available.
If a regular expression attempts to use a Unicode feature that is not available
because the corresponding crate feature was disabled, then translating that
regular expression to an `Hir` will return an error. (It is still possible
construct an `Ast` for such a regular expression, since Unicode data is not
used until translation to an `Hir`.) Stated differently, enabling or disabling
any of the features below can only add or subtract from the total set of valid
regular expressions. Enabling or disabling a feature will never modify the
match semantics of a regular expression.
The following features are available:
* **std** -
Enables support for the standard library. This feature is enabled by default.
When disabled, only `core` and `alloc` are used. Otherwise, enabling `std`
generally just enables `std::error::Error` trait impls for the various error
types.
* **unicode** -
Enables all Unicode features. This feature is enabled by default, and will
always cover all Unicode features, even if more are added in the future.
* **unicode-age** -
Provide the data for the
[Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age).
This makes it possible to use classes like `\p{Age:6.0}` to refer to all
codepoints first introduced in Unicode 6.0
* **unicode-bool** -
Provide the data for numerous Unicode boolean properties. The full list
is not included here, but contains properties like `Alphabetic`, `Emoji`,
`Lowercase`, `Math`, `Uppercase` and `White_Space`.
* **unicode-case** -
Provide the data for case insensitive matching using
[Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches).
* **unicode-gencat** -
Provide the data for
[Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values).
This includes, but is not limited to, `Decimal_Number`, `Letter`,
`Math_Symbol`, `Number` and `Punctuation`.
* **unicode-perl** -
Provide the data for supporting the Unicode-aware Perl character classes,
corresponding to `\w`, `\s` and `\d`. This is also necessary for using
Unicode-aware word boundary assertions. Note that if this feature is
disabled, the `\s` and `\d` character classes are still available if the
`unicode-bool` and `unicode-gencat` features are enabled, respectively.
* **unicode-script** -
Provide the data for
[Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/).
This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`,
`Latin` and `Thai`.
* **unicode-segment** -
Provide the data necessary to provide the properties used to implement the
[Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/).
This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and
`\p{sb=ATerm}`.
* **arbitrary** -
Enabling this feature introduces a public dependency on the
[`arbitrary`](https://crates.io/crates/arbitrary)
crate. Namely, it implements the `Arbitrary` trait from that crate for the
[`Ast`](crate::ast::Ast) type. This feature is disabled by default.
*/
#![no_std]
#![forbid(unsafe_code)]
#![deny(missing_docs, rustdoc::broken_intra_doc_links)]
#![warn(missing_debug_implementations)]
// MSRV(1.62): Allow unused warnings. Needed for the 'allow' below,
// since the warning is no longer triggered in newer Rust releases.
// Once the 'allow(mutable_borrow_reservation_conflict)' can be
// removed, we can remove the 'allow(renamed_and_removed_lints)' too.
#![allow(renamed_and_removed_lints)]
// MSRV(1.62): This gets triggered on Rust <1.62, and since our MSRV
// is Rust 1.60 at the time of writing, a warning is displayed. But
// the lang team decided the code pattern flagged by this warning is
// OK, so the warning is innocuous. We can remove this explicit allow
// once we get to a Rust release where the warning is no longer
// triggered. I believe that's Rust 1.62.
#![allow(mutable_borrow_reservation_conflict)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(any(test, feature = "std"))]
extern crate std;
extern crate alloc;
pub use crate::{
error::Error,
parser::{parse, Parser, ParserBuilder},
unicode::UnicodeWordError,
};
use alloc::string::String;
pub mod ast;
mod debug;
mod either;
mod error;
pub mod hir;
mod parser;
mod rank;
mod unicode;
mod unicode_tables;
pub mod utf8;
/// Escapes all regular expression meta characters in `text`.
///
/// The string returned may be safely used as a literal in a regular
/// expression.
pub fn escape(text: &str) -> String {
let mut quoted = String::new();
escape_into(text, &mut quoted);
quoted
}
/// Escapes all meta characters in `text` and writes the result into `buf`.
///
/// This will append escape characters into the given buffer. The characters
/// that are appended are safe to use as a literal in a regular expression.
pub fn escape_into(text: &str, buf: &mut String) {
buf.reserve(text.len());
for c in text.chars() {
if is_meta_character(c) {
buf.push('\\');
}
buf.push(c);
}
}
/// Returns true if the given character has significance in a regex.
///
/// Generally speaking, these are the only characters which _must_ be escaped
/// in order to match their literal meaning. For example, to match a literal
/// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For
/// example, `-` is treated as a meta character because of its significance
/// for writing ranges inside of character classes, but the regex `-` will
/// match a literal `-` because `-` has no special meaning outside of character
/// classes.
///
/// In order to determine whether a character may be escaped at all, the
/// [`is_escapeable_character`] routine should be used. The difference between
/// `is_meta_character` and `is_escapeable_character` is that the latter will
/// return true for some characters that are _not_ meta characters. For
/// example, `%` and `\%` both match a literal `%` in all contexts. In other
/// words, `is_escapeable_character` includes "superfluous" escapes.
///
/// Note that the set of characters for which this function returns `true` or
/// `false` is fixed and won't change in a semver compatible release. (In this
/// case, "semver compatible release" actually refers to the `regex` crate
/// itself, since reducing or expanding the set of meta characters would be a
/// breaking change for not just `regex-syntax` but also `regex` itself.)
///
/// # Example
///
/// ```
/// use regex_syntax::is_meta_character;
///
/// assert!(is_meta_character('?'));
/// assert!(is_meta_character('-'));
/// assert!(is_meta_character('&'));
/// assert!(is_meta_character('#'));
///
/// assert!(!is_meta_character('%'));
/// assert!(!is_meta_character('/'));
/// assert!(!is_meta_character('!'));
/// assert!(!is_meta_character('"'));
/// assert!(!is_meta_character('e'));
/// ```
pub fn is_meta_character(c: char) -> bool {
match c {
'\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{'
| '}' | '^' | '$' | '#' | '&' | '-' | '~' => true,
_ => false,
}
}
/// Returns true if the given character can be escaped in a regex. | /// For example, `%` is not a meta character, but it is escapeable. That is,
/// `%` and `\%` both match a literal `%` in all contexts.
///
/// The purpose of this routine is to provide knowledge about what characters
/// may be escaped. Namely, most regex engines permit "superfluous" escapes
/// where characters without any special significance may be escaped even
/// though there is no actual _need_ to do so.
///
/// This will return false for some characters. For example, `e` is not
/// escapeable. Therefore, `\e` will either result in a parse error (which is
/// true today), or it could backwards compatibly evolve into a new construct
/// with its own meaning. Indeed, that is the purpose of banning _some_
/// superfluous escapes: it provides a way to evolve the syntax in a compatible
/// manner.
///
/// # Example
///
/// ```
/// use regex_syntax::is_escapeable_character;
///
/// assert!(is_escapeable_character('?'));
/// assert!(is_escapeable_character('-'));
/// assert!(is_escapeable_character('&'));
/// assert!(is_escapeable_character('#'));
/// assert!(is_escapeable_character('%'));
/// assert!(is_escapeable_character('/'));
/// assert!(is_escapeable_character('!'));
/// assert!(is_escapeable_character('"'));
///
/// assert!(!is_escapeable_character('e'));
/// ```
pub fn is_escapeable_character(c: char) -> bool {
// Certainly escapeable if it's a meta character.
if is_meta_character(c) {
return true;
}
// Any character that isn't ASCII is definitely not escapeable. There's
// no real need to allow things like \☃ right?
if !c.is_ascii() {
return false;
}
// Otherwise, we basically say that everything is escapeable unless it's a
// letter or digit. Things like \3 are either octal (when enabled) or an
// error, and we should keep it that way. Otherwise, letters are reserved
// for adding new syntax in a backwards compatible way.
match c {
'0'..='9' | 'A'..='Z' | 'a'..='z' => false,
// While not currently supported, we keep these as not escapeable to
// give us some flexibility with respect to supporting the \< and
// \> word boundary assertions in the future. By rejecting them as
// escapeable, \< and \> will result in a parse error. Thus, we can
// turn them into something else in the future without it being a
// backwards incompatible change.
'<' | '>' => false,
_ => true,
}
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Panics
///
/// If the `unicode-perl` feature is not enabled, then this function
/// panics. For this reason, it is recommended that callers use
/// [`try_is_word_character`] instead.
pub fn is_word_character(c: char) -> bool {
try_is_word_character(c).expect("unicode-perl feature must be enabled")
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Errors
///
/// If the `unicode-perl` feature is not enabled, then this function always
/// returns an error.
pub fn try_is_word_character(
c: char,
) -> core::result::Result<bool, UnicodeWordError> {
unicode::is_word_character(c)
}
/// Returns true if and only if the given character is an ASCII word character.
///
/// An ASCII word character is defined by the following character class:
/// `[_0-9a-zA-Z]'.
pub fn is_word_byte(c: u8) -> bool {
match c {
b'_' | b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' => true,
_ => false,
}
}
#[cfg(test)]
mod tests {
use alloc::string::ToString;
use super::*;
#[test]
fn escape_meta() {
assert_eq!(
escape(r"\.+*?()|[]{}^$#&-~"),
r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~".to_string()
);
}
#[test]
fn word_byte() {
assert!(is_word_byte(b'a'));
assert!(!is_word_byte(b'-'));
}
#[test]
#[cfg(feature = "unicode-perl")]
fn word_char() {
assert!(is_word_character('a'), "ASCII");
assert!(is_word_character('à'), "Latin-1");
assert!(is_word_character('β'), "Greek");
assert!(is_word_character('\u{11011}'), "Brahmi (Unicode 6.0)");
assert!(is_word_character('\u{11611}'), "Modi (Unicode 7.0)");
assert!(is_word_character('\u{11711}'), "Ahom (Unicode 8.0)");
assert!(is_word_character('\u{17828}'), "Tangut (Unicode 9.0)");
assert!(is_word_character('\u{1B1B1}'), "Nushu (Unicode 10.0)");
assert!(is_word_character('\u{16E40}'), "Medefaidrin (Unicode 11.0)");
assert!(!is_word_character('-'));
assert!(!is_word_character('☃'));
}
#[test]
#[should_panic]
#[cfg(not(feature = "unicode-perl"))]
fn word_char_disabled_panic() {
assert!(is_word_character('a'));
}
#[test]
#[cfg(not(feature = "unicode-perl"))]
fn word_char_disabled_error() {
assert!(try_is_word_character('a').is_err());
}
} | ///
/// This returns true in all cases that `is_meta_character` returns true, but
/// also returns true in some cases where `is_meta_character` returns false. | random_line_split |
main.rs | use std::env;
use std::ffi::OsStr;
use std::io::{self, Write};
use std::path::PathBuf;
use std::process;
use std::result;
use imdb_index::{Index, IndexBuilder, NgramType, Searcher};
use lazy_static::lazy_static;
use tabwriter::TabWriter;
use walkdir::WalkDir;
use crate::rename::{RenamerBuilder, RenameAction};
use crate::util::{choose, read_yesno, write_tsv};
mod download;
mod logger;
mod rename;
mod util;
/// Our type alias for handling errors throughout imdb-rename.
type Result<T> = result::Result<T, failure::Error>;
fn main() {
if let Err(err) = try_main() {
// A pipe error occurs when the consumer of this process's output has
// hung up. This is a normal event, and we should quit gracefully.
if is_pipe_error(&err) {
process::exit(0);
}
// Print the error, including all of its underlying causes.
eprintln!("{}", pretty_error(&err));
// If we get a non-empty backtrace (e.g., RUST_BACKTRACE=1 is set),
// then show it.
let backtrace = err.backtrace().to_string();
if !backtrace.trim().is_empty() {
eprintln!("{}", backtrace);
}
process::exit(1);
}
}
fn try_main() -> Result<()> {
logger::init()?;
log::set_max_level(log::LevelFilter::Info);
let args = Args::from_matches(&app().get_matches())?;
if args.debug {
log::set_max_level(log::LevelFilter::Debug);
}
// Forcefully update the data and re-index if requested.
if args.update_data {
args.download_all_update()?;
args.create_index()?;
return Ok(());
}
// Ensure that the necessary data exists.
if args.download_all()? || args.update_index {
args.create_index()?;
if args.update_index {
return Ok(());
}
}
// Now ensure that the index exists.
if !args.index_dir.exists() {
args.create_index()?;
}
let mut searcher = args.searcher()?;
let results = match args.query {
None => None,
Some(ref query) => Some(searcher.search(&query.parse()?)?),
};
if args.files.is_empty() {
let results = match results {
None => failure::bail!("run with a file to rename or --query"),
Some(ref results) => results,
};
return write_tsv(io::stdout(), &mut searcher, results.as_slice());
}
let mut builder = RenamerBuilder::new();
builder
.min_votes(args.min_votes)
.good_threshold(0.25)
.regex_episode(&args.regex_episode)
.regex_season(&args.regex_season)
.regex_year(&args.regex_year);
if let Some(ref results) = results {
builder.force(choose(&mut searcher, results.as_slice(), 0.25)?);
}
let renamer = builder.build()?;
let proposals = renamer.propose(
&mut searcher,
&args.files,
args.dest_dir,
args.rename_action)?;
if proposals.is_empty() {
failure::bail!("no files to rename");
}
let mut stdout = TabWriter::new(io::stdout());
for p in &proposals {
writeln!(stdout, "{}\t->\t{}", p.src().display(), p.dst().display())?;
}
stdout.flush()?;
if read_yesno(&format!(
"Are you sure you want to {action} the above files? (y/n) ",
action = &args.rename_action
))? {
for p in &proposals {
if let Err(err) = p.rename() {
eprintln!("{}", err);
}
}
}
Ok(())
}
#[derive(Debug)]
struct Args {
data_dir: PathBuf,
dest_dir: Option<PathBuf>,
debug: bool,
files: Vec<PathBuf>,
index_dir: PathBuf,
ngram_size: usize,
ngram_type: NgramType,
query: Option<String>,
regex_episode: String,
regex_season: String,
regex_year: String,
update_data: bool,
update_index: bool,
min_votes: u32,
rename_action: RenameAction,
}
impl Args {
fn from_matches(matches: &clap::ArgMatches) -> Result<Args> {
let files = collect_paths(
matches
.values_of_os("file")
.map(|it| it.collect())
.unwrap_or(vec![]),
matches.is_present("follow"),
);
let query = matches
.value_of_lossy("query")
.map(|q| q.into_owned());
let data_dir = matches
.value_of_os("data-dir")
.map(PathBuf::from)
.unwrap();
let dest_dir = matches
.value_of_os("dest-dir")
.map(PathBuf::from);
let index_dir = matches
.value_of_os("index-dir")
.map(PathBuf::from)
.unwrap_or(data_dir.join("index"));
let regex_episode = matches
.value_of_lossy("re-episode")
.unwrap()
.into_owned();
let regex_season = matches
.value_of_lossy("re-season")
.unwrap()
.into_owned();
let regex_year = matches
.value_of_lossy("re-year")
.unwrap()
.into_owned();
let min_votes = matches
.value_of_lossy("votes")
.unwrap()
.parse()?;
let rename_action = {
if matches.is_present("symlink") {
if !cfg!(unix) {
failure::bail!(
"--symlink currently supported only on Unix \
platforms, try hardlink (-H) instead"
);
}
RenameAction::Symlink
} else if matches.is_present("hardlink") {
RenameAction::Hardlink
} else {
RenameAction::Rename
}
};
Ok(Args {
data_dir: data_dir,
dest_dir: dest_dir,
debug: matches.is_present("debug"),
files: files,
index_dir: index_dir,
ngram_size: matches.value_of_lossy("ngram-size").unwrap().parse()?,
ngram_type: matches.value_of_lossy("ngram-type").unwrap().parse()?,
query: query,
regex_episode: regex_episode,
regex_season: regex_season,
regex_year: regex_year,
update_data: matches.is_present("update-data"),
update_index: matches.is_present("update-index"),
min_votes: min_votes,
rename_action: rename_action,
})
}
fn create_index(&self) -> Result<Index> {
Ok(IndexBuilder::new()
.ngram_size(self.ngram_size)
.ngram_type(self.ngram_type)
.create(&self.data_dir, &self.index_dir)?)
}
fn open_index(&self) -> Result<Index> {
Ok(Index::open(&self.data_dir, &self.index_dir)?)
}
fn searcher(&self) -> Result<Searcher> {
Ok(Searcher::new(self.open_index()?))
}
fn download_all(&self) -> Result<bool> |
fn download_all_update(&self) -> Result<()> {
download::update_all(&self.data_dir)
}
}
fn app() -> clap::App<'static, 'static> {
use clap::{App, AppSettings, Arg};
lazy_static! {
// clap wants all of its strings tied to a particular lifetime, but
// we'd really like to determine some default values dynamically. Using
// a lazy_static here is one way of safely giving a static lifetime to
// a value that is computed at runtime.
//
// An alternative approach would be to compute all of our default
// values in the caller, and pass them into this function. It's nicer
// to defined what we need here though. Locality of reference and all
// that.
static ref DATA_DIR: PathBuf = env::temp_dir().join("imdb-rename");
}
App::new("imdb-rename")
.author(clap::crate_authors!())
.version(clap::crate_version!())
.max_term_width(100)
.setting(AppSettings::UnifiedHelpMessage)
.arg(Arg::with_name("file")
.multiple(true)
.help("One or more files to rename."))
.arg(Arg::with_name("data-dir")
.long("data-dir")
.env("IMDB_RENAME_DATA_DIR")
.takes_value(true)
.default_value_os(DATA_DIR.as_os_str())
.help("The location to store IMDb data files."))
.arg(Arg::with_name("dest-dir")
.long("dest-dir")
.short("d")
.env("IMDB_RENAME_DEST_DIR")
.takes_value(true)
.help("The output directory of renamed files \
(or symlinks/hardlinks with the -s/-H options). \
By default, files are renamed in place."))
.arg(Arg::with_name("debug")
.long("debug")
.help("Show debug messages. Use this when filing bugs."))
.arg(Arg::with_name("follow")
.long("follow")
.short("f")
.help("Follow directories and attempt to rename all child \
entries."))
.arg(Arg::with_name("index-dir")
.long("index-dir")
.env("IMDB_RENAME_INDEX_DIR")
.takes_value(true)
.help("The location to store IMDb index files. \
When absent, the default is {data-dir}/index."))
.arg(Arg::with_name("ngram-size")
.long("ngram-size")
.default_value("3")
.help("Choose the ngram size for indexing names. This is only \
used at index time and otherwise ignored."))
.arg(Arg::with_name("ngram-type")
.long("ngram-type")
.default_value("window")
.possible_values(NgramType::possible_names())
.help("Choose the type of ngram generation. This is only used \
used at index time and otherwise ignored."))
.arg(Arg::with_name("query")
.long("query")
.short("q")
.takes_value(true)
.help("Setting an override query is necessary if the file \
path lacks sufficient information to find a matching \
title. For example, if a year could not be found. It \
is also useful for specifying a TV show when renaming \
multiple episodes at once."))
.arg(Arg::with_name("re-episode")
.long("re-episode")
.takes_value(true)
.default_value(r"[Ee](?P<episode>[0-9]+)")
.help("A regex for matching episode numbers. The episode number \
is extracted by looking for a 'episode' capture group."))
.arg(Arg::with_name("re-season")
.long("re-season")
.takes_value(true)
.default_value(r"[Ss](?P<season>[0-9]+)")
.help("A regex for matching season numbers. The season number \
is extracted by looking for a 'season' capture group."))
.arg(Arg::with_name("re-year")
.long("re-year")
.takes_value(true)
.default_value(r"\b(?P<year>[0-9]{4})\b")
.help("A regex for matching the year. The year is extracted by \
looking for a 'year' capture group."))
.arg(Arg::with_name("update-data")
.long("update-data")
.help("Forcefully refreshes the IMDb data and then exits."))
.arg(Arg::with_name("votes")
.long("votes")
.default_value("1000")
.help("The minimum number of votes required for results matching \
a query derived from existing file names. This is not \
applied to explicit queries via the -q/--query flag."))
.arg(Arg::with_name("update-index")
.long("update-index")
.help("Forcefully re-indexes the IMDb data and then exits."))
.arg(Arg::with_name("symlink")
.long("symlink")
.short("s")
.conflicts_with("hardlink")
.help("Create a symlink instead of renaming. \
(Unix only feature.)"))
.arg(Arg::with_name("hardlink")
.long("hardlink")
.short("H")
.conflicts_with("symlink")
.help("Create a hardlink instead of renaming. \
This doesn't work when renaming directories."))
}
/// Collect all file paths from a sequence of OsStrings from the command line.
/// If `follow` is true, then any paths that are directories are expanded to
/// include all child paths, recursively.
///
/// If there is an error following a path, then it is logged to stderr and
/// otherwise skipped.
fn collect_paths(paths: Vec<&OsStr>, follow: bool) -> Vec<PathBuf> {
let mut results = vec![];
for path in paths {
let path = PathBuf::from(path);
if !follow || !path.is_dir() {
results.push(path);
continue;
}
for result in WalkDir::new(path) {
match result {
Ok(dent) => results.push(dent.path().to_path_buf()),
Err(err) => eprintln!("{}", err),
}
}
}
results
}
/// Return a prettily formatted error, including its entire causal chain.
fn pretty_error(err: &failure::Error) -> String {
let mut pretty = err.to_string();
let mut prev = err.as_fail();
while let Some(next) = prev.cause() {
pretty.push_str(": ");
pretty.push_str(&next.to_string());
prev = next;
}
pretty
}
/// Return true if and only if an I/O broken pipe error exists in the causal
/// chain of the given error.
fn is_pipe_error(err: &failure::Error) -> bool {
for cause in err.iter_chain() {
if let Some(ioerr) = cause.downcast_ref::<io::Error>() {
if ioerr.kind() == io::ErrorKind::BrokenPipe {
return true;
}
}
}
false
}
| {
download::download_all(&self.data_dir)
} | identifier_body |
main.rs | use std::env;
use std::ffi::OsStr;
use std::io::{self, Write};
use std::path::PathBuf;
use std::process;
use std::result;
use imdb_index::{Index, IndexBuilder, NgramType, Searcher};
use lazy_static::lazy_static;
use tabwriter::TabWriter;
use walkdir::WalkDir;
use crate::rename::{RenamerBuilder, RenameAction};
use crate::util::{choose, read_yesno, write_tsv};
mod download;
mod logger;
mod rename;
mod util;
/// Our type alias for handling errors throughout imdb-rename.
type Result<T> = result::Result<T, failure::Error>;
fn main() {
if let Err(err) = try_main() {
// A pipe error occurs when the consumer of this process's output has
// hung up. This is a normal event, and we should quit gracefully.
if is_pipe_error(&err) {
process::exit(0);
}
// Print the error, including all of its underlying causes.
eprintln!("{}", pretty_error(&err));
// If we get a non-empty backtrace (e.g., RUST_BACKTRACE=1 is set),
// then show it.
let backtrace = err.backtrace().to_string();
if !backtrace.trim().is_empty() {
eprintln!("{}", backtrace);
}
process::exit(1);
}
}
fn try_main() -> Result<()> {
logger::init()?;
log::set_max_level(log::LevelFilter::Info);
let args = Args::from_matches(&app().get_matches())?;
if args.debug {
log::set_max_level(log::LevelFilter::Debug);
}
// Forcefully update the data and re-index if requested.
if args.update_data {
args.download_all_update()?;
args.create_index()?;
return Ok(());
}
// Ensure that the necessary data exists.
if args.download_all()? || args.update_index {
args.create_index()?;
if args.update_index {
return Ok(());
}
}
// Now ensure that the index exists.
if !args.index_dir.exists() |
let mut searcher = args.searcher()?;
let results = match args.query {
None => None,
Some(ref query) => Some(searcher.search(&query.parse()?)?),
};
if args.files.is_empty() {
let results = match results {
None => failure::bail!("run with a file to rename or --query"),
Some(ref results) => results,
};
return write_tsv(io::stdout(), &mut searcher, results.as_slice());
}
let mut builder = RenamerBuilder::new();
builder
.min_votes(args.min_votes)
.good_threshold(0.25)
.regex_episode(&args.regex_episode)
.regex_season(&args.regex_season)
.regex_year(&args.regex_year);
if let Some(ref results) = results {
builder.force(choose(&mut searcher, results.as_slice(), 0.25)?);
}
let renamer = builder.build()?;
let proposals = renamer.propose(
&mut searcher,
&args.files,
args.dest_dir,
args.rename_action)?;
if proposals.is_empty() {
failure::bail!("no files to rename");
}
let mut stdout = TabWriter::new(io::stdout());
for p in &proposals {
writeln!(stdout, "{}\t->\t{}", p.src().display(), p.dst().display())?;
}
stdout.flush()?;
if read_yesno(&format!(
"Are you sure you want to {action} the above files? (y/n) ",
action = &args.rename_action
))? {
for p in &proposals {
if let Err(err) = p.rename() {
eprintln!("{}", err);
}
}
}
Ok(())
}
#[derive(Debug)]
struct Args {
data_dir: PathBuf,
dest_dir: Option<PathBuf>,
debug: bool,
files: Vec<PathBuf>,
index_dir: PathBuf,
ngram_size: usize,
ngram_type: NgramType,
query: Option<String>,
regex_episode: String,
regex_season: String,
regex_year: String,
update_data: bool,
update_index: bool,
min_votes: u32,
rename_action: RenameAction,
}
impl Args {
fn from_matches(matches: &clap::ArgMatches) -> Result<Args> {
let files = collect_paths(
matches
.values_of_os("file")
.map(|it| it.collect())
.unwrap_or(vec![]),
matches.is_present("follow"),
);
let query = matches
.value_of_lossy("query")
.map(|q| q.into_owned());
let data_dir = matches
.value_of_os("data-dir")
.map(PathBuf::from)
.unwrap();
let dest_dir = matches
.value_of_os("dest-dir")
.map(PathBuf::from);
let index_dir = matches
.value_of_os("index-dir")
.map(PathBuf::from)
.unwrap_or(data_dir.join("index"));
let regex_episode = matches
.value_of_lossy("re-episode")
.unwrap()
.into_owned();
let regex_season = matches
.value_of_lossy("re-season")
.unwrap()
.into_owned();
let regex_year = matches
.value_of_lossy("re-year")
.unwrap()
.into_owned();
let min_votes = matches
.value_of_lossy("votes")
.unwrap()
.parse()?;
let rename_action = {
if matches.is_present("symlink") {
if !cfg!(unix) {
failure::bail!(
"--symlink currently supported only on Unix \
platforms, try hardlink (-H) instead"
);
}
RenameAction::Symlink
} else if matches.is_present("hardlink") {
RenameAction::Hardlink
} else {
RenameAction::Rename
}
};
Ok(Args {
data_dir: data_dir,
dest_dir: dest_dir,
debug: matches.is_present("debug"),
files: files,
index_dir: index_dir,
ngram_size: matches.value_of_lossy("ngram-size").unwrap().parse()?,
ngram_type: matches.value_of_lossy("ngram-type").unwrap().parse()?,
query: query,
regex_episode: regex_episode,
regex_season: regex_season,
regex_year: regex_year,
update_data: matches.is_present("update-data"),
update_index: matches.is_present("update-index"),
min_votes: min_votes,
rename_action: rename_action,
})
}
fn create_index(&self) -> Result<Index> {
Ok(IndexBuilder::new()
.ngram_size(self.ngram_size)
.ngram_type(self.ngram_type)
.create(&self.data_dir, &self.index_dir)?)
}
fn open_index(&self) -> Result<Index> {
Ok(Index::open(&self.data_dir, &self.index_dir)?)
}
fn searcher(&self) -> Result<Searcher> {
Ok(Searcher::new(self.open_index()?))
}
fn download_all(&self) -> Result<bool> {
download::download_all(&self.data_dir)
}
fn download_all_update(&self) -> Result<()> {
download::update_all(&self.data_dir)
}
}
fn app() -> clap::App<'static, 'static> {
use clap::{App, AppSettings, Arg};
lazy_static! {
// clap wants all of its strings tied to a particular lifetime, but
// we'd really like to determine some default values dynamically. Using
// a lazy_static here is one way of safely giving a static lifetime to
// a value that is computed at runtime.
//
// An alternative approach would be to compute all of our default
// values in the caller, and pass them into this function. It's nicer
// to defined what we need here though. Locality of reference and all
// that.
static ref DATA_DIR: PathBuf = env::temp_dir().join("imdb-rename");
}
App::new("imdb-rename")
.author(clap::crate_authors!())
.version(clap::crate_version!())
.max_term_width(100)
.setting(AppSettings::UnifiedHelpMessage)
.arg(Arg::with_name("file")
.multiple(true)
.help("One or more files to rename."))
.arg(Arg::with_name("data-dir")
.long("data-dir")
.env("IMDB_RENAME_DATA_DIR")
.takes_value(true)
.default_value_os(DATA_DIR.as_os_str())
.help("The location to store IMDb data files."))
.arg(Arg::with_name("dest-dir")
.long("dest-dir")
.short("d")
.env("IMDB_RENAME_DEST_DIR")
.takes_value(true)
.help("The output directory of renamed files \
(or symlinks/hardlinks with the -s/-H options). \
By default, files are renamed in place."))
.arg(Arg::with_name("debug")
.long("debug")
.help("Show debug messages. Use this when filing bugs."))
.arg(Arg::with_name("follow")
.long("follow")
.short("f")
.help("Follow directories and attempt to rename all child \
entries."))
.arg(Arg::with_name("index-dir")
.long("index-dir")
.env("IMDB_RENAME_INDEX_DIR")
.takes_value(true)
.help("The location to store IMDb index files. \
When absent, the default is {data-dir}/index."))
.arg(Arg::with_name("ngram-size")
.long("ngram-size")
.default_value("3")
.help("Choose the ngram size for indexing names. This is only \
used at index time and otherwise ignored."))
.arg(Arg::with_name("ngram-type")
.long("ngram-type")
.default_value("window")
.possible_values(NgramType::possible_names())
.help("Choose the type of ngram generation. This is only used \
used at index time and otherwise ignored."))
.arg(Arg::with_name("query")
.long("query")
.short("q")
.takes_value(true)
.help("Setting an override query is necessary if the file \
path lacks sufficient information to find a matching \
title. For example, if a year could not be found. It \
is also useful for specifying a TV show when renaming \
multiple episodes at once."))
.arg(Arg::with_name("re-episode")
.long("re-episode")
.takes_value(true)
.default_value(r"[Ee](?P<episode>[0-9]+)")
.help("A regex for matching episode numbers. The episode number \
is extracted by looking for a 'episode' capture group."))
.arg(Arg::with_name("re-season")
.long("re-season")
.takes_value(true)
.default_value(r"[Ss](?P<season>[0-9]+)")
.help("A regex for matching season numbers. The season number \
is extracted by looking for a 'season' capture group."))
.arg(Arg::with_name("re-year")
.long("re-year")
.takes_value(true)
.default_value(r"\b(?P<year>[0-9]{4})\b")
.help("A regex for matching the year. The year is extracted by \
looking for a 'year' capture group."))
.arg(Arg::with_name("update-data")
.long("update-data")
.help("Forcefully refreshes the IMDb data and then exits."))
.arg(Arg::with_name("votes")
.long("votes")
.default_value("1000")
.help("The minimum number of votes required for results matching \
a query derived from existing file names. This is not \
applied to explicit queries via the -q/--query flag."))
.arg(Arg::with_name("update-index")
.long("update-index")
.help("Forcefully re-indexes the IMDb data and then exits."))
.arg(Arg::with_name("symlink")
.long("symlink")
.short("s")
.conflicts_with("hardlink")
.help("Create a symlink instead of renaming. \
(Unix only feature.)"))
.arg(Arg::with_name("hardlink")
.long("hardlink")
.short("H")
.conflicts_with("symlink")
.help("Create a hardlink instead of renaming. \
This doesn't work when renaming directories."))
}
/// Collect all file paths from a sequence of OsStrings from the command line.
/// If `follow` is true, then any paths that are directories are expanded to
/// include all child paths, recursively.
///
/// If there is an error following a path, then it is logged to stderr and
/// otherwise skipped.
fn collect_paths(paths: Vec<&OsStr>, follow: bool) -> Vec<PathBuf> {
let mut results = vec![];
for path in paths {
let path = PathBuf::from(path);
if !follow || !path.is_dir() {
results.push(path);
continue;
}
for result in WalkDir::new(path) {
match result {
Ok(dent) => results.push(dent.path().to_path_buf()),
Err(err) => eprintln!("{}", err),
}
}
}
results
}
/// Return a prettily formatted error, including its entire causal chain.
fn pretty_error(err: &failure::Error) -> String {
let mut pretty = err.to_string();
let mut prev = err.as_fail();
while let Some(next) = prev.cause() {
pretty.push_str(": ");
pretty.push_str(&next.to_string());
prev = next;
}
pretty
}
/// Return true if and only if an I/O broken pipe error exists in the causal
/// chain of the given error.
fn is_pipe_error(err: &failure::Error) -> bool {
for cause in err.iter_chain() {
if let Some(ioerr) = cause.downcast_ref::<io::Error>() {
if ioerr.kind() == io::ErrorKind::BrokenPipe {
return true;
}
}
}
false
}
| {
args.create_index()?;
} | conditional_block |
main.rs | use std::env;
use std::ffi::OsStr;
use std::io::{self, Write};
use std::path::PathBuf;
use std::process;
use std::result;
use imdb_index::{Index, IndexBuilder, NgramType, Searcher};
use lazy_static::lazy_static;
use tabwriter::TabWriter;
use walkdir::WalkDir;
use crate::rename::{RenamerBuilder, RenameAction};
use crate::util::{choose, read_yesno, write_tsv};
mod download;
mod logger;
mod rename;
mod util;
/// Our type alias for handling errors throughout imdb-rename.
type Result<T> = result::Result<T, failure::Error>;
fn main() {
if let Err(err) = try_main() {
// A pipe error occurs when the consumer of this process's output has
// hung up. This is a normal event, and we should quit gracefully.
if is_pipe_error(&err) {
process::exit(0);
}
// Print the error, including all of its underlying causes.
eprintln!("{}", pretty_error(&err));
// If we get a non-empty backtrace (e.g., RUST_BACKTRACE=1 is set),
// then show it.
let backtrace = err.backtrace().to_string();
if !backtrace.trim().is_empty() {
eprintln!("{}", backtrace);
}
process::exit(1);
}
}
fn try_main() -> Result<()> {
logger::init()?;
log::set_max_level(log::LevelFilter::Info);
let args = Args::from_matches(&app().get_matches())?;
if args.debug {
log::set_max_level(log::LevelFilter::Debug);
}
// Forcefully update the data and re-index if requested.
if args.update_data {
args.download_all_update()?;
args.create_index()?;
return Ok(());
}
// Ensure that the necessary data exists.
if args.download_all()? || args.update_index {
args.create_index()?;
if args.update_index {
return Ok(());
}
}
// Now ensure that the index exists.
if !args.index_dir.exists() {
args.create_index()?;
}
let mut searcher = args.searcher()?;
let results = match args.query {
None => None,
Some(ref query) => Some(searcher.search(&query.parse()?)?),
};
if args.files.is_empty() {
let results = match results {
None => failure::bail!("run with a file to rename or --query"),
Some(ref results) => results,
};
return write_tsv(io::stdout(), &mut searcher, results.as_slice());
}
let mut builder = RenamerBuilder::new();
builder
.min_votes(args.min_votes)
.good_threshold(0.25)
.regex_episode(&args.regex_episode)
.regex_season(&args.regex_season)
.regex_year(&args.regex_year);
if let Some(ref results) = results {
builder.force(choose(&mut searcher, results.as_slice(), 0.25)?);
}
let renamer = builder.build()?;
let proposals = renamer.propose(
&mut searcher,
&args.files,
args.dest_dir,
args.rename_action)?;
if proposals.is_empty() {
failure::bail!("no files to rename");
}
let mut stdout = TabWriter::new(io::stdout());
for p in &proposals {
writeln!(stdout, "{}\t->\t{}", p.src().display(), p.dst().display())?;
}
stdout.flush()?;
if read_yesno(&format!(
"Are you sure you want to {action} the above files? (y/n) ",
action = &args.rename_action
))? {
for p in &proposals {
if let Err(err) = p.rename() {
eprintln!("{}", err);
}
}
}
Ok(())
}
#[derive(Debug)]
struct Args {
data_dir: PathBuf,
dest_dir: Option<PathBuf>,
debug: bool,
files: Vec<PathBuf>,
index_dir: PathBuf,
ngram_size: usize,
ngram_type: NgramType,
query: Option<String>,
regex_episode: String,
regex_season: String,
regex_year: String,
update_data: bool,
update_index: bool,
min_votes: u32,
rename_action: RenameAction,
}
impl Args {
fn from_matches(matches: &clap::ArgMatches) -> Result<Args> {
let files = collect_paths(
matches
.values_of_os("file")
.map(|it| it.collect())
.unwrap_or(vec![]),
matches.is_present("follow"),
);
let query = matches
.value_of_lossy("query")
.map(|q| q.into_owned());
let data_dir = matches
.value_of_os("data-dir")
.map(PathBuf::from)
.unwrap();
let dest_dir = matches
.value_of_os("dest-dir")
.map(PathBuf::from);
let index_dir = matches
.value_of_os("index-dir")
.map(PathBuf::from)
.unwrap_or(data_dir.join("index"));
let regex_episode = matches
.value_of_lossy("re-episode")
.unwrap()
.into_owned();
let regex_season = matches
.value_of_lossy("re-season")
.unwrap()
.into_owned();
let regex_year = matches
.value_of_lossy("re-year")
.unwrap()
.into_owned();
let min_votes = matches
.value_of_lossy("votes")
.unwrap()
.parse()?;
let rename_action = {
if matches.is_present("symlink") {
if !cfg!(unix) {
failure::bail!(
"--symlink currently supported only on Unix \
platforms, try hardlink (-H) instead"
);
}
RenameAction::Symlink
} else if matches.is_present("hardlink") {
RenameAction::Hardlink
} else {
RenameAction::Rename
}
};
Ok(Args {
data_dir: data_dir,
dest_dir: dest_dir,
debug: matches.is_present("debug"),
files: files,
index_dir: index_dir,
ngram_size: matches.value_of_lossy("ngram-size").unwrap().parse()?,
ngram_type: matches.value_of_lossy("ngram-type").unwrap().parse()?,
query: query,
regex_episode: regex_episode,
regex_season: regex_season,
regex_year: regex_year,
update_data: matches.is_present("update-data"),
update_index: matches.is_present("update-index"),
min_votes: min_votes,
rename_action: rename_action,
})
}
fn create_index(&self) -> Result<Index> {
Ok(IndexBuilder::new()
.ngram_size(self.ngram_size)
.ngram_type(self.ngram_type)
.create(&self.data_dir, &self.index_dir)?)
}
fn open_index(&self) -> Result<Index> {
Ok(Index::open(&self.data_dir, &self.index_dir)?)
}
fn searcher(&self) -> Result<Searcher> {
Ok(Searcher::new(self.open_index()?))
}
fn download_all(&self) -> Result<bool> {
download::download_all(&self.data_dir)
}
fn download_all_update(&self) -> Result<()> {
download::update_all(&self.data_dir)
}
}
fn app() -> clap::App<'static, 'static> {
use clap::{App, AppSettings, Arg};
lazy_static! {
// clap wants all of its strings tied to a particular lifetime, but
// we'd really like to determine some default values dynamically. Using
// a lazy_static here is one way of safely giving a static lifetime to
// a value that is computed at runtime.
//
// An alternative approach would be to compute all of our default
// values in the caller, and pass them into this function. It's nicer
// to defined what we need here though. Locality of reference and all
// that.
static ref DATA_DIR: PathBuf = env::temp_dir().join("imdb-rename");
}
App::new("imdb-rename")
.author(clap::crate_authors!())
.version(clap::crate_version!())
.max_term_width(100)
.setting(AppSettings::UnifiedHelpMessage)
.arg(Arg::with_name("file")
.multiple(true)
.help("One or more files to rename."))
.arg(Arg::with_name("data-dir")
.long("data-dir")
.env("IMDB_RENAME_DATA_DIR")
.takes_value(true)
.default_value_os(DATA_DIR.as_os_str())
.help("The location to store IMDb data files."))
.arg(Arg::with_name("dest-dir")
.long("dest-dir")
.short("d")
.env("IMDB_RENAME_DEST_DIR")
.takes_value(true)
.help("The output directory of renamed files \
(or symlinks/hardlinks with the -s/-H options). \
By default, files are renamed in place."))
.arg(Arg::with_name("debug")
.long("debug")
.help("Show debug messages. Use this when filing bugs."))
.arg(Arg::with_name("follow")
.long("follow")
.short("f")
.help("Follow directories and attempt to rename all child \
entries."))
.arg(Arg::with_name("index-dir")
.long("index-dir")
.env("IMDB_RENAME_INDEX_DIR")
.takes_value(true)
.help("The location to store IMDb index files. \
When absent, the default is {data-dir}/index."))
.arg(Arg::with_name("ngram-size")
.long("ngram-size")
.default_value("3") | .help("Choose the ngram size for indexing names. This is only \
used at index time and otherwise ignored."))
.arg(Arg::with_name("ngram-type")
.long("ngram-type")
.default_value("window")
.possible_values(NgramType::possible_names())
.help("Choose the type of ngram generation. This is only used \
used at index time and otherwise ignored."))
.arg(Arg::with_name("query")
.long("query")
.short("q")
.takes_value(true)
.help("Setting an override query is necessary if the file \
path lacks sufficient information to find a matching \
title. For example, if a year could not be found. It \
is also useful for specifying a TV show when renaming \
multiple episodes at once."))
.arg(Arg::with_name("re-episode")
.long("re-episode")
.takes_value(true)
.default_value(r"[Ee](?P<episode>[0-9]+)")
.help("A regex for matching episode numbers. The episode number \
is extracted by looking for a 'episode' capture group."))
.arg(Arg::with_name("re-season")
.long("re-season")
.takes_value(true)
.default_value(r"[Ss](?P<season>[0-9]+)")
.help("A regex for matching season numbers. The season number \
is extracted by looking for a 'season' capture group."))
.arg(Arg::with_name("re-year")
.long("re-year")
.takes_value(true)
.default_value(r"\b(?P<year>[0-9]{4})\b")
.help("A regex for matching the year. The year is extracted by \
looking for a 'year' capture group."))
.arg(Arg::with_name("update-data")
.long("update-data")
.help("Forcefully refreshes the IMDb data and then exits."))
.arg(Arg::with_name("votes")
.long("votes")
.default_value("1000")
.help("The minimum number of votes required for results matching \
a query derived from existing file names. This is not \
applied to explicit queries via the -q/--query flag."))
.arg(Arg::with_name("update-index")
.long("update-index")
.help("Forcefully re-indexes the IMDb data and then exits."))
.arg(Arg::with_name("symlink")
.long("symlink")
.short("s")
.conflicts_with("hardlink")
.help("Create a symlink instead of renaming. \
(Unix only feature.)"))
.arg(Arg::with_name("hardlink")
.long("hardlink")
.short("H")
.conflicts_with("symlink")
.help("Create a hardlink instead of renaming. \
This doesn't work when renaming directories."))
}
/// Collect all file paths from a sequence of OsStrings from the command line.
/// If `follow` is true, then any paths that are directories are expanded to
/// include all child paths, recursively.
///
/// If there is an error following a path, then it is logged to stderr and
/// otherwise skipped.
fn collect_paths(paths: Vec<&OsStr>, follow: bool) -> Vec<PathBuf> {
let mut results = vec![];
for path in paths {
let path = PathBuf::from(path);
if !follow || !path.is_dir() {
results.push(path);
continue;
}
for result in WalkDir::new(path) {
match result {
Ok(dent) => results.push(dent.path().to_path_buf()),
Err(err) => eprintln!("{}", err),
}
}
}
results
}
/// Return a prettily formatted error, including its entire causal chain.
fn pretty_error(err: &failure::Error) -> String {
let mut pretty = err.to_string();
let mut prev = err.as_fail();
while let Some(next) = prev.cause() {
pretty.push_str(": ");
pretty.push_str(&next.to_string());
prev = next;
}
pretty
}
/// Return true if and only if an I/O broken pipe error exists in the causal
/// chain of the given error.
fn is_pipe_error(err: &failure::Error) -> bool {
for cause in err.iter_chain() {
if let Some(ioerr) = cause.downcast_ref::<io::Error>() {
if ioerr.kind() == io::ErrorKind::BrokenPipe {
return true;
}
}
}
false
} | random_line_split | |
main.rs | use std::env;
use std::ffi::OsStr;
use std::io::{self, Write};
use std::path::PathBuf;
use std::process;
use std::result;
use imdb_index::{Index, IndexBuilder, NgramType, Searcher};
use lazy_static::lazy_static;
use tabwriter::TabWriter;
use walkdir::WalkDir;
use crate::rename::{RenamerBuilder, RenameAction};
use crate::util::{choose, read_yesno, write_tsv};
mod download;
mod logger;
mod rename;
mod util;
/// Our type alias for handling errors throughout imdb-rename.
type Result<T> = result::Result<T, failure::Error>;
fn main() {
if let Err(err) = try_main() {
// A pipe error occurs when the consumer of this process's output has
// hung up. This is a normal event, and we should quit gracefully.
if is_pipe_error(&err) {
process::exit(0);
}
// Print the error, including all of its underlying causes.
eprintln!("{}", pretty_error(&err));
// If we get a non-empty backtrace (e.g., RUST_BACKTRACE=1 is set),
// then show it.
let backtrace = err.backtrace().to_string();
if !backtrace.trim().is_empty() {
eprintln!("{}", backtrace);
}
process::exit(1);
}
}
fn try_main() -> Result<()> {
logger::init()?;
log::set_max_level(log::LevelFilter::Info);
let args = Args::from_matches(&app().get_matches())?;
if args.debug {
log::set_max_level(log::LevelFilter::Debug);
}
// Forcefully update the data and re-index if requested.
if args.update_data {
args.download_all_update()?;
args.create_index()?;
return Ok(());
}
// Ensure that the necessary data exists.
if args.download_all()? || args.update_index {
args.create_index()?;
if args.update_index {
return Ok(());
}
}
// Now ensure that the index exists.
if !args.index_dir.exists() {
args.create_index()?;
}
let mut searcher = args.searcher()?;
let results = match args.query {
None => None,
Some(ref query) => Some(searcher.search(&query.parse()?)?),
};
if args.files.is_empty() {
let results = match results {
None => failure::bail!("run with a file to rename or --query"),
Some(ref results) => results,
};
return write_tsv(io::stdout(), &mut searcher, results.as_slice());
}
let mut builder = RenamerBuilder::new();
builder
.min_votes(args.min_votes)
.good_threshold(0.25)
.regex_episode(&args.regex_episode)
.regex_season(&args.regex_season)
.regex_year(&args.regex_year);
if let Some(ref results) = results {
builder.force(choose(&mut searcher, results.as_slice(), 0.25)?);
}
let renamer = builder.build()?;
let proposals = renamer.propose(
&mut searcher,
&args.files,
args.dest_dir,
args.rename_action)?;
if proposals.is_empty() {
failure::bail!("no files to rename");
}
let mut stdout = TabWriter::new(io::stdout());
for p in &proposals {
writeln!(stdout, "{}\t->\t{}", p.src().display(), p.dst().display())?;
}
stdout.flush()?;
if read_yesno(&format!(
"Are you sure you want to {action} the above files? (y/n) ",
action = &args.rename_action
))? {
for p in &proposals {
if let Err(err) = p.rename() {
eprintln!("{}", err);
}
}
}
Ok(())
}
#[derive(Debug)]
struct Args {
data_dir: PathBuf,
dest_dir: Option<PathBuf>,
debug: bool,
files: Vec<PathBuf>,
index_dir: PathBuf,
ngram_size: usize,
ngram_type: NgramType,
query: Option<String>,
regex_episode: String,
regex_season: String,
regex_year: String,
update_data: bool,
update_index: bool,
min_votes: u32,
rename_action: RenameAction,
}
impl Args {
fn | (matches: &clap::ArgMatches) -> Result<Args> {
let files = collect_paths(
matches
.values_of_os("file")
.map(|it| it.collect())
.unwrap_or(vec![]),
matches.is_present("follow"),
);
let query = matches
.value_of_lossy("query")
.map(|q| q.into_owned());
let data_dir = matches
.value_of_os("data-dir")
.map(PathBuf::from)
.unwrap();
let dest_dir = matches
.value_of_os("dest-dir")
.map(PathBuf::from);
let index_dir = matches
.value_of_os("index-dir")
.map(PathBuf::from)
.unwrap_or(data_dir.join("index"));
let regex_episode = matches
.value_of_lossy("re-episode")
.unwrap()
.into_owned();
let regex_season = matches
.value_of_lossy("re-season")
.unwrap()
.into_owned();
let regex_year = matches
.value_of_lossy("re-year")
.unwrap()
.into_owned();
let min_votes = matches
.value_of_lossy("votes")
.unwrap()
.parse()?;
let rename_action = {
if matches.is_present("symlink") {
if !cfg!(unix) {
failure::bail!(
"--symlink currently supported only on Unix \
platforms, try hardlink (-H) instead"
);
}
RenameAction::Symlink
} else if matches.is_present("hardlink") {
RenameAction::Hardlink
} else {
RenameAction::Rename
}
};
Ok(Args {
data_dir: data_dir,
dest_dir: dest_dir,
debug: matches.is_present("debug"),
files: files,
index_dir: index_dir,
ngram_size: matches.value_of_lossy("ngram-size").unwrap().parse()?,
ngram_type: matches.value_of_lossy("ngram-type").unwrap().parse()?,
query: query,
regex_episode: regex_episode,
regex_season: regex_season,
regex_year: regex_year,
update_data: matches.is_present("update-data"),
update_index: matches.is_present("update-index"),
min_votes: min_votes,
rename_action: rename_action,
})
}
fn create_index(&self) -> Result<Index> {
Ok(IndexBuilder::new()
.ngram_size(self.ngram_size)
.ngram_type(self.ngram_type)
.create(&self.data_dir, &self.index_dir)?)
}
fn open_index(&self) -> Result<Index> {
Ok(Index::open(&self.data_dir, &self.index_dir)?)
}
fn searcher(&self) -> Result<Searcher> {
Ok(Searcher::new(self.open_index()?))
}
fn download_all(&self) -> Result<bool> {
download::download_all(&self.data_dir)
}
fn download_all_update(&self) -> Result<()> {
download::update_all(&self.data_dir)
}
}
fn app() -> clap::App<'static, 'static> {
use clap::{App, AppSettings, Arg};
lazy_static! {
// clap wants all of its strings tied to a particular lifetime, but
// we'd really like to determine some default values dynamically. Using
// a lazy_static here is one way of safely giving a static lifetime to
// a value that is computed at runtime.
//
// An alternative approach would be to compute all of our default
// values in the caller, and pass them into this function. It's nicer
// to defined what we need here though. Locality of reference and all
// that.
static ref DATA_DIR: PathBuf = env::temp_dir().join("imdb-rename");
}
App::new("imdb-rename")
.author(clap::crate_authors!())
.version(clap::crate_version!())
.max_term_width(100)
.setting(AppSettings::UnifiedHelpMessage)
.arg(Arg::with_name("file")
.multiple(true)
.help("One or more files to rename."))
.arg(Arg::with_name("data-dir")
.long("data-dir")
.env("IMDB_RENAME_DATA_DIR")
.takes_value(true)
.default_value_os(DATA_DIR.as_os_str())
.help("The location to store IMDb data files."))
.arg(Arg::with_name("dest-dir")
.long("dest-dir")
.short("d")
.env("IMDB_RENAME_DEST_DIR")
.takes_value(true)
.help("The output directory of renamed files \
(or symlinks/hardlinks with the -s/-H options). \
By default, files are renamed in place."))
.arg(Arg::with_name("debug")
.long("debug")
.help("Show debug messages. Use this when filing bugs."))
.arg(Arg::with_name("follow")
.long("follow")
.short("f")
.help("Follow directories and attempt to rename all child \
entries."))
.arg(Arg::with_name("index-dir")
.long("index-dir")
.env("IMDB_RENAME_INDEX_DIR")
.takes_value(true)
.help("The location to store IMDb index files. \
When absent, the default is {data-dir}/index."))
.arg(Arg::with_name("ngram-size")
.long("ngram-size")
.default_value("3")
.help("Choose the ngram size for indexing names. This is only \
used at index time and otherwise ignored."))
.arg(Arg::with_name("ngram-type")
.long("ngram-type")
.default_value("window")
.possible_values(NgramType::possible_names())
.help("Choose the type of ngram generation. This is only used \
used at index time and otherwise ignored."))
.arg(Arg::with_name("query")
.long("query")
.short("q")
.takes_value(true)
.help("Setting an override query is necessary if the file \
path lacks sufficient information to find a matching \
title. For example, if a year could not be found. It \
is also useful for specifying a TV show when renaming \
multiple episodes at once."))
.arg(Arg::with_name("re-episode")
.long("re-episode")
.takes_value(true)
.default_value(r"[Ee](?P<episode>[0-9]+)")
.help("A regex for matching episode numbers. The episode number \
is extracted by looking for a 'episode' capture group."))
.arg(Arg::with_name("re-season")
.long("re-season")
.takes_value(true)
.default_value(r"[Ss](?P<season>[0-9]+)")
.help("A regex for matching season numbers. The season number \
is extracted by looking for a 'season' capture group."))
.arg(Arg::with_name("re-year")
.long("re-year")
.takes_value(true)
.default_value(r"\b(?P<year>[0-9]{4})\b")
.help("A regex for matching the year. The year is extracted by \
looking for a 'year' capture group."))
.arg(Arg::with_name("update-data")
.long("update-data")
.help("Forcefully refreshes the IMDb data and then exits."))
.arg(Arg::with_name("votes")
.long("votes")
.default_value("1000")
.help("The minimum number of votes required for results matching \
a query derived from existing file names. This is not \
applied to explicit queries via the -q/--query flag."))
.arg(Arg::with_name("update-index")
.long("update-index")
.help("Forcefully re-indexes the IMDb data and then exits."))
.arg(Arg::with_name("symlink")
.long("symlink")
.short("s")
.conflicts_with("hardlink")
.help("Create a symlink instead of renaming. \
(Unix only feature.)"))
.arg(Arg::with_name("hardlink")
.long("hardlink")
.short("H")
.conflicts_with("symlink")
.help("Create a hardlink instead of renaming. \
This doesn't work when renaming directories."))
}
/// Collect all file paths from a sequence of OsStrings from the command line.
/// If `follow` is true, then any paths that are directories are expanded to
/// include all child paths, recursively.
///
/// If there is an error following a path, then it is logged to stderr and
/// otherwise skipped.
fn collect_paths(paths: Vec<&OsStr>, follow: bool) -> Vec<PathBuf> {
let mut results = vec![];
for path in paths {
let path = PathBuf::from(path);
if !follow || !path.is_dir() {
results.push(path);
continue;
}
for result in WalkDir::new(path) {
match result {
Ok(dent) => results.push(dent.path().to_path_buf()),
Err(err) => eprintln!("{}", err),
}
}
}
results
}
/// Return a prettily formatted error, including its entire causal chain.
fn pretty_error(err: &failure::Error) -> String {
let mut pretty = err.to_string();
let mut prev = err.as_fail();
while let Some(next) = prev.cause() {
pretty.push_str(": ");
pretty.push_str(&next.to_string());
prev = next;
}
pretty
}
/// Return true if and only if an I/O broken pipe error exists in the causal
/// chain of the given error.
fn is_pipe_error(err: &failure::Error) -> bool {
for cause in err.iter_chain() {
if let Some(ioerr) = cause.downcast_ref::<io::Error>() {
if ioerr.kind() == io::ErrorKind::BrokenPipe {
return true;
}
}
}
false
}
| from_matches | identifier_name |
models.py | from app import db, marked_hashes
import random
from PIL import ImageDraw, Image, ImageFont
from PIL.ImageOps import invert
import numpy as np
from datetime import datetime as dt
from config import (CROP_MIN_MAX_GAP,
CROP_SIGNIFICANT_MEAN,
ROTATION_N_TO_SPLIT,
ROTATION_RESIZING_LEVELS, LOCKED_TIME_SECONDS)
from utils import load_image_from_url, image_hash_md5
def random_image(seed):
random.seed(seed)
width = random.randint(128, 1024 + 1)
height = random.randint(128, 1024 + 1)
img = Image.new('RGB', size=(width, height), color='white')
rotate_direction = random.randint(0, 3)
if rotate_direction in (0, 2):
font_size = random.randrange(width // 25, width // 10)
else:
font_size = random.randrange(height // 25, height // 10)
font = ImageFont.truetype("app/static/arial.ttf", size=font_size)
txt = Image.new('RGB', (16 * font_size, int(1.1 * font_size)), color=(192, 192, 192))
d = ImageDraw.Draw(txt)
d.text((0, 0), "New image mock, generated by PIL", font=font, fill=0)
rotated = txt.rotate(90 * rotate_direction, expand=1)
img.paste(rotated, box=(random.randrange(width // 2),
random.randrange(height // 2)))
d = ImageDraw.Draw(img)
n_steps = random.randrange(10, 20)
prev_point = [random.randrange(width), random.randrange(height)]
prev_horizontal = True
for _ in range(n_steps):
next_dir = random.randint(0, 1)
next_point = [0, 0]
if prev_horizontal:
next_point[0] = prev_point[0]
if next_dir == 0:
next_point[1] = random.randrange(prev_point[1] + 1)
else:
next_point[1] = random.randrange(prev_point[1] - 1, height)
else:
next_point[1] = prev_point[1]
if next_dir == 0:
next_point[0] = random.randrange(prev_point[0] + 1)
else:
next_point[0] = random.randrange(prev_point[0] - 1, width)
prev_horizontal = not prev_horizontal
d.line(prev_point + next_point, fill=0, width=3)
prev_point = next_point
return img
def transform_image(image, angle=None, borders=None):
image = image.convert('L') # to grayscale
image, angle = rotate_image(image, angle=angle) # optimal rotating
image, borders = crop_image(image, borders=borders)
return image, angle, borders
def resize_image(image, max_dimension):
ratio = min(max_dimension / image.size[0], max_dimension / image.size[1])
return image.resize((int(image.size[0] * ratio),
int(image.size[1] * ratio)),
Image.ANTIALIAS)
def rotate_image(image, angle=None):
def rotating_criteria(image_to_rotate, angle):
tmp_image = image_to_rotate.rotate(angle, expand=1)
(width, height) = tmp_image.size
image_array = np.array(tmp_image.getdata()).astype('uint8').reshape((height, width))
# criteria = (np.max(np.sum(image_array, axis=0)) / height,
# np.max(np.sum(image_array, axis=1)) / width)
criteria = [None, None]
for axis in [0, 1]:
# sum_over_axis = image_array.sum(axis=axis)
# sum_over_axis = ((sum_over_axis[1:-1] > sum_over_axis[:-2]*10)
# | (sum_over_axis[1:-1] > sum_over_axis[2:]*10))
sum_over_axis = image_array.mean(axis=axis) > 5
sum_over_axis = np.nonzero(sum_over_axis)[0]
if len(sum_over_axis) > 0:
|
else:
criteria[axis] = 1000000
return min(criteria)
print('basic image: ', image.size)
if angle is None:
# turn auto-rotating off
# search optimal angle to rotate
# current_resize_level = 0
# angles = [-45.0]
# angles += [0] * (ROTATION_N_TO_SPLIT - 1)
# angles += [45.0]
# crit = [None] * (ROTATION_N_TO_SPLIT + 1)
# image_inverted = None
# while (angles[-1] - angles[0]) > 0.1:
# # отресайзим изображение, если надо
# if current_resize_level != len(ROTATION_RESIZING_LEVELS):
# if (angles[-1] - angles[0]) < ROTATION_RESIZING_LEVELS[current_resize_level]['angle_diff']:
# image_inverted = resize_image(invert(image), ROTATION_RESIZING_LEVELS[current_resize_level]['size'])
# current_resize_level += 1
# print('image inverted: ', image_inverted.size)
# crit[0] = rotating_criteria(image_inverted, angles[0])
# crit[-1] = rotating_criteria(image_inverted, angles[-1])
#
# for ic in range(1, ROTATION_N_TO_SPLIT):
# angles[ic] = angles[0] + (angles[-1] - angles[0]) * ic / ROTATION_N_TO_SPLIT
# crit[ic] = rotating_criteria(image_inverted, angles[ic])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# angles[0] = angles[max(max_point - 2, 0)]
# angles[-1] = angles[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# crit[0] = crit[max(max_point - 2, 0)]
# crit[-1] = crit[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# print('new borders: ', angles[0], angles[-1])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# opt_angle = angles[max_point]
# opt_criteria = crit[max_point]
#
# print('opt_angle: ', opt_angle, ', criteria: ', opt_criteria)
opt_angle = 0
else:
# take existing angle
opt_angle = angle
# final rotation
if opt_angle != 0:
tmp_image = image.rotate(opt_angle, expand=1)
bg_mask = Image.new(mode='L', size=image.size, color=255).rotate(opt_angle, expand=1)
bg = Image.new(mode='L', size=tmp_image.size, color=255)
bg.paste(tmp_image, mask=bg_mask)
return bg, opt_angle
return image, 0
def crop_image(image, borders=None):
print(image.size)
width, height = image.size
image_array = (np.ones(shape=(height, width), dtype='uint8') * 255
- np.array(image.getdata()).astype('uint8').reshape((height, width)))
if borders is None:
# search optimal borders to crop
hist_u_to_b = (((image_array.max(axis=1) - image_array.min(axis=1)) > CROP_MIN_MAX_GAP)
| (image_array.mean(axis=1) > CROP_SIGNIFICANT_MEAN))
hist_l_to_r = (((np.max(image_array, axis=0) - np.min(image_array, axis=0)) > CROP_MIN_MAX_GAP)
| (np.mean(image_array, axis=0) > CROP_SIGNIFICANT_MEAN))
print(hist_l_to_r.shape, hist_u_to_b.shape)
left_border = int(max(np.nonzero(hist_l_to_r)[0][0] - 1, 0))
right_border = int(min(np.nonzero(hist_l_to_r)[0][-1] + 1, width))
up_border = int(max(np.nonzero(hist_u_to_b)[0][0] - 1, 0))
bottom_border = int(min(np.nonzero(hist_u_to_b)[0][-1] + 1, height))
borders = {'left_border': left_border,
'right_border': right_border,
'up_border': up_border,
'bottom_border': bottom_border}
else:
# take existing borders
left_border = borders['left_border']
right_border = borders['right_border']
up_border = borders['up_border']
bottom_border = borders['bottom_border']
image_array = 255 - image_array[up_border:bottom_border, left_border:right_border]
return Image.fromarray(image_array), borders
class ImageToMark:
def __init__(self, image_id):
self.image_id = image_id
self._image = None
@property
def markdown(self):
"""planning markdown is saved here (redirection to database)"""
return db[self.basic_image_id]
@markdown.setter
def markdown(self, value):
db[self.image_id] = value
# update hash set: there is an marked image with that hash
image_hash = self.hash
marked_images = marked_hashes.get(image_hash, [])
if self.image_id not in marked_images:
marked_hashes[image_hash] = marked_images + [self.image_id]
@property
def image(self):
if self._image is None:
# self._image = random_image(self.image_id)
self._image = load_image_from_url(self.url)
# self._image = resize_image(self._image, 400)
basic_image_id = self.basic_image_id
angle = db.get_full_item(basic_image_id).get('angle', None)
borders = db.get_full_item(basic_image_id).get('borders', None)
(self._image,
angle,
borders) = transform_image(self._image, angle=angle, borders=borders)
db.get_full_item(self.image_id)['angle'] = angle
db.get_full_item(self.image_id)['borders'] = borders
return self._image
@property
def url(self):
return db.get_full_item(self.image_id)['url']
@property
def duplicate(self):
return db.get_full_item(self.image_id).get('duplicate', False)
@duplicate.setter
def duplicate(self, value):
db.get_full_item(self.image_id)['duplicate'] = value
db.save()
def set_lock(self):
db.get_full_item(self.image_id)['lock_time'] = dt.now()
def remove_lock(self):
db.get_full_item(self.image_id).pop('lock_time', None)
@property
def locked(self):
lock_time = db.get_full_item(self.image_id).get('lock_time', None)
if lock_time is not None:
if (dt.now() - lock_time).seconds > LOCKED_TIME_SECONDS:
self.remove_lock()
return False
else:
return True
return False
@property
def hash(self):
image_hash = db.get_full_item(self.image_id).get('hash_md5', None)
if image_hash is None:
if self._image is None:
image_hash = image_hash_md5(load_image_from_url(self.url))
else:
image_hash = image_hash_md5(self._image)
db.get_full_item(self.image_id)['hash_md5'] = image_hash
return image_hash
@property
def basic_image_id(self):
image_hash = self.hash
if image_hash in marked_hashes:
marked_images = marked_hashes.get(image_hash, [])
if len(marked_hashes) > 0:
return marked_images[0]
return self.image_id
class ImagesToMark:
def __init__(self):
pass
def __getitem__(self, item):
return ImageToMark(item)
| criteria[axis] = sum_over_axis[-1] - sum_over_axis[0] | conditional_block |
models.py | from app import db, marked_hashes
import random
from PIL import ImageDraw, Image, ImageFont
from PIL.ImageOps import invert
import numpy as np
from datetime import datetime as dt
from config import (CROP_MIN_MAX_GAP,
CROP_SIGNIFICANT_MEAN,
ROTATION_N_TO_SPLIT,
ROTATION_RESIZING_LEVELS, LOCKED_TIME_SECONDS)
from utils import load_image_from_url, image_hash_md5
def random_image(seed):
random.seed(seed)
width = random.randint(128, 1024 + 1)
height = random.randint(128, 1024 + 1)
img = Image.new('RGB', size=(width, height), color='white')
rotate_direction = random.randint(0, 3)
if rotate_direction in (0, 2):
font_size = random.randrange(width // 25, width // 10)
else:
font_size = random.randrange(height // 25, height // 10)
font = ImageFont.truetype("app/static/arial.ttf", size=font_size)
txt = Image.new('RGB', (16 * font_size, int(1.1 * font_size)), color=(192, 192, 192))
d = ImageDraw.Draw(txt)
d.text((0, 0), "New image mock, generated by PIL", font=font, fill=0)
rotated = txt.rotate(90 * rotate_direction, expand=1)
img.paste(rotated, box=(random.randrange(width // 2),
random.randrange(height // 2)))
d = ImageDraw.Draw(img)
n_steps = random.randrange(10, 20)
prev_point = [random.randrange(width), random.randrange(height)]
prev_horizontal = True
for _ in range(n_steps):
next_dir = random.randint(0, 1)
next_point = [0, 0]
if prev_horizontal:
next_point[0] = prev_point[0]
if next_dir == 0:
next_point[1] = random.randrange(prev_point[1] + 1)
else:
next_point[1] = random.randrange(prev_point[1] - 1, height)
else:
next_point[1] = prev_point[1]
if next_dir == 0:
next_point[0] = random.randrange(prev_point[0] + 1)
else:
next_point[0] = random.randrange(prev_point[0] - 1, width)
prev_horizontal = not prev_horizontal
d.line(prev_point + next_point, fill=0, width=3)
prev_point = next_point
return img
def transform_image(image, angle=None, borders=None):
image = image.convert('L') # to grayscale
image, angle = rotate_image(image, angle=angle) # optimal rotating
image, borders = crop_image(image, borders=borders)
return image, angle, borders
def resize_image(image, max_dimension):
ratio = min(max_dimension / image.size[0], max_dimension / image.size[1])
return image.resize((int(image.size[0] * ratio),
int(image.size[1] * ratio)),
Image.ANTIALIAS)
def rotate_image(image, angle=None):
def rotating_criteria(image_to_rotate, angle):
tmp_image = image_to_rotate.rotate(angle, expand=1)
(width, height) = tmp_image.size
image_array = np.array(tmp_image.getdata()).astype('uint8').reshape((height, width))
# criteria = (np.max(np.sum(image_array, axis=0)) / height,
# np.max(np.sum(image_array, axis=1)) / width)
criteria = [None, None]
for axis in [0, 1]:
# sum_over_axis = image_array.sum(axis=axis)
# sum_over_axis = ((sum_over_axis[1:-1] > sum_over_axis[:-2]*10)
# | (sum_over_axis[1:-1] > sum_over_axis[2:]*10))
sum_over_axis = image_array.mean(axis=axis) > 5
sum_over_axis = np.nonzero(sum_over_axis)[0]
if len(sum_over_axis) > 0:
criteria[axis] = sum_over_axis[-1] - sum_over_axis[0]
else:
criteria[axis] = 1000000
return min(criteria)
print('basic image: ', image.size)
if angle is None:
# turn auto-rotating off
# search optimal angle to rotate
# current_resize_level = 0
# angles = [-45.0]
# angles += [0] * (ROTATION_N_TO_SPLIT - 1)
# angles += [45.0]
# crit = [None] * (ROTATION_N_TO_SPLIT + 1)
# image_inverted = None
# while (angles[-1] - angles[0]) > 0.1:
# # отресайзим изображение, если надо
# if current_resize_level != len(ROTATION_RESIZING_LEVELS):
# if (angles[-1] - angles[0]) < ROTATION_RESIZING_LEVELS[current_resize_level]['angle_diff']:
# image_inverted = resize_image(invert(image), ROTATION_RESIZING_LEVELS[current_resize_level]['size'])
# current_resize_level += 1
# print('image inverted: ', image_inverted.size)
# crit[0] = rotating_criteria(image_inverted, angles[0])
# crit[-1] = rotating_criteria(image_inverted, angles[-1])
#
# for ic in range(1, ROTATION_N_TO_SPLIT):
# angles[ic] = angles[0] + (angles[-1] - angles[0]) * ic / ROTATION_N_TO_SPLIT
# crit[ic] = rotating_criteria(image_inverted, angles[ic])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# angles[0] = angles[max(max_point - 2, 0)]
# angles[-1] = angles[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# crit[0] = crit[max(max_point - 2, 0)]
# crit[-1] = crit[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# print('new borders: ', angles[0], angles[-1])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# opt_angle = angles[max_point]
# opt_criteria = crit[max_point]
#
# print('opt_angle: ', opt_angle, ', criteria: ', opt_criteria)
opt_angle = 0
else:
# take existing angle
opt_angle = angle
# final rotation
if opt_angle != 0:
tmp_image = image.rotate(opt_angle, expand=1)
bg_mask = Image.new(mode='L', size=image.size, color=255).rotate(opt_angle, expand=1)
bg = Image.new(mode='L', size=tmp_image.size, color=255)
bg.paste(tmp_image, mask=bg_mask)
return bg, opt_angle
return image, 0
def crop_image(image, borders=None):
print(image.size)
width, height = image.size
image_array = (np.ones(shape=(height, width), dtype='uint8') * 255
- np.array(image.getdata()).astype('uint8').reshape((height, width)))
if borders is None:
# search optimal borders to crop
hist_u_to_b = (((image_array.max(axis=1) - image_array.min(axis=1)) > CROP_MIN_MAX_GAP)
| (image_array.mean(axis=1) > CROP_SIGNIFICANT_MEAN))
hist_l_to_r = (((np.max(image_array, axis=0) - np.min(image_array, axis=0)) > CROP_MIN_MAX_GAP)
| (np.mean(image_array, axis=0) > CROP_SIGNIFICANT_MEAN))
print(hist_l_to_r.shape, hist_u_to_b.shape)
left_border = int(max(np.nonzero(hist_l_to_r)[0][0] - 1, 0))
right_border = int(min(np.nonzero(hist_l_to_r)[0][-1] + 1, width))
up_border = int(max(np.nonzero(hist_u_to_b)[0][0] - 1, 0))
bottom_border = int(min(np.nonzero(hist_u_to_b)[0][-1] + 1, height))
borders = {'left_border': left_border,
'right_border': right_border,
'up_border': up_border,
'bottom_border': bottom_border}
else:
# take existing borders
left_border = borders['left_border']
right_border = borders['right_border']
up_border = borders['up_border']
bottom_border = borders['bottom_border']
image_array = 255 - image_array[up_border:bottom_border, left_border:right_border]
return Image.fromarray(image_array), borders
class ImageToMark:
def __init__(self, image_id):
self.image_id = image_id
self._image = None
@property
def markdown(self):
"""pl | arkdown is saved here (redirection to database)"""
return db[self.basic_image_id]
@markdown.setter
def markdown(self, value):
db[self.image_id] = value
# update hash set: there is an marked image with that hash
image_hash = self.hash
marked_images = marked_hashes.get(image_hash, [])
if self.image_id not in marked_images:
marked_hashes[image_hash] = marked_images + [self.image_id]
@property
def image(self):
if self._image is None:
# self._image = random_image(self.image_id)
self._image = load_image_from_url(self.url)
# self._image = resize_image(self._image, 400)
basic_image_id = self.basic_image_id
angle = db.get_full_item(basic_image_id).get('angle', None)
borders = db.get_full_item(basic_image_id).get('borders', None)
(self._image,
angle,
borders) = transform_image(self._image, angle=angle, borders=borders)
db.get_full_item(self.image_id)['angle'] = angle
db.get_full_item(self.image_id)['borders'] = borders
return self._image
@property
def url(self):
return db.get_full_item(self.image_id)['url']
@property
def duplicate(self):
return db.get_full_item(self.image_id).get('duplicate', False)
@duplicate.setter
def duplicate(self, value):
db.get_full_item(self.image_id)['duplicate'] = value
db.save()
def set_lock(self):
db.get_full_item(self.image_id)['lock_time'] = dt.now()
def remove_lock(self):
db.get_full_item(self.image_id).pop('lock_time', None)
@property
def locked(self):
lock_time = db.get_full_item(self.image_id).get('lock_time', None)
if lock_time is not None:
if (dt.now() - lock_time).seconds > LOCKED_TIME_SECONDS:
self.remove_lock()
return False
else:
return True
return False
@property
def hash(self):
image_hash = db.get_full_item(self.image_id).get('hash_md5', None)
if image_hash is None:
if self._image is None:
image_hash = image_hash_md5(load_image_from_url(self.url))
else:
image_hash = image_hash_md5(self._image)
db.get_full_item(self.image_id)['hash_md5'] = image_hash
return image_hash
@property
def basic_image_id(self):
image_hash = self.hash
if image_hash in marked_hashes:
marked_images = marked_hashes.get(image_hash, [])
if len(marked_hashes) > 0:
return marked_images[0]
return self.image_id
class ImagesToMark:
def __init__(self):
pass
def __getitem__(self, item):
return ImageToMark(item)
| anning m | identifier_name |
models.py | from app import db, marked_hashes
import random
from PIL import ImageDraw, Image, ImageFont
from PIL.ImageOps import invert
import numpy as np
from datetime import datetime as dt
from config import (CROP_MIN_MAX_GAP,
CROP_SIGNIFICANT_MEAN,
ROTATION_N_TO_SPLIT,
ROTATION_RESIZING_LEVELS, LOCKED_TIME_SECONDS)
from utils import load_image_from_url, image_hash_md5
| img = Image.new('RGB', size=(width, height), color='white')
rotate_direction = random.randint(0, 3)
if rotate_direction in (0, 2):
font_size = random.randrange(width // 25, width // 10)
else:
font_size = random.randrange(height // 25, height // 10)
font = ImageFont.truetype("app/static/arial.ttf", size=font_size)
txt = Image.new('RGB', (16 * font_size, int(1.1 * font_size)), color=(192, 192, 192))
d = ImageDraw.Draw(txt)
d.text((0, 0), "New image mock, generated by PIL", font=font, fill=0)
rotated = txt.rotate(90 * rotate_direction, expand=1)
img.paste(rotated, box=(random.randrange(width // 2),
random.randrange(height // 2)))
d = ImageDraw.Draw(img)
n_steps = random.randrange(10, 20)
prev_point = [random.randrange(width), random.randrange(height)]
prev_horizontal = True
for _ in range(n_steps):
next_dir = random.randint(0, 1)
next_point = [0, 0]
if prev_horizontal:
next_point[0] = prev_point[0]
if next_dir == 0:
next_point[1] = random.randrange(prev_point[1] + 1)
else:
next_point[1] = random.randrange(prev_point[1] - 1, height)
else:
next_point[1] = prev_point[1]
if next_dir == 0:
next_point[0] = random.randrange(prev_point[0] + 1)
else:
next_point[0] = random.randrange(prev_point[0] - 1, width)
prev_horizontal = not prev_horizontal
d.line(prev_point + next_point, fill=0, width=3)
prev_point = next_point
return img
def transform_image(image, angle=None, borders=None):
image = image.convert('L') # to grayscale
image, angle = rotate_image(image, angle=angle) # optimal rotating
image, borders = crop_image(image, borders=borders)
return image, angle, borders
def resize_image(image, max_dimension):
ratio = min(max_dimension / image.size[0], max_dimension / image.size[1])
return image.resize((int(image.size[0] * ratio),
int(image.size[1] * ratio)),
Image.ANTIALIAS)
def rotate_image(image, angle=None):
def rotating_criteria(image_to_rotate, angle):
tmp_image = image_to_rotate.rotate(angle, expand=1)
(width, height) = tmp_image.size
image_array = np.array(tmp_image.getdata()).astype('uint8').reshape((height, width))
# criteria = (np.max(np.sum(image_array, axis=0)) / height,
# np.max(np.sum(image_array, axis=1)) / width)
criteria = [None, None]
for axis in [0, 1]:
# sum_over_axis = image_array.sum(axis=axis)
# sum_over_axis = ((sum_over_axis[1:-1] > sum_over_axis[:-2]*10)
# | (sum_over_axis[1:-1] > sum_over_axis[2:]*10))
sum_over_axis = image_array.mean(axis=axis) > 5
sum_over_axis = np.nonzero(sum_over_axis)[0]
if len(sum_over_axis) > 0:
criteria[axis] = sum_over_axis[-1] - sum_over_axis[0]
else:
criteria[axis] = 1000000
return min(criteria)
print('basic image: ', image.size)
if angle is None:
# turn auto-rotating off
# search optimal angle to rotate
# current_resize_level = 0
# angles = [-45.0]
# angles += [0] * (ROTATION_N_TO_SPLIT - 1)
# angles += [45.0]
# crit = [None] * (ROTATION_N_TO_SPLIT + 1)
# image_inverted = None
# while (angles[-1] - angles[0]) > 0.1:
# # отресайзим изображение, если надо
# if current_resize_level != len(ROTATION_RESIZING_LEVELS):
# if (angles[-1] - angles[0]) < ROTATION_RESIZING_LEVELS[current_resize_level]['angle_diff']:
# image_inverted = resize_image(invert(image), ROTATION_RESIZING_LEVELS[current_resize_level]['size'])
# current_resize_level += 1
# print('image inverted: ', image_inverted.size)
# crit[0] = rotating_criteria(image_inverted, angles[0])
# crit[-1] = rotating_criteria(image_inverted, angles[-1])
#
# for ic in range(1, ROTATION_N_TO_SPLIT):
# angles[ic] = angles[0] + (angles[-1] - angles[0]) * ic / ROTATION_N_TO_SPLIT
# crit[ic] = rotating_criteria(image_inverted, angles[ic])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# angles[0] = angles[max(max_point - 2, 0)]
# angles[-1] = angles[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# crit[0] = crit[max(max_point - 2, 0)]
# crit[-1] = crit[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# print('new borders: ', angles[0], angles[-1])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# opt_angle = angles[max_point]
# opt_criteria = crit[max_point]
#
# print('opt_angle: ', opt_angle, ', criteria: ', opt_criteria)
opt_angle = 0
else:
# take existing angle
opt_angle = angle
# final rotation
if opt_angle != 0:
tmp_image = image.rotate(opt_angle, expand=1)
bg_mask = Image.new(mode='L', size=image.size, color=255).rotate(opt_angle, expand=1)
bg = Image.new(mode='L', size=tmp_image.size, color=255)
bg.paste(tmp_image, mask=bg_mask)
return bg, opt_angle
return image, 0
def crop_image(image, borders=None):
print(image.size)
width, height = image.size
image_array = (np.ones(shape=(height, width), dtype='uint8') * 255
- np.array(image.getdata()).astype('uint8').reshape((height, width)))
if borders is None:
# search optimal borders to crop
hist_u_to_b = (((image_array.max(axis=1) - image_array.min(axis=1)) > CROP_MIN_MAX_GAP)
| (image_array.mean(axis=1) > CROP_SIGNIFICANT_MEAN))
hist_l_to_r = (((np.max(image_array, axis=0) - np.min(image_array, axis=0)) > CROP_MIN_MAX_GAP)
| (np.mean(image_array, axis=0) > CROP_SIGNIFICANT_MEAN))
print(hist_l_to_r.shape, hist_u_to_b.shape)
left_border = int(max(np.nonzero(hist_l_to_r)[0][0] - 1, 0))
right_border = int(min(np.nonzero(hist_l_to_r)[0][-1] + 1, width))
up_border = int(max(np.nonzero(hist_u_to_b)[0][0] - 1, 0))
bottom_border = int(min(np.nonzero(hist_u_to_b)[0][-1] + 1, height))
borders = {'left_border': left_border,
'right_border': right_border,
'up_border': up_border,
'bottom_border': bottom_border}
else:
# take existing borders
left_border = borders['left_border']
right_border = borders['right_border']
up_border = borders['up_border']
bottom_border = borders['bottom_border']
image_array = 255 - image_array[up_border:bottom_border, left_border:right_border]
return Image.fromarray(image_array), borders
class ImageToMark:
def __init__(self, image_id):
self.image_id = image_id
self._image = None
@property
def markdown(self):
"""planning markdown is saved here (redirection to database)"""
return db[self.basic_image_id]
@markdown.setter
def markdown(self, value):
db[self.image_id] = value
# update hash set: there is an marked image with that hash
image_hash = self.hash
marked_images = marked_hashes.get(image_hash, [])
if self.image_id not in marked_images:
marked_hashes[image_hash] = marked_images + [self.image_id]
@property
def image(self):
if self._image is None:
# self._image = random_image(self.image_id)
self._image = load_image_from_url(self.url)
# self._image = resize_image(self._image, 400)
basic_image_id = self.basic_image_id
angle = db.get_full_item(basic_image_id).get('angle', None)
borders = db.get_full_item(basic_image_id).get('borders', None)
(self._image,
angle,
borders) = transform_image(self._image, angle=angle, borders=borders)
db.get_full_item(self.image_id)['angle'] = angle
db.get_full_item(self.image_id)['borders'] = borders
return self._image
@property
def url(self):
return db.get_full_item(self.image_id)['url']
@property
def duplicate(self):
return db.get_full_item(self.image_id).get('duplicate', False)
@duplicate.setter
def duplicate(self, value):
db.get_full_item(self.image_id)['duplicate'] = value
db.save()
def set_lock(self):
db.get_full_item(self.image_id)['lock_time'] = dt.now()
def remove_lock(self):
db.get_full_item(self.image_id).pop('lock_time', None)
@property
def locked(self):
lock_time = db.get_full_item(self.image_id).get('lock_time', None)
if lock_time is not None:
if (dt.now() - lock_time).seconds > LOCKED_TIME_SECONDS:
self.remove_lock()
return False
else:
return True
return False
@property
def hash(self):
image_hash = db.get_full_item(self.image_id).get('hash_md5', None)
if image_hash is None:
if self._image is None:
image_hash = image_hash_md5(load_image_from_url(self.url))
else:
image_hash = image_hash_md5(self._image)
db.get_full_item(self.image_id)['hash_md5'] = image_hash
return image_hash
@property
def basic_image_id(self):
image_hash = self.hash
if image_hash in marked_hashes:
marked_images = marked_hashes.get(image_hash, [])
if len(marked_hashes) > 0:
return marked_images[0]
return self.image_id
class ImagesToMark:
def __init__(self):
pass
def __getitem__(self, item):
return ImageToMark(item) | def random_image(seed):
random.seed(seed)
width = random.randint(128, 1024 + 1)
height = random.randint(128, 1024 + 1) | random_line_split |
models.py | from app import db, marked_hashes
import random
from PIL import ImageDraw, Image, ImageFont
from PIL.ImageOps import invert
import numpy as np
from datetime import datetime as dt
from config import (CROP_MIN_MAX_GAP,
CROP_SIGNIFICANT_MEAN,
ROTATION_N_TO_SPLIT,
ROTATION_RESIZING_LEVELS, LOCKED_TIME_SECONDS)
from utils import load_image_from_url, image_hash_md5
def random_image(seed):
random.seed(seed)
width = random.randint(128, 1024 + 1)
height = random.randint(128, 1024 + 1)
img = Image.new('RGB', size=(width, height), color='white')
rotate_direction = random.randint(0, 3)
if rotate_direction in (0, 2):
font_size = random.randrange(width // 25, width // 10)
else:
font_size = random.randrange(height // 25, height // 10)
font = ImageFont.truetype("app/static/arial.ttf", size=font_size)
txt = Image.new('RGB', (16 * font_size, int(1.1 * font_size)), color=(192, 192, 192))
d = ImageDraw.Draw(txt)
d.text((0, 0), "New image mock, generated by PIL", font=font, fill=0)
rotated = txt.rotate(90 * rotate_direction, expand=1)
img.paste(rotated, box=(random.randrange(width // 2),
random.randrange(height // 2)))
d = ImageDraw.Draw(img)
n_steps = random.randrange(10, 20)
prev_point = [random.randrange(width), random.randrange(height)]
prev_horizontal = True
for _ in range(n_steps):
next_dir = random.randint(0, 1)
next_point = [0, 0]
if prev_horizontal:
next_point[0] = prev_point[0]
if next_dir == 0:
next_point[1] = random.randrange(prev_point[1] + 1)
else:
next_point[1] = random.randrange(prev_point[1] - 1, height)
else:
next_point[1] = prev_point[1]
if next_dir == 0:
next_point[0] = random.randrange(prev_point[0] + 1)
else:
next_point[0] = random.randrange(prev_point[0] - 1, width)
prev_horizontal = not prev_horizontal
d.line(prev_point + next_point, fill=0, width=3)
prev_point = next_point
return img
def transform_image(image, angle=None, borders=None):
image = image.convert('L') # to grayscale
image, angle = rotate_image(image, angle=angle) # optimal rotating
image, borders = crop_image(image, borders=borders)
return image, angle, borders
def resize_image(image, max_dimension):
ratio = min(max_dimension / image.size[0], max_dimension / image.size[1])
return image.resize((int(image.size[0] * ratio),
int(image.size[1] * ratio)),
Image.ANTIALIAS)
def rotate_image(image, angle=None):
def rotating_criteria(image_to_rotate, angle):
tmp_image = image_to_rotate.rotate(angle, expand=1)
(width, height) = tmp_image.size
image_array = np.array(tmp_image.getdata()).astype('uint8').reshape((height, width))
# criteria = (np.max(np.sum(image_array, axis=0)) / height,
# np.max(np.sum(image_array, axis=1)) / width)
criteria = [None, None]
for axis in [0, 1]:
# sum_over_axis = image_array.sum(axis=axis)
# sum_over_axis = ((sum_over_axis[1:-1] > sum_over_axis[:-2]*10)
# | (sum_over_axis[1:-1] > sum_over_axis[2:]*10))
sum_over_axis = image_array.mean(axis=axis) > 5
sum_over_axis = np.nonzero(sum_over_axis)[0]
if len(sum_over_axis) > 0:
criteria[axis] = sum_over_axis[-1] - sum_over_axis[0]
else:
criteria[axis] = 1000000
return min(criteria)
print('basic image: ', image.size)
if angle is None:
# turn auto-rotating off
# search optimal angle to rotate
# current_resize_level = 0
# angles = [-45.0]
# angles += [0] * (ROTATION_N_TO_SPLIT - 1)
# angles += [45.0]
# crit = [None] * (ROTATION_N_TO_SPLIT + 1)
# image_inverted = None
# while (angles[-1] - angles[0]) > 0.1:
# # отресайзим изображение, если надо
# if current_resize_level != len(ROTATION_RESIZING_LEVELS):
# if (angles[-1] - angles[0]) < ROTATION_RESIZING_LEVELS[current_resize_level]['angle_diff']:
# image_inverted = resize_image(invert(image), ROTATION_RESIZING_LEVELS[current_resize_level]['size'])
# current_resize_level += 1
# print('image inverted: ', image_inverted.size)
# crit[0] = rotating_criteria(image_inverted, angles[0])
# crit[-1] = rotating_criteria(image_inverted, angles[-1])
#
# for ic in range(1, ROTATION_N_TO_SPLIT):
# angles[ic] = angles[0] + (angles[-1] - angles[0]) * ic / ROTATION_N_TO_SPLIT
# crit[ic] = rotating_criteria(image_inverted, angles[ic])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# angles[0] = angles[max(max_point - 2, 0)]
# angles[-1] = angles[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# crit[0] = crit[max(max_point - 2, 0)]
# crit[-1] = crit[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# print('new borders: ', angles[0], angles[-1])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# opt_angle = angles[max_point]
# opt_criteria = crit[max_point]
#
# print('opt_angle: ', opt_angle, ', criteria: ', opt_criteria)
opt_angle = 0
else:
# take existing angle
opt_angle = angle
# final rotation
if opt_angle != 0:
tmp_image = image.rotate(opt_angle, expand=1)
bg_mask = Image.new(mode='L', size=image.size, color=255).rotate(opt_angle, expand=1)
bg = Image.new(mode='L', size=tmp_image.size, color=255)
bg.paste(tmp_image, mask=bg_mask)
return bg, opt_angle
return image, 0
def crop_image(image, borders=None):
print(image.size)
width, height = image.size
image_array = (np.ones(shape=(height, width), dtype='uint8') * 255
- np.array(image.getdata()).astype('uint8').reshape((height, width)))
if borders is None:
# search optimal borders to crop
hist_u_to_b = (((image_array.max(axis=1) - image_array.min(axis=1)) > CROP_MIN_MAX_GAP)
| (image_array.mean(axis=1) > CROP_SIGNIFICANT_MEAN))
hist_l_to_r = (((np.max(image_array, axis=0) - np.min(image_array, axis=0)) > CROP_MIN_MAX_GAP)
| (np.mean(image_array, axis=0) > CROP_SIGNIFICANT_MEAN))
print(hist_l_to_r.shape, hist_u_to_b.shape)
left_border = int(max(np.nonzero(hist_l_to_r)[0][0] - 1, 0))
right_border = int(min(np.nonzero(hist_l_to_r)[0][-1] + 1, width))
up_border = int(max(np.nonzero(hist_u_to_b)[0][0] - 1, 0))
bottom_border = int(min(np.nonzero(hist_u_to_b)[0][-1] + 1, height))
borders = {'left_border': left_border,
'right_border': right_border,
'up_border': up_border,
'bottom_border': bottom_border}
else:
# take existing borders
left_border = borders['left_border']
right_border = borders['right_border']
up_border = borders['up_border']
bottom_border = borders['bottom_border']
image_array = 255 - image_array[up_border:bottom_border, left_border:right_border]
return Image.fromarray(image_array), borders
class ImageToMark:
def __init__(self, image_id):
self.image_id = image_id
self._image = None
@property
def markdown(self):
"""planning markdown is saved here (redirection to database)"""
return db[self.basic_image_id]
@markdown.setter
def markdown(self, value):
db[self.image_id] = value
# update hash set: there is an marked image with that hash
image_hash = self.hash
marked_images = marked_hashes.get(image_hash, [])
if self.image_id not in marked_images:
marked_hashes[image_hash] = marked_images + [self.image_id]
@property
def image(self):
if self._image is None:
# self._image = random_image(self.image_id)
self._image = load_image_from_url(self.url)
# self._image = resize_image(self._image, 400)
basic_image_id = self.basic_image_id
angle = db.get_full_item(basic_image_id).get('angle', None)
borders = db.get_full_item(basic_image_id).get('borders', None)
(self._image,
angle,
borders) = transform_image(self._image, angle=angle, borders=borders)
db.get_full_item(self.image_id)['angle'] = angle
db.get_full_item(self.image_id)['borders'] = borders
return self._image
@property
def url(self):
return db.get_full_item(self. | cate(self):
return db.get_full_item(self.image_id).get('duplicate', False)
@duplicate.setter
def duplicate(self, value):
db.get_full_item(self.image_id)['duplicate'] = value
db.save()
def set_lock(self):
db.get_full_item(self.image_id)['lock_time'] = dt.now()
def remove_lock(self):
db.get_full_item(self.image_id).pop('lock_time', None)
@property
def locked(self):
lock_time = db.get_full_item(self.image_id).get('lock_time', None)
if lock_time is not None:
if (dt.now() - lock_time).seconds > LOCKED_TIME_SECONDS:
self.remove_lock()
return False
else:
return True
return False
@property
def hash(self):
image_hash = db.get_full_item(self.image_id).get('hash_md5', None)
if image_hash is None:
if self._image is None:
image_hash = image_hash_md5(load_image_from_url(self.url))
else:
image_hash = image_hash_md5(self._image)
db.get_full_item(self.image_id)['hash_md5'] = image_hash
return image_hash
@property
def basic_image_id(self):
image_hash = self.hash
if image_hash in marked_hashes:
marked_images = marked_hashes.get(image_hash, [])
if len(marked_hashes) > 0:
return marked_images[0]
return self.image_id
class ImagesToMark:
def __init__(self):
pass
def __getitem__(self, item):
return ImageToMark(item)
| image_id)['url']
@property
def dupli | identifier_body |
rpc_test.go | package main
import (
"bufio"
"bytes"
"errors"
"fmt"
"net"
"strconv"
"strings"
//"sync"
"os"
"os/exec"
"sync"
"testing"
"time"
)
var fsp []*exec.Cmd
var leaderUrl string
var num int
var id_from_url map[string]int
func TestStartServers(t *testing.T) {
num = 5
fsp = make([]*exec.Cmd, num)
id_from_url = make(map[string]int)
for i := 0; i < num; i++ {
fsp[i] = exec.Command("./assignment4", strconv.Itoa(i+1))
fsp[i].Stdout = os.Stdout
fsp[i].Stderr = os.Stdout
fsp[i].Stdin = os.Stdin
fsp[i].Start()
id_from_url["localhost:"+strconv.Itoa(8000+i)] = i + 1
}
time.Sleep(2 * time.Second)
for {
leaderUrl = "localhost:8000"
leaderCl := mkClientUrl(t, leaderUrl)
m, _ := leaderCl.read("cs733net")
//fmt.Println("message: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
break
}
} else if m.Kind == 'F' {
break
}
time.Sleep(100 * time.Millisecond)
}
}
func TestRPC_BasicSequential(t *testing.T) {
//fmt.Println("Leader Url ", leaderUrl)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("cs733net")
//fmt.Println(m, err)
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
// Write file cs733net
data := "Cloud fun"
m, err = leaderCl.write("cs733net", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data)}, "read my write", err)
// CAS in new value
version1 := m.Version
data2 := "Cloud fun 2"
// Cas new value
m, err = leaderCl.cas("cs733net", version1, data2, 0)
expect(t, m, &Msg{Kind: 'O'}, "cas success", err)
// Expect to read it back
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data2)}, "read my cas", err)
// Expect Cas to fail with old version
m, err = leaderCl.cas("cs733net", version1, data, 0)
expect(t, m, &Msg{Kind: 'V'}, "cas version mismatch", err)
// Expect a failed cas to not have succeeded. Read should return data2.
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data2)}, "failed cas to not have succeeded", err)
// delete
m, err = leaderCl.delete("cs733net")
expect(t, m, &Msg{Kind: 'O'}, "delete success", err)
// Expect to not find the file
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
time.Sleep(1 * time.Second)
}
func TestRPC_Binary(t *testing.T) {
leaderCl := mkClientUrl(t, leaderUrl)
defer leaderCl.close()
// Write binary contents
data := "\x00\x01\r\n\x03" // some non-ascii, some crlf chars
m, err := leaderCl.write("binfile", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back
m, err = leaderCl.read("binfile")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data)}, "read my write", err)
}
func TestRPC_Chunks(t *testing.T) {
// Should be able to accept a few bytes at a time
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
var err error
snd := func(chunk string) {
if err == nil {
err = cl.send(chunk)
}
}
// Send the command "write teststream 10\r\nabcdefghij\r\n" in multiple chunks
// Nagle's algorithm is disabled on a write, so the server should get these in separate TCP packets.
snd("wr")
time.Sleep(10 * time.Millisecond)
snd("ite test")
time.Sleep(10 * time.Millisecond)
snd("stream 1")
time.Sleep(10 * time.Millisecond)
snd("0\r\nabcdefghij\r")
time.Sleep(10 * time.Millisecond)
snd("\n")
var m *Msg
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "writing in chunks should work", err)
}
func TestRPC_Batch(t *testing.T) {
// Send multiple commands in one batch, expect multiple responses
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
cmds := "write batch1 3\r\nabc\r\n" +
"write batch2 4\r\ndefg\r\n" +
"read batch1\r\n"
cl.send(cmds)
m, err := cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch1 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch2 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'C', Contents: []byte("abc")}, "read batch1", err)
}
func PTestRPC_BasicTimer(t *testing.T) |
// nclients write to the same file. At the end the file should be
// any one clients' last write
func PTestRPC_ConcurrentWrites(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
errCh := make(chan error, nclients)
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to begin concurrently
sem.Add(1)
ch := make(chan *Msg, nclients*niters) // channel for all replies
for i := 0; i < nclients; i++ {
go func(i int, cl *Client) {
sem.Wait()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
m, err := cl.write("concWrite", str, 0)
if err != nil {
errCh <- err
break
} else {
ch <- m
}
}
}(i, clients[i])
}
time.Sleep(3000 * time.Millisecond) // give goroutines a chance
sem.Done() // Go!
time.Sleep(10 * time.Second)
// There should be no errors
for i := 0; i < nclients*niters; i++ {
select {
case m := <-ch:
if m.Kind != 'O' {
t.Fatalf("Concurrent write failed with kind=%c", m.Kind)
}
case err := <-errCh:
t.Fatal(err)
}
}
m, _ := clients[0].read("concWrite")
// Ensure the contents are of the form "cl <i> 9"
// The last write of any client ends with " 9"
if !(m.Kind == 'C' && strings.HasSuffix(string(m.Contents), " 2")) {
t.Fatalf("Expected to be able to read after 1000 writes. Got msg = %v", m)
}
}
// nclients cas to the same file. At the end the file should be any one clients' last write.
// The only difference between this test and the ConcurrentWrite test above is that each
// client loops around until each CAS succeeds. The number of concurrent clients has been
// reduced to keep the testing time within limits.
func PTestRPC_ConcurrentCas(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to *begin* concurrently
sem.Add(1)
m, _ := clients[0].write("concCas", "first", 0)
ver := m.Version
if m.Kind != 'O' || ver == 0 {
t.Fatalf("Expected write to succeed and return version")
}
var wg sync.WaitGroup
wg.Add(nclients)
errorCh := make(chan error, nclients)
for i := 0; i < nclients; i++ {
go func(i int, ver int, cl *Client) {
sem.Wait()
defer wg.Done()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
for {
m, err := cl.cas("concCas", ver, str, 0)
if err != nil {
errorCh <- err
return
} else if m.Kind == 'O' {
break
} else if m.Kind != 'V' {
errorCh <- errors.New(fmt.Sprintf("Expected 'V' msg, got %c", m.Kind))
return
}
ver = m.Version // retry with latest version
}
}
}(i, ver, clients[i])
}
sem.Done() // Start goroutines
time.Sleep(1000 * time.Millisecond) // give goroutines a chance
wg.Wait() // Wait for them to finish
time.Sleep(10 * time.Second)
select {
case e := <-errorCh:
t.Fatalf("Error received while doing cas: %v", e)
default: // no errors
}
m, _ = clients[0].read("concCas")
if !(m.Kind == 'C' && strings.HasSuffix(string(m.Contents), " 2")) {
t.Fatalf("Expected to be able to read after 1000 writes. Got msg.Kind = %d, msg.Contents=%s", m.Kind, m.Contents)
}
}
func PTest_Kill_Leader_And_Revive(t *testing.T) {
leaderId := id_from_url[leaderUrl]
leaderCl := mkClientUrl(t, leaderUrl)
data := "Some data before kill"
m, err := leaderCl.write("killers.txt", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
time.Sleep(2 * time.Second)
fsp[leaderId-1].Process.Kill()
//fmt.Println("Killed: ", err, leaderId)
time.Sleep(4 * time.Second) //for elections
for {
leaderUrl = "localhost:" + strconv.Itoa(8000+leaderId%num)
//fmt.Println(leaderUrl)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("killers.txt")
//fmt.Println("message2: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
//fmt.Println("pppp")
leaderCl := mkClientUrl(t, leaderUrl)
m, err = leaderCl.read("killers.txt")
expect(t, m, &Msg{Kind: 'C'}, data, err)
break
}
} else if m.Kind == 'C' {
expect(t, m, &Msg{Kind: 'C'}, data, err)
break
} else {
t.Error("Committed but not found on other nodes", m)
}
time.Sleep(100 * time.Millisecond)
}
//fmt.Println("ddddd")
new_leader_id := id_from_url[leaderUrl]
data2 := "new data for file"
leaderCl = mkClientUrl(t, leaderUrl)
leaderCl.write("killers.txt", data2, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
fsp[new_leader_id-1].Process.Kill()
fsp[leaderId-1] = exec.Command("./assignment4", strconv.Itoa(leaderId))
fsp[leaderId-1].Stdout = os.Stdout
fsp[leaderId-1].Stderr = os.Stdout
fsp[leaderId-1].Stdin = os.Stdin
fsp[leaderId-1].Start()
time.Sleep(1 * time.Second)
for {
leaderUrl = "localhost:" + strconv.Itoa(8000+leaderId-1)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("killers.txt")
//fmt.Println("message3: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
leaderCl := mkClientUrl(t, leaderUrl)
m, err = leaderCl.read("killers.txt")
expect(t, m, &Msg{Kind: 'C'}, data2, err)
break
}
} else if m.Kind == 'C' {
t.Error("Leader elected although log might be incomplete", m)
break
} else {
t.Error("Committed but not found on other nodes", m)
}
time.Sleep(100 * time.Millisecond)
}
}
func Test_Kill_all(t *testing.T) {
for _, fs := range fsp {
fs.Process.Kill()
}
time.Sleep(1 * time.Second)
}
func Test_Clean(t *testing.T) {
for i := 1; i <= num; i++ {
str := strconv.Itoa(i)
os.RemoveAll("mylog" + str)
os.Remove("stateStoreFile" + str)
}
}
//----------------------------------------------------------------------
// Utility functions
func expect(t *testing.T, response *Msg, expected *Msg, errstr string, err error) {
if err != nil {
t.Fatal("Unexpected error: " + err.Error())
}
ok := true
if response.Kind != expected.Kind {
ok = false
errstr += fmt.Sprintf(" Got kind='%c', expected '%c'", response.Kind, expected.Kind)
}
if expected.Version > 0 && expected.Version != response.Version {
ok = false
errstr += " Version mismatch"
}
if response.Kind == 'C' {
if expected.Contents != nil &&
bytes.Compare(response.Contents, expected.Contents) != 0 {
ok = false
}
}
if !ok {
t.Fatal("Expected " + errstr)
}
}
type Msg struct {
// Kind = the first character of the command. For errors, it
// is the first letter after "ERR_", ('V' for ERR_VERSION, for
// example), except for "ERR_CMD_ERR", for which the kind is 'M'
Kind byte
Filename string
Contents []byte
Numbytes int
Exptime int // expiry time in seconds
Version int
}
func (cl *Client) read(filename string) (*Msg, error) {
cmd := "read " + filename + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) write(filename string, contents string, exptime int) (*Msg, error) {
var cmd string
if exptime == 0 {
cmd = fmt.Sprintf("write %s %d\r\n", filename, len(contents))
} else {
cmd = fmt.Sprintf("write %s %d %d\r\n", filename, len(contents), exptime)
}
cmd += contents + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) cas(filename string, version int, contents string, exptime int) (*Msg, error) {
var cmd string
if exptime == 0 {
cmd = fmt.Sprintf("cas %s %d %d\r\n", filename, version, len(contents))
} else {
cmd = fmt.Sprintf("cas %s %d %d %d\r\n", filename, version, len(contents), exptime)
}
cmd += contents + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) delete(filename string) (*Msg, error) {
cmd := "delete " + filename + "\r\n"
return cl.sendRcv(cmd)
}
var errNoConn = errors.New("Connection is closed")
type Client struct {
conn *net.TCPConn
reader *bufio.Reader // a bufio Reader wrapper over conn
}
func mkClient(t *testing.T, host string, port int) *Client {
return mkClientUrl(t, host+":"+strconv.Itoa(port))
}
func mkClientUrl(t *testing.T, url string) *Client {
var client *Client
raddr, err := net.ResolveTCPAddr("tcp", url)
if err == nil {
conn, err := net.DialTCP("tcp", nil, raddr)
if err == nil {
client = &Client{conn: conn, reader: bufio.NewReader(conn)}
}
}
if err != nil {
t.Fatal(err)
}
return client
}
func (cl *Client) send(str string) error {
if cl.conn == nil {
return errNoConn
}
_, err := cl.conn.Write([]byte(str))
if err != nil {
err = fmt.Errorf("Write error in SendRaw: %v", err)
cl.conn.Close()
cl.conn = nil
}
return err
}
func (cl *Client) sendRcv(str string) (msg *Msg, err error) {
if cl.conn == nil {
return nil, errNoConn
}
err = cl.send(str)
if err == nil {
msg, err = cl.rcv()
}
return msg, err
}
func (cl *Client) close() {
if cl != nil && cl.conn != nil {
cl.conn.Close()
cl.conn = nil
}
}
func (cl *Client) rcv() (msg *Msg, err error) {
// we will assume no errors in server side formatting
line, err := cl.reader.ReadString('\n')
if err == nil {
msg, err = parseFirst(line)
if err != nil {
return nil, err
}
if msg.Kind == 'C' {
contents := make([]byte, msg.Numbytes)
var c byte
for i := 0; i < msg.Numbytes; i++ {
if c, err = cl.reader.ReadByte(); err != nil {
break
}
contents[i] = c
}
if err == nil {
msg.Contents = contents
cl.reader.ReadByte() // \r
cl.reader.ReadByte() // \n
}
}
}
if err != nil {
cl.close()
}
return msg, err
}
func parseFirst(line string) (msg *Msg, err error) {
fields := strings.Fields(line)
msg = &Msg{}
// Utility function fieldNum to int
toInt := func(fieldNum int) int {
var i int
if err == nil {
if fieldNum >= len(fields) {
err = errors.New(fmt.Sprintf("Not enough fields. Expected field #%d in %s\n", fieldNum, line))
return 0
}
i, err = strconv.Atoi(fields[fieldNum])
}
return i
}
if len(fields) == 0 {
return nil, errors.New("Empty line. The previous command is likely at fault")
}
switch fields[0] {
case "OK": // OK [version]
msg.Kind = 'O'
if len(fields) > 1 {
msg.Version = toInt(1)
}
case "CONTENTS": // CONTENTS <version> <numbytes> <exptime> \r\n
msg.Kind = 'C'
msg.Version = toInt(1)
msg.Numbytes = toInt(2)
msg.Exptime = toInt(3)
case "ERR_VERSION":
msg.Kind = 'V'
msg.Version = toInt(1)
case "ERR_FILE_NOT_FOUND":
msg.Kind = 'F'
case "ERR_CMD_ERR":
msg.Kind = 'M'
case "ERR_INTERNAL":
msg.Kind = 'I'
case "ERR_REDIRECT":
msg.Kind = 'R'
msg.Contents = []byte(fields[1])
default:
err = errors.New("Unknown response " + fields[0])
}
if err != nil {
return nil, err
} else {
return msg, nil
}
}
| {
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
// Write file cs733, with expiry time of 2 seconds
str := "Cloud fun"
m, err := cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back immediately.
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "read my cas", err)
time.Sleep(3 * time.Second)
// Expect to not find the file after expiry
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
// Recreate the file with expiry time of 1 second
m, err = cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
// Overwrite the file with expiry time of 4. This should be the new time.
m, err = cl.write("cs733", str, 3)
expect(t, m, &Msg{Kind: 'O'}, "file overwriten with exptime=4", err)
// The last expiry time was 3 seconds. We should expect the file to still be around 2 seconds later
time.Sleep(1 * time.Second)
// Expect the file to not have expired.
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "file to not expire until 4 sec", err)
time.Sleep(3 * time.Second)
// 5 seconds since the last write. Expect the file to have expired
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found after 4 sec", err)
// Create the file with an expiry time of 1 sec. We're going to delete it
// then immediately create it. The new file better not get deleted.
m, err = cl.write("cs733", str, 1)
expect(t, m, &Msg{Kind: 'O'}, "file created for delete", err)
m, err = cl.delete("cs733")
expect(t, m, &Msg{Kind: 'O'}, "deleted ok", err)
m, err = cl.write("cs733", str, 0) // No expiry
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
time.Sleep(1100 * time.Millisecond) // A little more than 1 sec
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C'}, "file should not be deleted", err)
} | identifier_body |
rpc_test.go | package main
import (
"bufio"
"bytes"
"errors"
"fmt"
"net"
"strconv"
"strings"
//"sync"
"os"
"os/exec"
"sync"
"testing"
"time"
)
var fsp []*exec.Cmd
var leaderUrl string
var num int
var id_from_url map[string]int
func TestStartServers(t *testing.T) {
num = 5
fsp = make([]*exec.Cmd, num)
id_from_url = make(map[string]int)
for i := 0; i < num; i++ {
fsp[i] = exec.Command("./assignment4", strconv.Itoa(i+1))
fsp[i].Stdout = os.Stdout
fsp[i].Stderr = os.Stdout
fsp[i].Stdin = os.Stdin
fsp[i].Start()
id_from_url["localhost:"+strconv.Itoa(8000+i)] = i + 1
}
time.Sleep(2 * time.Second)
for {
leaderUrl = "localhost:8000"
leaderCl := mkClientUrl(t, leaderUrl)
m, _ := leaderCl.read("cs733net")
//fmt.Println("message: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
break
}
} else if m.Kind == 'F' {
break
}
time.Sleep(100 * time.Millisecond)
}
}
func TestRPC_BasicSequential(t *testing.T) {
//fmt.Println("Leader Url ", leaderUrl)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("cs733net")
//fmt.Println(m, err)
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
// Write file cs733net
data := "Cloud fun"
m, err = leaderCl.write("cs733net", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data)}, "read my write", err)
// CAS in new value
version1 := m.Version
data2 := "Cloud fun 2"
// Cas new value
m, err = leaderCl.cas("cs733net", version1, data2, 0)
expect(t, m, &Msg{Kind: 'O'}, "cas success", err)
// Expect to read it back
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data2)}, "read my cas", err)
// Expect Cas to fail with old version
m, err = leaderCl.cas("cs733net", version1, data, 0)
expect(t, m, &Msg{Kind: 'V'}, "cas version mismatch", err)
// Expect a failed cas to not have succeeded. Read should return data2.
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data2)}, "failed cas to not have succeeded", err)
// delete
m, err = leaderCl.delete("cs733net")
expect(t, m, &Msg{Kind: 'O'}, "delete success", err)
// Expect to not find the file
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
time.Sleep(1 * time.Second)
}
func TestRPC_Binary(t *testing.T) {
leaderCl := mkClientUrl(t, leaderUrl)
defer leaderCl.close()
// Write binary contents
data := "\x00\x01\r\n\x03" // some non-ascii, some crlf chars
m, err := leaderCl.write("binfile", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back
m, err = leaderCl.read("binfile")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data)}, "read my write", err)
}
func TestRPC_Chunks(t *testing.T) {
// Should be able to accept a few bytes at a time
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
var err error
snd := func(chunk string) {
if err == nil {
err = cl.send(chunk)
}
}
// Send the command "write teststream 10\r\nabcdefghij\r\n" in multiple chunks
// Nagle's algorithm is disabled on a write, so the server should get these in separate TCP packets.
snd("wr")
time.Sleep(10 * time.Millisecond)
snd("ite test")
time.Sleep(10 * time.Millisecond)
snd("stream 1")
time.Sleep(10 * time.Millisecond)
snd("0\r\nabcdefghij\r")
time.Sleep(10 * time.Millisecond)
snd("\n")
var m *Msg
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "writing in chunks should work", err)
}
func TestRPC_Batch(t *testing.T) {
// Send multiple commands in one batch, expect multiple responses
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
cmds := "write batch1 3\r\nabc\r\n" +
"write batch2 4\r\ndefg\r\n" +
"read batch1\r\n"
cl.send(cmds)
m, err := cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch1 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch2 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'C', Contents: []byte("abc")}, "read batch1", err)
}
func PTestRPC_BasicTimer(t *testing.T) {
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
// Write file cs733, with expiry time of 2 seconds
str := "Cloud fun"
m, err := cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back immediately.
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "read my cas", err)
time.Sleep(3 * time.Second)
// Expect to not find the file after expiry
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
// Recreate the file with expiry time of 1 second
m, err = cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
// Overwrite the file with expiry time of 4. This should be the new time.
m, err = cl.write("cs733", str, 3)
expect(t, m, &Msg{Kind: 'O'}, "file overwriten with exptime=4", err)
// The last expiry time was 3 seconds. We should expect the file to still be around 2 seconds later
time.Sleep(1 * time.Second)
// Expect the file to not have expired.
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "file to not expire until 4 sec", err)
time.Sleep(3 * time.Second)
// 5 seconds since the last write. Expect the file to have expired
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found after 4 sec", err)
// Create the file with an expiry time of 1 sec. We're going to delete it
// then immediately create it. The new file better not get deleted.
m, err = cl.write("cs733", str, 1)
expect(t, m, &Msg{Kind: 'O'}, "file created for delete", err)
m, err = cl.delete("cs733")
expect(t, m, &Msg{Kind: 'O'}, "deleted ok", err)
m, err = cl.write("cs733", str, 0) // No expiry
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
time.Sleep(1100 * time.Millisecond) // A little more than 1 sec
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C'}, "file should not be deleted", err)
}
// nclients write to the same file. At the end the file should be
// any one clients' last write
func PTestRPC_ConcurrentWrites(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
errCh := make(chan error, nclients)
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to begin concurrently
sem.Add(1)
ch := make(chan *Msg, nclients*niters) // channel for all replies
for i := 0; i < nclients; i++ {
go func(i int, cl *Client) {
sem.Wait()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
m, err := cl.write("concWrite", str, 0)
if err != nil {
errCh <- err
break
} else {
ch <- m
}
}
}(i, clients[i])
}
time.Sleep(3000 * time.Millisecond) // give goroutines a chance
sem.Done() // Go!
time.Sleep(10 * time.Second)
// There should be no errors
for i := 0; i < nclients*niters; i++ {
select {
case m := <-ch:
if m.Kind != 'O' {
t.Fatalf("Concurrent write failed with kind=%c", m.Kind)
}
case err := <-errCh:
t.Fatal(err)
}
}
m, _ := clients[0].read("concWrite")
// Ensure the contents are of the form "cl <i> 9"
// The last write of any client ends with " 9"
if !(m.Kind == 'C' && strings.HasSuffix(string(m.Contents), " 2")) {
t.Fatalf("Expected to be able to read after 1000 writes. Got msg = %v", m)
}
}
// nclients cas to the same file. At the end the file should be any one clients' last write.
// The only difference between this test and the ConcurrentWrite test above is that each
// client loops around until each CAS succeeds. The number of concurrent clients has been
// reduced to keep the testing time within limits.
func PTestRPC_ConcurrentCas(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to *begin* concurrently
sem.Add(1)
m, _ := clients[0].write("concCas", "first", 0)
ver := m.Version
if m.Kind != 'O' || ver == 0 {
t.Fatalf("Expected write to succeed and return version")
}
var wg sync.WaitGroup
wg.Add(nclients)
errorCh := make(chan error, nclients)
for i := 0; i < nclients; i++ {
go func(i int, ver int, cl *Client) {
sem.Wait()
defer wg.Done()
for j := 0; j < niters; j++ { | m, err := cl.cas("concCas", ver, str, 0)
if err != nil {
errorCh <- err
return
} else if m.Kind == 'O' {
break
} else if m.Kind != 'V' {
errorCh <- errors.New(fmt.Sprintf("Expected 'V' msg, got %c", m.Kind))
return
}
ver = m.Version // retry with latest version
}
}
}(i, ver, clients[i])
}
sem.Done() // Start goroutines
time.Sleep(1000 * time.Millisecond) // give goroutines a chance
wg.Wait() // Wait for them to finish
time.Sleep(10 * time.Second)
select {
case e := <-errorCh:
t.Fatalf("Error received while doing cas: %v", e)
default: // no errors
}
m, _ = clients[0].read("concCas")
if !(m.Kind == 'C' && strings.HasSuffix(string(m.Contents), " 2")) {
t.Fatalf("Expected to be able to read after 1000 writes. Got msg.Kind = %d, msg.Contents=%s", m.Kind, m.Contents)
}
}
func PTest_Kill_Leader_And_Revive(t *testing.T) {
leaderId := id_from_url[leaderUrl]
leaderCl := mkClientUrl(t, leaderUrl)
data := "Some data before kill"
m, err := leaderCl.write("killers.txt", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
time.Sleep(2 * time.Second)
fsp[leaderId-1].Process.Kill()
//fmt.Println("Killed: ", err, leaderId)
time.Sleep(4 * time.Second) //for elections
for {
leaderUrl = "localhost:" + strconv.Itoa(8000+leaderId%num)
//fmt.Println(leaderUrl)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("killers.txt")
//fmt.Println("message2: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
//fmt.Println("pppp")
leaderCl := mkClientUrl(t, leaderUrl)
m, err = leaderCl.read("killers.txt")
expect(t, m, &Msg{Kind: 'C'}, data, err)
break
}
} else if m.Kind == 'C' {
expect(t, m, &Msg{Kind: 'C'}, data, err)
break
} else {
t.Error("Committed but not found on other nodes", m)
}
time.Sleep(100 * time.Millisecond)
}
//fmt.Println("ddddd")
new_leader_id := id_from_url[leaderUrl]
data2 := "new data for file"
leaderCl = mkClientUrl(t, leaderUrl)
leaderCl.write("killers.txt", data2, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
fsp[new_leader_id-1].Process.Kill()
fsp[leaderId-1] = exec.Command("./assignment4", strconv.Itoa(leaderId))
fsp[leaderId-1].Stdout = os.Stdout
fsp[leaderId-1].Stderr = os.Stdout
fsp[leaderId-1].Stdin = os.Stdin
fsp[leaderId-1].Start()
time.Sleep(1 * time.Second)
for {
leaderUrl = "localhost:" + strconv.Itoa(8000+leaderId-1)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("killers.txt")
//fmt.Println("message3: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
leaderCl := mkClientUrl(t, leaderUrl)
m, err = leaderCl.read("killers.txt")
expect(t, m, &Msg{Kind: 'C'}, data2, err)
break
}
} else if m.Kind == 'C' {
t.Error("Leader elected although log might be incomplete", m)
break
} else {
t.Error("Committed but not found on other nodes", m)
}
time.Sleep(100 * time.Millisecond)
}
}
func Test_Kill_all(t *testing.T) {
for _, fs := range fsp {
fs.Process.Kill()
}
time.Sleep(1 * time.Second)
}
func Test_Clean(t *testing.T) {
for i := 1; i <= num; i++ {
str := strconv.Itoa(i)
os.RemoveAll("mylog" + str)
os.Remove("stateStoreFile" + str)
}
}
//----------------------------------------------------------------------
// Utility functions
func expect(t *testing.T, response *Msg, expected *Msg, errstr string, err error) {
if err != nil {
t.Fatal("Unexpected error: " + err.Error())
}
ok := true
if response.Kind != expected.Kind {
ok = false
errstr += fmt.Sprintf(" Got kind='%c', expected '%c'", response.Kind, expected.Kind)
}
if expected.Version > 0 && expected.Version != response.Version {
ok = false
errstr += " Version mismatch"
}
if response.Kind == 'C' {
if expected.Contents != nil &&
bytes.Compare(response.Contents, expected.Contents) != 0 {
ok = false
}
}
if !ok {
t.Fatal("Expected " + errstr)
}
}
type Msg struct {
// Kind = the first character of the command. For errors, it
// is the first letter after "ERR_", ('V' for ERR_VERSION, for
// example), except for "ERR_CMD_ERR", for which the kind is 'M'
Kind byte
Filename string
Contents []byte
Numbytes int
Exptime int // expiry time in seconds
Version int
}
func (cl *Client) read(filename string) (*Msg, error) {
cmd := "read " + filename + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) write(filename string, contents string, exptime int) (*Msg, error) {
var cmd string
if exptime == 0 {
cmd = fmt.Sprintf("write %s %d\r\n", filename, len(contents))
} else {
cmd = fmt.Sprintf("write %s %d %d\r\n", filename, len(contents), exptime)
}
cmd += contents + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) cas(filename string, version int, contents string, exptime int) (*Msg, error) {
var cmd string
if exptime == 0 {
cmd = fmt.Sprintf("cas %s %d %d\r\n", filename, version, len(contents))
} else {
cmd = fmt.Sprintf("cas %s %d %d %d\r\n", filename, version, len(contents), exptime)
}
cmd += contents + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) delete(filename string) (*Msg, error) {
cmd := "delete " + filename + "\r\n"
return cl.sendRcv(cmd)
}
var errNoConn = errors.New("Connection is closed")
type Client struct {
conn *net.TCPConn
reader *bufio.Reader // a bufio Reader wrapper over conn
}
func mkClient(t *testing.T, host string, port int) *Client {
return mkClientUrl(t, host+":"+strconv.Itoa(port))
}
func mkClientUrl(t *testing.T, url string) *Client {
var client *Client
raddr, err := net.ResolveTCPAddr("tcp", url)
if err == nil {
conn, err := net.DialTCP("tcp", nil, raddr)
if err == nil {
client = &Client{conn: conn, reader: bufio.NewReader(conn)}
}
}
if err != nil {
t.Fatal(err)
}
return client
}
func (cl *Client) send(str string) error {
if cl.conn == nil {
return errNoConn
}
_, err := cl.conn.Write([]byte(str))
if err != nil {
err = fmt.Errorf("Write error in SendRaw: %v", err)
cl.conn.Close()
cl.conn = nil
}
return err
}
func (cl *Client) sendRcv(str string) (msg *Msg, err error) {
if cl.conn == nil {
return nil, errNoConn
}
err = cl.send(str)
if err == nil {
msg, err = cl.rcv()
}
return msg, err
}
func (cl *Client) close() {
if cl != nil && cl.conn != nil {
cl.conn.Close()
cl.conn = nil
}
}
func (cl *Client) rcv() (msg *Msg, err error) {
// we will assume no errors in server side formatting
line, err := cl.reader.ReadString('\n')
if err == nil {
msg, err = parseFirst(line)
if err != nil {
return nil, err
}
if msg.Kind == 'C' {
contents := make([]byte, msg.Numbytes)
var c byte
for i := 0; i < msg.Numbytes; i++ {
if c, err = cl.reader.ReadByte(); err != nil {
break
}
contents[i] = c
}
if err == nil {
msg.Contents = contents
cl.reader.ReadByte() // \r
cl.reader.ReadByte() // \n
}
}
}
if err != nil {
cl.close()
}
return msg, err
}
func parseFirst(line string) (msg *Msg, err error) {
fields := strings.Fields(line)
msg = &Msg{}
// Utility function fieldNum to int
toInt := func(fieldNum int) int {
var i int
if err == nil {
if fieldNum >= len(fields) {
err = errors.New(fmt.Sprintf("Not enough fields. Expected field #%d in %s\n", fieldNum, line))
return 0
}
i, err = strconv.Atoi(fields[fieldNum])
}
return i
}
if len(fields) == 0 {
return nil, errors.New("Empty line. The previous command is likely at fault")
}
switch fields[0] {
case "OK": // OK [version]
msg.Kind = 'O'
if len(fields) > 1 {
msg.Version = toInt(1)
}
case "CONTENTS": // CONTENTS <version> <numbytes> <exptime> \r\n
msg.Kind = 'C'
msg.Version = toInt(1)
msg.Numbytes = toInt(2)
msg.Exptime = toInt(3)
case "ERR_VERSION":
msg.Kind = 'V'
msg.Version = toInt(1)
case "ERR_FILE_NOT_FOUND":
msg.Kind = 'F'
case "ERR_CMD_ERR":
msg.Kind = 'M'
case "ERR_INTERNAL":
msg.Kind = 'I'
case "ERR_REDIRECT":
msg.Kind = 'R'
msg.Contents = []byte(fields[1])
default:
err = errors.New("Unknown response " + fields[0])
}
if err != nil {
return nil, err
} else {
return msg, nil
}
} | str := fmt.Sprintf("cl %d %d", i, j)
for { | random_line_split |
rpc_test.go | package main
import (
"bufio"
"bytes"
"errors"
"fmt"
"net"
"strconv"
"strings"
//"sync"
"os"
"os/exec"
"sync"
"testing"
"time"
)
var fsp []*exec.Cmd
var leaderUrl string
var num int
var id_from_url map[string]int
func TestStartServers(t *testing.T) {
num = 5
fsp = make([]*exec.Cmd, num)
id_from_url = make(map[string]int)
for i := 0; i < num; i++ {
fsp[i] = exec.Command("./assignment4", strconv.Itoa(i+1))
fsp[i].Stdout = os.Stdout
fsp[i].Stderr = os.Stdout
fsp[i].Stdin = os.Stdin
fsp[i].Start()
id_from_url["localhost:"+strconv.Itoa(8000+i)] = i + 1
}
time.Sleep(2 * time.Second)
for {
leaderUrl = "localhost:8000"
leaderCl := mkClientUrl(t, leaderUrl)
m, _ := leaderCl.read("cs733net")
//fmt.Println("message: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
break
}
} else if m.Kind == 'F' {
break
}
time.Sleep(100 * time.Millisecond)
}
}
func TestRPC_BasicSequential(t *testing.T) {
//fmt.Println("Leader Url ", leaderUrl)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("cs733net")
//fmt.Println(m, err)
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
// Write file cs733net
data := "Cloud fun"
m, err = leaderCl.write("cs733net", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data)}, "read my write", err)
// CAS in new value
version1 := m.Version
data2 := "Cloud fun 2"
// Cas new value
m, err = leaderCl.cas("cs733net", version1, data2, 0)
expect(t, m, &Msg{Kind: 'O'}, "cas success", err)
// Expect to read it back
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data2)}, "read my cas", err)
// Expect Cas to fail with old version
m, err = leaderCl.cas("cs733net", version1, data, 0)
expect(t, m, &Msg{Kind: 'V'}, "cas version mismatch", err)
// Expect a failed cas to not have succeeded. Read should return data2.
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data2)}, "failed cas to not have succeeded", err)
// delete
m, err = leaderCl.delete("cs733net")
expect(t, m, &Msg{Kind: 'O'}, "delete success", err)
// Expect to not find the file
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
time.Sleep(1 * time.Second)
}
func TestRPC_Binary(t *testing.T) {
leaderCl := mkClientUrl(t, leaderUrl)
defer leaderCl.close()
// Write binary contents
data := "\x00\x01\r\n\x03" // some non-ascii, some crlf chars
m, err := leaderCl.write("binfile", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back
m, err = leaderCl.read("binfile")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data)}, "read my write", err)
}
func TestRPC_Chunks(t *testing.T) {
// Should be able to accept a few bytes at a time
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
var err error
snd := func(chunk string) {
if err == nil {
err = cl.send(chunk)
}
}
// Send the command "write teststream 10\r\nabcdefghij\r\n" in multiple chunks
// Nagle's algorithm is disabled on a write, so the server should get these in separate TCP packets.
snd("wr")
time.Sleep(10 * time.Millisecond)
snd("ite test")
time.Sleep(10 * time.Millisecond)
snd("stream 1")
time.Sleep(10 * time.Millisecond)
snd("0\r\nabcdefghij\r")
time.Sleep(10 * time.Millisecond)
snd("\n")
var m *Msg
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "writing in chunks should work", err)
}
func TestRPC_Batch(t *testing.T) {
// Send multiple commands in one batch, expect multiple responses
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
cmds := "write batch1 3\r\nabc\r\n" +
"write batch2 4\r\ndefg\r\n" +
"read batch1\r\n"
cl.send(cmds)
m, err := cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch1 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch2 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'C', Contents: []byte("abc")}, "read batch1", err)
}
func PTestRPC_BasicTimer(t *testing.T) {
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
// Write file cs733, with expiry time of 2 seconds
str := "Cloud fun"
m, err := cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back immediately.
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "read my cas", err)
time.Sleep(3 * time.Second)
// Expect to not find the file after expiry
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
// Recreate the file with expiry time of 1 second
m, err = cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
// Overwrite the file with expiry time of 4. This should be the new time.
m, err = cl.write("cs733", str, 3)
expect(t, m, &Msg{Kind: 'O'}, "file overwriten with exptime=4", err)
// The last expiry time was 3 seconds. We should expect the file to still be around 2 seconds later
time.Sleep(1 * time.Second)
// Expect the file to not have expired.
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "file to not expire until 4 sec", err)
time.Sleep(3 * time.Second)
// 5 seconds since the last write. Expect the file to have expired
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found after 4 sec", err)
// Create the file with an expiry time of 1 sec. We're going to delete it
// then immediately create it. The new file better not get deleted.
m, err = cl.write("cs733", str, 1)
expect(t, m, &Msg{Kind: 'O'}, "file created for delete", err)
m, err = cl.delete("cs733")
expect(t, m, &Msg{Kind: 'O'}, "deleted ok", err)
m, err = cl.write("cs733", str, 0) // No expiry
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
time.Sleep(1100 * time.Millisecond) // A little more than 1 sec
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C'}, "file should not be deleted", err)
}
// nclients write to the same file. At the end the file should be
// any one clients' last write
func PTestRPC_ConcurrentWrites(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
errCh := make(chan error, nclients)
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to begin concurrently
sem.Add(1)
ch := make(chan *Msg, nclients*niters) // channel for all replies
for i := 0; i < nclients; i++ {
go func(i int, cl *Client) {
sem.Wait()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
m, err := cl.write("concWrite", str, 0)
if err != nil {
errCh <- err
break
} else {
ch <- m
}
}
}(i, clients[i])
}
time.Sleep(3000 * time.Millisecond) // give goroutines a chance
sem.Done() // Go!
time.Sleep(10 * time.Second)
// There should be no errors
for i := 0; i < nclients*niters; i++ {
select {
case m := <-ch:
if m.Kind != 'O' {
t.Fatalf("Concurrent write failed with kind=%c", m.Kind)
}
case err := <-errCh:
t.Fatal(err)
}
}
m, _ := clients[0].read("concWrite")
// Ensure the contents are of the form "cl <i> 9"
// The last write of any client ends with " 9"
if !(m.Kind == 'C' && strings.HasSuffix(string(m.Contents), " 2")) {
t.Fatalf("Expected to be able to read after 1000 writes. Got msg = %v", m)
}
}
// nclients cas to the same file. At the end the file should be any one clients' last write.
// The only difference between this test and the ConcurrentWrite test above is that each
// client loops around until each CAS succeeds. The number of concurrent clients has been
// reduced to keep the testing time within limits.
func PTestRPC_ConcurrentCas(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to *begin* concurrently
sem.Add(1)
m, _ := clients[0].write("concCas", "first", 0)
ver := m.Version
if m.Kind != 'O' || ver == 0 {
t.Fatalf("Expected write to succeed and return version")
}
var wg sync.WaitGroup
wg.Add(nclients)
errorCh := make(chan error, nclients)
for i := 0; i < nclients; i++ {
go func(i int, ver int, cl *Client) {
sem.Wait()
defer wg.Done()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
for {
m, err := cl.cas("concCas", ver, str, 0)
if err != nil {
errorCh <- err
return
} else if m.Kind == 'O' {
break
} else if m.Kind != 'V' {
errorCh <- errors.New(fmt.Sprintf("Expected 'V' msg, got %c", m.Kind))
return
}
ver = m.Version // retry with latest version
}
}
}(i, ver, clients[i])
}
sem.Done() // Start goroutines
time.Sleep(1000 * time.Millisecond) // give goroutines a chance
wg.Wait() // Wait for them to finish
time.Sleep(10 * time.Second)
select {
case e := <-errorCh:
t.Fatalf("Error received while doing cas: %v", e)
default: // no errors
}
m, _ = clients[0].read("concCas")
if !(m.Kind == 'C' && strings.HasSuffix(string(m.Contents), " 2")) {
t.Fatalf("Expected to be able to read after 1000 writes. Got msg.Kind = %d, msg.Contents=%s", m.Kind, m.Contents)
}
}
func PTest_Kill_Leader_And_Revive(t *testing.T) {
leaderId := id_from_url[leaderUrl]
leaderCl := mkClientUrl(t, leaderUrl)
data := "Some data before kill"
m, err := leaderCl.write("killers.txt", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
time.Sleep(2 * time.Second)
fsp[leaderId-1].Process.Kill()
//fmt.Println("Killed: ", err, leaderId)
time.Sleep(4 * time.Second) //for elections
for {
leaderUrl = "localhost:" + strconv.Itoa(8000+leaderId%num)
//fmt.Println(leaderUrl)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("killers.txt")
//fmt.Println("message2: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
//fmt.Println("pppp")
leaderCl := mkClientUrl(t, leaderUrl)
m, err = leaderCl.read("killers.txt")
expect(t, m, &Msg{Kind: 'C'}, data, err)
break
}
} else if m.Kind == 'C' {
expect(t, m, &Msg{Kind: 'C'}, data, err)
break
} else {
t.Error("Committed but not found on other nodes", m)
}
time.Sleep(100 * time.Millisecond)
}
//fmt.Println("ddddd")
new_leader_id := id_from_url[leaderUrl]
data2 := "new data for file"
leaderCl = mkClientUrl(t, leaderUrl)
leaderCl.write("killers.txt", data2, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
fsp[new_leader_id-1].Process.Kill()
fsp[leaderId-1] = exec.Command("./assignment4", strconv.Itoa(leaderId))
fsp[leaderId-1].Stdout = os.Stdout
fsp[leaderId-1].Stderr = os.Stdout
fsp[leaderId-1].Stdin = os.Stdin
fsp[leaderId-1].Start()
time.Sleep(1 * time.Second)
for {
leaderUrl = "localhost:" + strconv.Itoa(8000+leaderId-1)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("killers.txt")
//fmt.Println("message3: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
leaderCl := mkClientUrl(t, leaderUrl)
m, err = leaderCl.read("killers.txt")
expect(t, m, &Msg{Kind: 'C'}, data2, err)
break
}
} else if m.Kind == 'C' {
t.Error("Leader elected although log might be incomplete", m)
break
} else {
t.Error("Committed but not found on other nodes", m)
}
time.Sleep(100 * time.Millisecond)
}
}
func Test_Kill_all(t *testing.T) {
for _, fs := range fsp {
fs.Process.Kill()
}
time.Sleep(1 * time.Second)
}
func Test_Clean(t *testing.T) {
for i := 1; i <= num; i++ {
str := strconv.Itoa(i)
os.RemoveAll("mylog" + str)
os.Remove("stateStoreFile" + str)
}
}
//----------------------------------------------------------------------
// Utility functions
func expect(t *testing.T, response *Msg, expected *Msg, errstr string, err error) {
if err != nil {
t.Fatal("Unexpected error: " + err.Error())
}
ok := true
if response.Kind != expected.Kind {
ok = false
errstr += fmt.Sprintf(" Got kind='%c', expected '%c'", response.Kind, expected.Kind)
}
if expected.Version > 0 && expected.Version != response.Version {
ok = false
errstr += " Version mismatch"
}
if response.Kind == 'C' {
if expected.Contents != nil &&
bytes.Compare(response.Contents, expected.Contents) != 0 {
ok = false
}
}
if !ok {
t.Fatal("Expected " + errstr)
}
}
type Msg struct {
// Kind = the first character of the command. For errors, it
// is the first letter after "ERR_", ('V' for ERR_VERSION, for
// example), except for "ERR_CMD_ERR", for which the kind is 'M'
Kind byte
Filename string
Contents []byte
Numbytes int
Exptime int // expiry time in seconds
Version int
}
func (cl *Client) read(filename string) (*Msg, error) {
cmd := "read " + filename + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) write(filename string, contents string, exptime int) (*Msg, error) {
var cmd string
if exptime == 0 {
cmd = fmt.Sprintf("write %s %d\r\n", filename, len(contents))
} else {
cmd = fmt.Sprintf("write %s %d %d\r\n", filename, len(contents), exptime)
}
cmd += contents + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) cas(filename string, version int, contents string, exptime int) (*Msg, error) {
var cmd string
if exptime == 0 {
cmd = fmt.Sprintf("cas %s %d %d\r\n", filename, version, len(contents))
} else {
cmd = fmt.Sprintf("cas %s %d %d %d\r\n", filename, version, len(contents), exptime)
}
cmd += contents + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) delete(filename string) (*Msg, error) {
cmd := "delete " + filename + "\r\n"
return cl.sendRcv(cmd)
}
var errNoConn = errors.New("Connection is closed")
type Client struct {
conn *net.TCPConn
reader *bufio.Reader // a bufio Reader wrapper over conn
}
func mkClient(t *testing.T, host string, port int) *Client {
return mkClientUrl(t, host+":"+strconv.Itoa(port))
}
func mkClientUrl(t *testing.T, url string) *Client {
var client *Client
raddr, err := net.ResolveTCPAddr("tcp", url)
if err == nil {
conn, err := net.DialTCP("tcp", nil, raddr)
if err == nil {
client = &Client{conn: conn, reader: bufio.NewReader(conn)}
}
}
if err != nil |
return client
}
func (cl *Client) send(str string) error {
if cl.conn == nil {
return errNoConn
}
_, err := cl.conn.Write([]byte(str))
if err != nil {
err = fmt.Errorf("Write error in SendRaw: %v", err)
cl.conn.Close()
cl.conn = nil
}
return err
}
func (cl *Client) sendRcv(str string) (msg *Msg, err error) {
if cl.conn == nil {
return nil, errNoConn
}
err = cl.send(str)
if err == nil {
msg, err = cl.rcv()
}
return msg, err
}
func (cl *Client) close() {
if cl != nil && cl.conn != nil {
cl.conn.Close()
cl.conn = nil
}
}
func (cl *Client) rcv() (msg *Msg, err error) {
// we will assume no errors in server side formatting
line, err := cl.reader.ReadString('\n')
if err == nil {
msg, err = parseFirst(line)
if err != nil {
return nil, err
}
if msg.Kind == 'C' {
contents := make([]byte, msg.Numbytes)
var c byte
for i := 0; i < msg.Numbytes; i++ {
if c, err = cl.reader.ReadByte(); err != nil {
break
}
contents[i] = c
}
if err == nil {
msg.Contents = contents
cl.reader.ReadByte() // \r
cl.reader.ReadByte() // \n
}
}
}
if err != nil {
cl.close()
}
return msg, err
}
func parseFirst(line string) (msg *Msg, err error) {
fields := strings.Fields(line)
msg = &Msg{}
// Utility function fieldNum to int
toInt := func(fieldNum int) int {
var i int
if err == nil {
if fieldNum >= len(fields) {
err = errors.New(fmt.Sprintf("Not enough fields. Expected field #%d in %s\n", fieldNum, line))
return 0
}
i, err = strconv.Atoi(fields[fieldNum])
}
return i
}
if len(fields) == 0 {
return nil, errors.New("Empty line. The previous command is likely at fault")
}
switch fields[0] {
case "OK": // OK [version]
msg.Kind = 'O'
if len(fields) > 1 {
msg.Version = toInt(1)
}
case "CONTENTS": // CONTENTS <version> <numbytes> <exptime> \r\n
msg.Kind = 'C'
msg.Version = toInt(1)
msg.Numbytes = toInt(2)
msg.Exptime = toInt(3)
case "ERR_VERSION":
msg.Kind = 'V'
msg.Version = toInt(1)
case "ERR_FILE_NOT_FOUND":
msg.Kind = 'F'
case "ERR_CMD_ERR":
msg.Kind = 'M'
case "ERR_INTERNAL":
msg.Kind = 'I'
case "ERR_REDIRECT":
msg.Kind = 'R'
msg.Contents = []byte(fields[1])
default:
err = errors.New("Unknown response " + fields[0])
}
if err != nil {
return nil, err
} else {
return msg, nil
}
}
| {
t.Fatal(err)
} | conditional_block |
rpc_test.go | package main
import (
"bufio"
"bytes"
"errors"
"fmt"
"net"
"strconv"
"strings"
//"sync"
"os"
"os/exec"
"sync"
"testing"
"time"
)
var fsp []*exec.Cmd
var leaderUrl string
var num int
var id_from_url map[string]int
func TestStartServers(t *testing.T) {
num = 5
fsp = make([]*exec.Cmd, num)
id_from_url = make(map[string]int)
for i := 0; i < num; i++ {
fsp[i] = exec.Command("./assignment4", strconv.Itoa(i+1))
fsp[i].Stdout = os.Stdout
fsp[i].Stderr = os.Stdout
fsp[i].Stdin = os.Stdin
fsp[i].Start()
id_from_url["localhost:"+strconv.Itoa(8000+i)] = i + 1
}
time.Sleep(2 * time.Second)
for {
leaderUrl = "localhost:8000"
leaderCl := mkClientUrl(t, leaderUrl)
m, _ := leaderCl.read("cs733net")
//fmt.Println("message: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
break
}
} else if m.Kind == 'F' {
break
}
time.Sleep(100 * time.Millisecond)
}
}
func TestRPC_BasicSequential(t *testing.T) {
//fmt.Println("Leader Url ", leaderUrl)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("cs733net")
//fmt.Println(m, err)
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
// Write file cs733net
data := "Cloud fun"
m, err = leaderCl.write("cs733net", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data)}, "read my write", err)
// CAS in new value
version1 := m.Version
data2 := "Cloud fun 2"
// Cas new value
m, err = leaderCl.cas("cs733net", version1, data2, 0)
expect(t, m, &Msg{Kind: 'O'}, "cas success", err)
// Expect to read it back
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data2)}, "read my cas", err)
// Expect Cas to fail with old version
m, err = leaderCl.cas("cs733net", version1, data, 0)
expect(t, m, &Msg{Kind: 'V'}, "cas version mismatch", err)
// Expect a failed cas to not have succeeded. Read should return data2.
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data2)}, "failed cas to not have succeeded", err)
// delete
m, err = leaderCl.delete("cs733net")
expect(t, m, &Msg{Kind: 'O'}, "delete success", err)
// Expect to not find the file
m, err = leaderCl.read("cs733net")
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
time.Sleep(1 * time.Second)
}
func TestRPC_Binary(t *testing.T) {
leaderCl := mkClientUrl(t, leaderUrl)
defer leaderCl.close()
// Write binary contents
data := "\x00\x01\r\n\x03" // some non-ascii, some crlf chars
m, err := leaderCl.write("binfile", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back
m, err = leaderCl.read("binfile")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(data)}, "read my write", err)
}
func | (t *testing.T) {
// Should be able to accept a few bytes at a time
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
var err error
snd := func(chunk string) {
if err == nil {
err = cl.send(chunk)
}
}
// Send the command "write teststream 10\r\nabcdefghij\r\n" in multiple chunks
// Nagle's algorithm is disabled on a write, so the server should get these in separate TCP packets.
snd("wr")
time.Sleep(10 * time.Millisecond)
snd("ite test")
time.Sleep(10 * time.Millisecond)
snd("stream 1")
time.Sleep(10 * time.Millisecond)
snd("0\r\nabcdefghij\r")
time.Sleep(10 * time.Millisecond)
snd("\n")
var m *Msg
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "writing in chunks should work", err)
}
func TestRPC_Batch(t *testing.T) {
// Send multiple commands in one batch, expect multiple responses
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
cmds := "write batch1 3\r\nabc\r\n" +
"write batch2 4\r\ndefg\r\n" +
"read batch1\r\n"
cl.send(cmds)
m, err := cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch1 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch2 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'C', Contents: []byte("abc")}, "read batch1", err)
}
func PTestRPC_BasicTimer(t *testing.T) {
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
// Write file cs733, with expiry time of 2 seconds
str := "Cloud fun"
m, err := cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back immediately.
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "read my cas", err)
time.Sleep(3 * time.Second)
// Expect to not find the file after expiry
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
// Recreate the file with expiry time of 1 second
m, err = cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
// Overwrite the file with expiry time of 4. This should be the new time.
m, err = cl.write("cs733", str, 3)
expect(t, m, &Msg{Kind: 'O'}, "file overwriten with exptime=4", err)
// The last expiry time was 3 seconds. We should expect the file to still be around 2 seconds later
time.Sleep(1 * time.Second)
// Expect the file to not have expired.
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "file to not expire until 4 sec", err)
time.Sleep(3 * time.Second)
// 5 seconds since the last write. Expect the file to have expired
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found after 4 sec", err)
// Create the file with an expiry time of 1 sec. We're going to delete it
// then immediately create it. The new file better not get deleted.
m, err = cl.write("cs733", str, 1)
expect(t, m, &Msg{Kind: 'O'}, "file created for delete", err)
m, err = cl.delete("cs733")
expect(t, m, &Msg{Kind: 'O'}, "deleted ok", err)
m, err = cl.write("cs733", str, 0) // No expiry
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
time.Sleep(1100 * time.Millisecond) // A little more than 1 sec
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C'}, "file should not be deleted", err)
}
// nclients write to the same file. At the end the file should be
// any one clients' last write
func PTestRPC_ConcurrentWrites(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
errCh := make(chan error, nclients)
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to begin concurrently
sem.Add(1)
ch := make(chan *Msg, nclients*niters) // channel for all replies
for i := 0; i < nclients; i++ {
go func(i int, cl *Client) {
sem.Wait()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
m, err := cl.write("concWrite", str, 0)
if err != nil {
errCh <- err
break
} else {
ch <- m
}
}
}(i, clients[i])
}
time.Sleep(3000 * time.Millisecond) // give goroutines a chance
sem.Done() // Go!
time.Sleep(10 * time.Second)
// There should be no errors
for i := 0; i < nclients*niters; i++ {
select {
case m := <-ch:
if m.Kind != 'O' {
t.Fatalf("Concurrent write failed with kind=%c", m.Kind)
}
case err := <-errCh:
t.Fatal(err)
}
}
m, _ := clients[0].read("concWrite")
// Ensure the contents are of the form "cl <i> 9"
// The last write of any client ends with " 9"
if !(m.Kind == 'C' && strings.HasSuffix(string(m.Contents), " 2")) {
t.Fatalf("Expected to be able to read after 1000 writes. Got msg = %v", m)
}
}
// nclients cas to the same file. At the end the file should be any one clients' last write.
// The only difference between this test and the ConcurrentWrite test above is that each
// client loops around until each CAS succeeds. The number of concurrent clients has been
// reduced to keep the testing time within limits.
func PTestRPC_ConcurrentCas(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to *begin* concurrently
sem.Add(1)
m, _ := clients[0].write("concCas", "first", 0)
ver := m.Version
if m.Kind != 'O' || ver == 0 {
t.Fatalf("Expected write to succeed and return version")
}
var wg sync.WaitGroup
wg.Add(nclients)
errorCh := make(chan error, nclients)
for i := 0; i < nclients; i++ {
go func(i int, ver int, cl *Client) {
sem.Wait()
defer wg.Done()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
for {
m, err := cl.cas("concCas", ver, str, 0)
if err != nil {
errorCh <- err
return
} else if m.Kind == 'O' {
break
} else if m.Kind != 'V' {
errorCh <- errors.New(fmt.Sprintf("Expected 'V' msg, got %c", m.Kind))
return
}
ver = m.Version // retry with latest version
}
}
}(i, ver, clients[i])
}
sem.Done() // Start goroutines
time.Sleep(1000 * time.Millisecond) // give goroutines a chance
wg.Wait() // Wait for them to finish
time.Sleep(10 * time.Second)
select {
case e := <-errorCh:
t.Fatalf("Error received while doing cas: %v", e)
default: // no errors
}
m, _ = clients[0].read("concCas")
if !(m.Kind == 'C' && strings.HasSuffix(string(m.Contents), " 2")) {
t.Fatalf("Expected to be able to read after 1000 writes. Got msg.Kind = %d, msg.Contents=%s", m.Kind, m.Contents)
}
}
func PTest_Kill_Leader_And_Revive(t *testing.T) {
leaderId := id_from_url[leaderUrl]
leaderCl := mkClientUrl(t, leaderUrl)
data := "Some data before kill"
m, err := leaderCl.write("killers.txt", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
time.Sleep(2 * time.Second)
fsp[leaderId-1].Process.Kill()
//fmt.Println("Killed: ", err, leaderId)
time.Sleep(4 * time.Second) //for elections
for {
leaderUrl = "localhost:" + strconv.Itoa(8000+leaderId%num)
//fmt.Println(leaderUrl)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("killers.txt")
//fmt.Println("message2: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
//fmt.Println("pppp")
leaderCl := mkClientUrl(t, leaderUrl)
m, err = leaderCl.read("killers.txt")
expect(t, m, &Msg{Kind: 'C'}, data, err)
break
}
} else if m.Kind == 'C' {
expect(t, m, &Msg{Kind: 'C'}, data, err)
break
} else {
t.Error("Committed but not found on other nodes", m)
}
time.Sleep(100 * time.Millisecond)
}
//fmt.Println("ddddd")
new_leader_id := id_from_url[leaderUrl]
data2 := "new data for file"
leaderCl = mkClientUrl(t, leaderUrl)
leaderCl.write("killers.txt", data2, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
fsp[new_leader_id-1].Process.Kill()
fsp[leaderId-1] = exec.Command("./assignment4", strconv.Itoa(leaderId))
fsp[leaderId-1].Stdout = os.Stdout
fsp[leaderId-1].Stderr = os.Stdout
fsp[leaderId-1].Stdin = os.Stdin
fsp[leaderId-1].Start()
time.Sleep(1 * time.Second)
for {
leaderUrl = "localhost:" + strconv.Itoa(8000+leaderId-1)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("killers.txt")
//fmt.Println("message3: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
leaderCl := mkClientUrl(t, leaderUrl)
m, err = leaderCl.read("killers.txt")
expect(t, m, &Msg{Kind: 'C'}, data2, err)
break
}
} else if m.Kind == 'C' {
t.Error("Leader elected although log might be incomplete", m)
break
} else {
t.Error("Committed but not found on other nodes", m)
}
time.Sleep(100 * time.Millisecond)
}
}
func Test_Kill_all(t *testing.T) {
for _, fs := range fsp {
fs.Process.Kill()
}
time.Sleep(1 * time.Second)
}
func Test_Clean(t *testing.T) {
for i := 1; i <= num; i++ {
str := strconv.Itoa(i)
os.RemoveAll("mylog" + str)
os.Remove("stateStoreFile" + str)
}
}
//----------------------------------------------------------------------
// Utility functions
func expect(t *testing.T, response *Msg, expected *Msg, errstr string, err error) {
if err != nil {
t.Fatal("Unexpected error: " + err.Error())
}
ok := true
if response.Kind != expected.Kind {
ok = false
errstr += fmt.Sprintf(" Got kind='%c', expected '%c'", response.Kind, expected.Kind)
}
if expected.Version > 0 && expected.Version != response.Version {
ok = false
errstr += " Version mismatch"
}
if response.Kind == 'C' {
if expected.Contents != nil &&
bytes.Compare(response.Contents, expected.Contents) != 0 {
ok = false
}
}
if !ok {
t.Fatal("Expected " + errstr)
}
}
type Msg struct {
// Kind = the first character of the command. For errors, it
// is the first letter after "ERR_", ('V' for ERR_VERSION, for
// example), except for "ERR_CMD_ERR", for which the kind is 'M'
Kind byte
Filename string
Contents []byte
Numbytes int
Exptime int // expiry time in seconds
Version int
}
func (cl *Client) read(filename string) (*Msg, error) {
cmd := "read " + filename + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) write(filename string, contents string, exptime int) (*Msg, error) {
var cmd string
if exptime == 0 {
cmd = fmt.Sprintf("write %s %d\r\n", filename, len(contents))
} else {
cmd = fmt.Sprintf("write %s %d %d\r\n", filename, len(contents), exptime)
}
cmd += contents + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) cas(filename string, version int, contents string, exptime int) (*Msg, error) {
var cmd string
if exptime == 0 {
cmd = fmt.Sprintf("cas %s %d %d\r\n", filename, version, len(contents))
} else {
cmd = fmt.Sprintf("cas %s %d %d %d\r\n", filename, version, len(contents), exptime)
}
cmd += contents + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) delete(filename string) (*Msg, error) {
cmd := "delete " + filename + "\r\n"
return cl.sendRcv(cmd)
}
var errNoConn = errors.New("Connection is closed")
type Client struct {
conn *net.TCPConn
reader *bufio.Reader // a bufio Reader wrapper over conn
}
func mkClient(t *testing.T, host string, port int) *Client {
return mkClientUrl(t, host+":"+strconv.Itoa(port))
}
func mkClientUrl(t *testing.T, url string) *Client {
var client *Client
raddr, err := net.ResolveTCPAddr("tcp", url)
if err == nil {
conn, err := net.DialTCP("tcp", nil, raddr)
if err == nil {
client = &Client{conn: conn, reader: bufio.NewReader(conn)}
}
}
if err != nil {
t.Fatal(err)
}
return client
}
func (cl *Client) send(str string) error {
if cl.conn == nil {
return errNoConn
}
_, err := cl.conn.Write([]byte(str))
if err != nil {
err = fmt.Errorf("Write error in SendRaw: %v", err)
cl.conn.Close()
cl.conn = nil
}
return err
}
func (cl *Client) sendRcv(str string) (msg *Msg, err error) {
if cl.conn == nil {
return nil, errNoConn
}
err = cl.send(str)
if err == nil {
msg, err = cl.rcv()
}
return msg, err
}
func (cl *Client) close() {
if cl != nil && cl.conn != nil {
cl.conn.Close()
cl.conn = nil
}
}
func (cl *Client) rcv() (msg *Msg, err error) {
// we will assume no errors in server side formatting
line, err := cl.reader.ReadString('\n')
if err == nil {
msg, err = parseFirst(line)
if err != nil {
return nil, err
}
if msg.Kind == 'C' {
contents := make([]byte, msg.Numbytes)
var c byte
for i := 0; i < msg.Numbytes; i++ {
if c, err = cl.reader.ReadByte(); err != nil {
break
}
contents[i] = c
}
if err == nil {
msg.Contents = contents
cl.reader.ReadByte() // \r
cl.reader.ReadByte() // \n
}
}
}
if err != nil {
cl.close()
}
return msg, err
}
func parseFirst(line string) (msg *Msg, err error) {
fields := strings.Fields(line)
msg = &Msg{}
// Utility function fieldNum to int
toInt := func(fieldNum int) int {
var i int
if err == nil {
if fieldNum >= len(fields) {
err = errors.New(fmt.Sprintf("Not enough fields. Expected field #%d in %s\n", fieldNum, line))
return 0
}
i, err = strconv.Atoi(fields[fieldNum])
}
return i
}
if len(fields) == 0 {
return nil, errors.New("Empty line. The previous command is likely at fault")
}
switch fields[0] {
case "OK": // OK [version]
msg.Kind = 'O'
if len(fields) > 1 {
msg.Version = toInt(1)
}
case "CONTENTS": // CONTENTS <version> <numbytes> <exptime> \r\n
msg.Kind = 'C'
msg.Version = toInt(1)
msg.Numbytes = toInt(2)
msg.Exptime = toInt(3)
case "ERR_VERSION":
msg.Kind = 'V'
msg.Version = toInt(1)
case "ERR_FILE_NOT_FOUND":
msg.Kind = 'F'
case "ERR_CMD_ERR":
msg.Kind = 'M'
case "ERR_INTERNAL":
msg.Kind = 'I'
case "ERR_REDIRECT":
msg.Kind = 'R'
msg.Contents = []byte(fields[1])
default:
err = errors.New("Unknown response " + fields[0])
}
if err != nil {
return nil, err
} else {
return msg, nil
}
}
| TestRPC_Chunks | identifier_name |
dcy.go | package dcy
import (
"errors"
"fmt"
"math/rand"
"net"
"net/url"
"os"
"reflect"
"regexp"
"strings"
"sync"
"time"
"github.com/minus5/svckit/env"
"github.com/minus5/svckit/log"
"github.com/minus5/svckit/signal"
"github.com/hashicorp/consul/api"
)
const (
// EnvConsul is location of the consul to use. If not defined local consul is used.
EnvConsul = "SVCKIT_DCY_CONSUL"
// EnvWait if defined dcy will not start until those services are not found in consul.
// Usefull in development environment to controll start order.
EnvWait = "SVCKIT_DCY_CHECK_SVCS"
// EnvFederatedDcs is list of all datacenters
EnvFederatedDcs = "SVCKIT_FEDERATED_DCS"
)
var (
ErrNotFound = errors.New("not found")
ErrKeyNotFound = errors.New("key not found")
)
const (
queryTimeoutSeconds = 30
queryRetries = 10
waitTimeMinutes = 10
localConsulAdr = "127.0.0.1:8500"
)
var (
consul *api.Client
l sync.RWMutex
cache = map[string]Addresses{}
subscribers = map[string][]func(Addresses){}
domain string
dc string
nodeName string
consulAddr = localConsulAdr
federatedDcs []string
)
// Address is service address returned from Consul.
type Address struct {
Address string
Port int
}
// String return address in host:port string.
func (a Address) String() string {
return fmt.Sprintf("%s:%d", a.Address, a.Port)
}
func (a Address) Equal(a2 Address) bool {
return a.Address == a2.Address && a.Port == a2.Port
}
// Addresses is array of service addresses.
type Addresses []Address
// String returns string array in host:port format.
func (a Addresses) String() []string {
addrs := []string{}
for _, addr := range a {
addrs = append(addrs, addr.String())
}
return addrs
}
func (a Addresses) Equal(a2 Addresses) bool {
if len(a) != len(a2) {
return false
}
for _, d := range a {
found := false
for _, d2 := range a2 {
if d.Equal(d2) {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func (a Addresses) Contains(a2 Address) bool {
for _, a1 := range a {
if a1.Equal(a2) {
return true
}
}
return false
}
func (a *Addresses) Append(as Addresses) {
s := []Address(*a)
for _, a1 := range []Address(as) {
if a.Contains(a1) {
continue
}
s = append(s, a1)
}
*a = Addresses(s)
}
// On including package it will try to find consul.
// Will BLOCK until consul is found.
// If not found it will raise fatal.
// To disable finding consul, and use it in test mode set EnvConsul to "-"
// If EnvWait is defined dcy will not start until those services are not found in consul. This is usefull for development environment where we start consul, and other applications which are using dcy.
func init() {
if e, ok := os.LookupEnv(EnvConsul); ok && e != "" {
consulAddr = e
}
if consulAddr == "--" {
return
}
if consulAddr == "-" || (env.InTest() && consulAddr == localConsulAdr) {
noConsulTestMode()
return
}
if _, _, err := net.SplitHostPort(consulAddr); err != nil {
consulAddr = consulAddr + ":8500"
}
if e, ok := os.LookupEnv(EnvFederatedDcs); ok {
federatedDcs = strings.Fields(e)
}
rand.Seed(time.Now().UTC().UnixNano())
mustConnect()
updateEnv()
}
func updateEnv() {
if dc != "" {
env.SetDc(dc)
}
if nodeName != "" {
env.SetNodeName(nodeName)
}
}
func noConsulTestMode() {
domain = "sd"
dc = "dev"
nodeName = "node01"
federatedDcs = []string{dc}
cache["test1"] = []Address{
{"127.0.0.1", 12345},
{"127.0.0.1", 12348},
}
cache["test2"] = []Address{
{"10.11.12.13", 1415},
}
cache["test3"] = []Address{
{"192.168.0.1", 12345},
{"10.0.13.0", 12347},
}
cache["syslog"] = []Address{
{"127.0.0.1", 9514},
}
cache["statsd"] = []Address{
{"127.0.0.1", 8125},
}
cache["mongo"] = []Address{
{"127.0.0.1", 27017},
{"192.168.10.123", 27017},
}
cache["nsqlookupd-http"] = []Address{
{"127.0.0.1", 4161},
}
// add federated service notation to cache for all existing services - {service-name}-{datacenter}
for k, v := range cache {
cache[fmt.Sprintf("%s-%s", k, dc)] = v
}
}
func mustConnect() {
if err := signal.WithExponentialBackoff(connect); err != nil {
log.Printf("Giving up connecting %s", consulAddr)
log.Fatal(err)
}
}
func connect() error {
config := api.DefaultConfig()
config.Address = consulAddr
c, err := api.NewClient(config)
if err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
consul = c
if err := self(); err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
// add local dc if it's not set
if !contains(federatedDcs, dc) {
federatedDcs = append(federatedDcs, dc)
}
// wait for dependencies to apear in consul
if e, ok := os.LookupEnv(EnvWait); ok && e != "" {
services := strings.Split(e, ",")
for _, s := range services {
if _, err := Services(s); err != nil {
log.S("addr", consulAddr).S("service", s).Error(err)
return err
}
}
}
return nil
}
func ConnectTo(addr string) error {
if consul != nil {
return nil
}
consulAddr = addr
return signal.WithExponentialBackoff(connect)
}
func serviceName(fqdn, domain string) (string, string) {
rx := regexp.MustCompile(fmt.Sprintf(`^(\S*)\.service\.*(\S*)*\.%s$`, domain))
ms := rx.FindStringSubmatch(fqdn)
if len(ms) < 2 {
return fqdn, ""
}
if len(ms) > 2 {
return ms[1], ms[2]
}
return ms[1], ""
}
func parseConsulServiceEntries(ses []*api.ServiceEntry) Addresses {
srvs := []Address{}
for _, se := range ses {
addr := se.Service.Address
if addr == "" {
addr = se.Node.Address
}
srvs = append(srvs, Address{
Address: addr,
Port: se.Service.Port,
})
}
return srvs
}
func updateCache(tag, name, ldc string, srvs Addresses) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, ldc)
if srvs2, ok := cache[key]; ok {
if srvs2.Equal(srvs) {
return
}
}
cache[key] = srvs
cdc := ldc
if cdc == "" { // if not set, local dc is default
cdc = dc
}
// cache is updated only with services from specific datacenter
// but when notifying subscribers services from all of the datacenters are used
allServices := make([]Address, len(srvs))
copy(allServices, srvs)
for _, fdc := range federatedDcs {
if fdc == cdc {
continue
}
services, _, err := service(name, tag, &api.QueryOptions{Datacenter: fdc})
if err != nil {
continue
}
allServices = append(allServices, parseConsulServiceEntries(services)...)
}
nn := cacheKey(tag, name, "")
notify(nn, allServices)
}
func initializeCacheKey(tag, name, dc string) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, dc)
cache[key] = Addresses{}
}
func invalidateCache(tag, name, dc string) {
l.Lock()
defer l.Unlock()
delete(cache, cacheKey(tag, name, dc))
}
func cacheKey(tag, name, dc string) string {
var key string
if tag != "" {
key = fmt.Sprintf("%s-", tag)
}
if dc == "" {
return fmt.Sprintf("%s%s", key, name)
}
return fmt.Sprintf("%s%s-%s", key, name, dc)
}
func existsInCache(tag, name, dc string) bool {
l.RLock()
defer l.RUnlock()
_, ok := cache[cacheKey(tag, name, dc)]
return ok
}
func monitor(tag, name, dc string, startIndex uint64, serviceExistedOnStart bool) {
wi := startIndex
tries := 0
for {
qo := &api.QueryOptions{
WaitIndex: wi,
WaitTime: time.Minute * waitTimeMinutes,
AllowStale: true,
RequireConsistent: false,
Datacenter: dc,
}
ses, qm, err := service(name, tag, qo)
if err != nil {
tries++
if tries == queryRetries {
invalidateCache(tag, name, dc)
return
}
time.Sleep(time.Second * queryTimeoutSeconds)
continue
}
tries = 0
wi = qm.LastIndex
// monitor routine might be started for service that still doesn't exist but is expected to show up
// in that case don't send updates for non existing service and instead wait for it to show up
if !serviceExistedOnStart && len(ses) == 0 {
continue
}
updateCache(tag, name, dc, parseConsulServiceEntries(ses))
}
}
func service(service, tag string, qo *api.QueryOptions) ([]*api.ServiceEntry, *api.QueryMeta, error) {
ses, qm, err := consul.Health().Service(service, tag, false, qo)
if err != nil {
return nil, nil, err
}
// izbacujem servise koji imaju check koji nije ni "passing" ni "warning"
var filteredSes []*api.ServiceEntry
loop:
for _, se := range ses {
for _, c := range se.Checks {
if c.Status != "passing" && c.Status != "warning" {
continue loop
}
}
filteredSes = append(filteredSes, se)
}
return filteredSes, qm, nil
}
func query(tag, name, dc string) (Addresses, error) {
qo := &api.QueryOptions{Datacenter: dc}
ses, qm, err := service(name, tag, qo)
if err != nil {
return nil, err
}
// if key exists in cache it means that monitor goroutine is already started
if !existsInCache(tag, name, dc) {
serviceExists := len(ses) != 0
// initialize cache key and start goroutine
initializeCacheKey(tag, name, dc)
go func() {
monitor(tag, name, dc, qm.LastIndex, serviceExists)
}()
}
srvs := parseConsulServiceEntries(ses)
if len(srvs) == 0 {
return nil, ErrNotFound
}
updateCache(tag, name, dc, srvs)
return srvs, nil
}
func srvQuery(tag, name string, dc string) (Addresses, error) {
l.RLock()
srvs, ok := cache[cacheKey(tag, name, dc)]
l.RUnlock()
if ok && len(srvs) > 0 {
return srvs, nil
}
srvs, err := query(tag, name, dc)
if err != nil {
return nil, err
}
return srvs, nil
}
func srv(tag, name string, dc string) (Addresses, error) {
srvs, err := srvQuery(tag, name, dc)
if err == nil {
return srvs, nil
}
nameNomad := strings.Replace(name, "_", "-", -1)
srvs, err = srvQuery(tag, nameNomad, dc)
if err != nil {
return nil, err
}
return srvs, nil
}
// LocalServices returns all services registered in Consul in specifed, or if not set, local datacenter
func LocalServices(name string) (Addresses, error) {
sn, ldc := serviceName(name, domain)
srvs, err := srv("", sn, ldc)
return srvs, err
}
// Services returns all services registered in Consul from all of the datacenters
func Services(name string) (Addresses, error) |
// Services returns all services registered in Consul from all of the datacenters
func ServicesByTag(name, tag string) (Addresses, error) {
sn, _ := serviceName(name, domain)
srvs := []Address{}
for _, fdc := range federatedDcs {
s, err := srv(tag, sn, fdc)
if err == nil {
srvs = append(srvs, s...)
}
}
if len(srvs) == 0 {
return srvs, ErrNotFound
}
return srvs, nil
}
// Service will find one service in Consul cluster giving priority to local datacenter.
// Will randomly choose one if there are multiple register in Consul.
func Service(name string) (Address, error) {
srvs, err := servicesWithLocalPriority(name, "")
if err != nil {
return Address{}, err
}
return oneOf(srvs), nil
}
// ServiceByTag will find one service in Consul cluster giving priority to local datacenter.
// Will randomly choose one if there are multiple register in Consul.
func ServiceByTag(name, tag string) (Address, error) {
srvs, err := servicesWithLocalPriority(name, tag)
if err != nil {
return Address{}, err
}
return oneOf(srvs), nil
}
func oneOf(srvs []Address) Address {
if len(srvs) == 1 {
return srvs[0]
}
return srvs[rand.Intn(len(srvs))]
}
// returns services from one of the datacenters giving priority to the local dc
func servicesWithLocalPriority(name, tag string) (Addresses, error) {
sn, ldc := serviceName(name, domain)
srvs, err := srv(tag, sn, ldc)
if err == nil && len(srvs) != 0 {
return srvs, err
}
// loop through all datacenters until desired service is found
for _, fdc := range federatedDcs {
// skip local dc since it was already checked
if fdc == dc {
continue
}
srvs, err = srv(tag, sn, fdc)
if err == nil && len(srvs) != 0 {
break
}
}
return srvs, err
}
// ServiceInDc will find one service in Consul claster for specified datacenter
func ServiceInDc(name, dc string) (Address, error) {
return ServiceInDcByTag("", name, dc)
}
// ServiceInDcByTag will find one service in consul claster with tag for specified datacenter
func ServiceInDcByTag(tag, name, dc string) (Address, error) {
srvs, err := srv(tag, name, dc)
if err != nil {
return Address{}, err
}
srv := srvs[rand.Intn(len(srvs))]
return srv, nil
}
// AgentService finds service on this (local) agent.
func AgentService(name string) (Address, error) {
svcs, err := consul.Agent().Services()
if err != nil {
return Address{}, err
}
for _, svc := range svcs {
//fmt.Printf("\t %#v\n", svc)
if svc.Service == name {
addr := svc.Address
if addr == "" {
addr = consulAddr
}
return Address{Address: addr, Port: svc.Port}, nil
}
}
return Address{}, ErrNotFound
}
// Inspect Consul for configuration parameters.
func self() error {
s, err := consul.Agent().Self()
if err != nil {
return err
}
cfg := s["Config"]
version := cfg["Version"].(string)
dc = cfg["Datacenter"].(string)
nodeName = cfg["NodeName"].(string)
if strings.HasPrefix(version, "0.") {
domain = cfg["Domain"].(string)
} else {
if dcfg := s["DebugConfig"]; dcfg != nil {
domain = dcfg["DNSDomain"].(string)
}
}
return nil
}
// Call consul LockKey api function.
func LockKey(key string) (*api.Lock, error) {
opts := &api.LockOptions{
Key: key,
LockWaitTime: 5 * time.Second,
}
return consul.LockOpts(opts)
}
// NodeName returns Node name as defined in Consul.
func NodeName() string {
return nodeName
}
// Dc returns datacenter name.
func Dc() string {
return dc
}
// KV reads key from Consul key value storage.
func KV(key string) (string, error) {
kv := consul.KV()
pair, _, err := kv.Get(key, nil)
if err != nil {
return "", err
}
if pair == nil {
return "", ErrKeyNotFound
}
return string(pair.Value), nil
}
// KVs read keys from Consul key value storage.
func KVs(key string) (map[string]string, error) {
kv := consul.KV()
entries, _, err := kv.List(key, nil)
if err != nil {
return nil, err
}
if entries == nil {
return nil, ErrKeyNotFound
}
m := make(map[string]string)
for _, e := range entries {
k := strings.TrimPrefix(e.Key, key)
k = strings.TrimPrefix(k, "/")
m[k] = string(e.Value)
}
return m, nil
}
// URL discovers host from url.
// If there are multiple services will randomly choose one.
func URL(url string) string {
scheme, host, _, path, query := unpackURL(url)
// log.S("url", url).S("host", host).Debug(fmt.Sprintf("should discover: %v", shouldDiscoverHost(host)))
if !shouldDiscoverHost(host) {
return url
}
srvs, err := Services(host)
if err != nil {
log.Error(err)
return url
}
// log.I("len_srvs", len(srvs)).Debug("service entries")
if len(srvs) == 0 {
return url
}
srv := srvs[rand.Intn(len(srvs))]
return packURL(scheme, srv.String(), "", path, query)
}
// shouldDiscoverHost - ima li smisla pitati consul za service discovery
func shouldDiscoverHost(name string) bool {
parts := strings.Split(name, ".")
if len(parts) == 1 {
if parts[0] == "localhost" {
return false
}
return true
}
return parts[len(parts)-1] == domain
}
func unpackURL(s string) (scheme, host, port, path string, query url.Values) {
if strings.Contains(s, "//") {
u, err := url.Parse(s)
if err != nil {
return
}
scheme = u.Scheme
path = u.Path
host = u.Host
query = u.Query()
h, p, err := net.SplitHostPort(u.Host)
if err == nil {
host = h
port = p
}
return
}
host = s
h, p, err := net.SplitHostPort(s)
if err == nil {
host = h
port = p
}
return
}
func packURL(scheme, host, port, path string, query url.Values) (url string) {
if scheme != "" {
url = scheme + "://"
}
url += host
if port != "" {
url += ":" + port
}
url += path
if len(query) > 0 {
url += "?" + query.Encode()
}
return url
}
// Agent returns ref to consul agent.
// Only for use in sr package below.
func Agent() *api.Agent {
return consul.Agent()
}
// MustConnect connects to real consul.
// Useful in tests, when dcy is started in test mode to force to connect to real consul.
func MustConnect() {
mustConnect()
}
// Subscribe on service changes over all federated datacenters.
// Changes in Consul for service `name` will be passed to handler.
func Subscribe(name string, handler func(Addresses)) {
SubscribeByTag(name, "", handler)
}
// SubscribeByTag subscribes on service with specific tag
func SubscribeByTag(name, tag string, handler func(Addresses)) {
_, err := ServicesByTag(name, tag) // query for service in all of the datacenters so monitor goroutines start
if err != nil {
log.S("name", name).S("tag", tag).Error(err)
}
sn, _ := serviceName(name, domain)
l.Lock()
defer l.Unlock()
sn = cacheKey(tag, sn, "")
a := subscribers[sn]
if a == nil {
a = make([]func(Addresses), 0)
}
a = append(a, handler)
subscribers[sn] = a
}
func notify(name string, srvs Addresses) {
if s, ok := subscribers[name]; ok {
for _, h := range s {
h(srvs)
}
}
}
// Unsubscribe from service changes.
func Unsubscribe(name string, handler func(Addresses)) {
UnsubscribeByTag(name, "", handler)
}
func UnsubscribeByTag(name, tag string, handler func(Addresses)) {
sn, _ := serviceName(name, domain)
l.Lock()
defer l.Unlock()
sn = cacheKey(tag, sn, "")
a := subscribers[sn]
if a == nil {
return
}
for i, h := range a {
sf1 := reflect.ValueOf(h)
sf2 := reflect.ValueOf(handler)
if sf1.Pointer() == sf2.Pointer() {
a = append(a[:i], a[i+1:]...)
break
}
}
subscribers[sn] = a
}
func contains(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
| {
return ServicesByTag(name, "")
} | identifier_body |
dcy.go | package dcy
import (
"errors"
"fmt"
"math/rand"
"net"
"net/url"
"os"
"reflect"
"regexp"
"strings"
"sync"
"time"
"github.com/minus5/svckit/env"
"github.com/minus5/svckit/log"
"github.com/minus5/svckit/signal"
"github.com/hashicorp/consul/api"
)
const (
// EnvConsul is location of the consul to use. If not defined local consul is used.
EnvConsul = "SVCKIT_DCY_CONSUL"
// EnvWait if defined dcy will not start until those services are not found in consul.
// Usefull in development environment to controll start order.
EnvWait = "SVCKIT_DCY_CHECK_SVCS"
// EnvFederatedDcs is list of all datacenters
EnvFederatedDcs = "SVCKIT_FEDERATED_DCS"
)
var (
ErrNotFound = errors.New("not found")
ErrKeyNotFound = errors.New("key not found")
)
const (
queryTimeoutSeconds = 30
queryRetries = 10
waitTimeMinutes = 10
localConsulAdr = "127.0.0.1:8500"
)
var (
consul *api.Client
l sync.RWMutex
cache = map[string]Addresses{}
subscribers = map[string][]func(Addresses){}
domain string
dc string
nodeName string
consulAddr = localConsulAdr
federatedDcs []string
)
// Address is service address returned from Consul.
type Address struct {
Address string
Port int
}
// String return address in host:port string.
func (a Address) String() string {
return fmt.Sprintf("%s:%d", a.Address, a.Port)
}
func (a Address) Equal(a2 Address) bool {
return a.Address == a2.Address && a.Port == a2.Port
}
// Addresses is array of service addresses.
type Addresses []Address
// String returns string array in host:port format.
func (a Addresses) String() []string {
addrs := []string{}
for _, addr := range a {
addrs = append(addrs, addr.String())
}
return addrs
}
func (a Addresses) Equal(a2 Addresses) bool {
if len(a) != len(a2) {
return false
}
for _, d := range a {
found := false
for _, d2 := range a2 {
if d.Equal(d2) {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func (a Addresses) Contains(a2 Address) bool {
for _, a1 := range a {
if a1.Equal(a2) {
return true
}
}
return false
}
func (a *Addresses) Append(as Addresses) {
s := []Address(*a)
for _, a1 := range []Address(as) {
if a.Contains(a1) {
continue
}
s = append(s, a1)
}
*a = Addresses(s)
}
// On including package it will try to find consul.
// Will BLOCK until consul is found.
// If not found it will raise fatal.
// To disable finding consul, and use it in test mode set EnvConsul to "-"
// If EnvWait is defined dcy will not start until those services are not found in consul. This is usefull for development environment where we start consul, and other applications which are using dcy.
func init() {
if e, ok := os.LookupEnv(EnvConsul); ok && e != "" {
consulAddr = e
}
if consulAddr == "--" {
return
}
if consulAddr == "-" || (env.InTest() && consulAddr == localConsulAdr) {
noConsulTestMode()
return
}
if _, _, err := net.SplitHostPort(consulAddr); err != nil {
consulAddr = consulAddr + ":8500"
}
if e, ok := os.LookupEnv(EnvFederatedDcs); ok {
federatedDcs = strings.Fields(e)
}
rand.Seed(time.Now().UTC().UnixNano())
mustConnect()
updateEnv()
}
func updateEnv() {
if dc != "" {
env.SetDc(dc)
}
if nodeName != "" {
env.SetNodeName(nodeName)
}
}
func noConsulTestMode() {
domain = "sd"
dc = "dev"
nodeName = "node01"
federatedDcs = []string{dc}
cache["test1"] = []Address{
{"127.0.0.1", 12345},
{"127.0.0.1", 12348},
}
cache["test2"] = []Address{
{"10.11.12.13", 1415},
}
cache["test3"] = []Address{
{"192.168.0.1", 12345},
{"10.0.13.0", 12347},
}
cache["syslog"] = []Address{
{"127.0.0.1", 9514},
}
cache["statsd"] = []Address{
{"127.0.0.1", 8125},
}
cache["mongo"] = []Address{
{"127.0.0.1", 27017},
{"192.168.10.123", 27017},
}
cache["nsqlookupd-http"] = []Address{
{"127.0.0.1", 4161},
}
// add federated service notation to cache for all existing services - {service-name}-{datacenter}
for k, v := range cache {
cache[fmt.Sprintf("%s-%s", k, dc)] = v
}
}
func mustConnect() {
if err := signal.WithExponentialBackoff(connect); err != nil {
log.Printf("Giving up connecting %s", consulAddr)
log.Fatal(err)
}
}
func connect() error {
config := api.DefaultConfig()
config.Address = consulAddr
c, err := api.NewClient(config)
if err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
consul = c
if err := self(); err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
// add local dc if it's not set
if !contains(federatedDcs, dc) {
federatedDcs = append(federatedDcs, dc)
}
// wait for dependencies to apear in consul
if e, ok := os.LookupEnv(EnvWait); ok && e != "" {
services := strings.Split(e, ",")
for _, s := range services {
if _, err := Services(s); err != nil {
log.S("addr", consulAddr).S("service", s).Error(err)
return err
}
}
}
return nil
}
func ConnectTo(addr string) error {
if consul != nil {
return nil
}
consulAddr = addr
return signal.WithExponentialBackoff(connect)
}
func serviceName(fqdn, domain string) (string, string) {
rx := regexp.MustCompile(fmt.Sprintf(`^(\S*)\.service\.*(\S*)*\.%s$`, domain))
ms := rx.FindStringSubmatch(fqdn)
if len(ms) < 2 {
return fqdn, ""
}
if len(ms) > 2 {
return ms[1], ms[2]
}
return ms[1], ""
}
func parseConsulServiceEntries(ses []*api.ServiceEntry) Addresses {
srvs := []Address{}
for _, se := range ses {
addr := se.Service.Address
if addr == "" {
addr = se.Node.Address
}
srvs = append(srvs, Address{
Address: addr,
Port: se.Service.Port,
})
}
return srvs
}
func updateCache(tag, name, ldc string, srvs Addresses) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, ldc)
if srvs2, ok := cache[key]; ok {
if srvs2.Equal(srvs) {
return
}
}
cache[key] = srvs
cdc := ldc
if cdc == "" { // if not set, local dc is default
cdc = dc
}
// cache is updated only with services from specific datacenter
// but when notifying subscribers services from all of the datacenters are used
allServices := make([]Address, len(srvs))
copy(allServices, srvs)
for _, fdc := range federatedDcs {
if fdc == cdc {
continue
}
services, _, err := service(name, tag, &api.QueryOptions{Datacenter: fdc})
if err != nil {
continue
}
allServices = append(allServices, parseConsulServiceEntries(services)...)
}
nn := cacheKey(tag, name, "")
notify(nn, allServices)
}
func initializeCacheKey(tag, name, dc string) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, dc)
cache[key] = Addresses{}
}
func invalidateCache(tag, name, dc string) {
l.Lock()
defer l.Unlock()
delete(cache, cacheKey(tag, name, dc))
}
func cacheKey(tag, name, dc string) string {
var key string
if tag != "" {
key = fmt.Sprintf("%s-", tag)
}
if dc == "" {
return fmt.Sprintf("%s%s", key, name)
}
return fmt.Sprintf("%s%s-%s", key, name, dc)
}
func existsInCache(tag, name, dc string) bool {
l.RLock()
defer l.RUnlock()
_, ok := cache[cacheKey(tag, name, dc)]
return ok
}
func monitor(tag, name, dc string, startIndex uint64, serviceExistedOnStart bool) {
wi := startIndex
tries := 0
for {
qo := &api.QueryOptions{
WaitIndex: wi,
WaitTime: time.Minute * waitTimeMinutes,
AllowStale: true,
RequireConsistent: false,
Datacenter: dc,
}
ses, qm, err := service(name, tag, qo)
if err != nil {
tries++
if tries == queryRetries {
invalidateCache(tag, name, dc)
return
}
time.Sleep(time.Second * queryTimeoutSeconds)
continue
}
tries = 0
wi = qm.LastIndex
// monitor routine might be started for service that still doesn't exist but is expected to show up
// in that case don't send updates for non existing service and instead wait for it to show up
if !serviceExistedOnStart && len(ses) == 0 {
continue
}
updateCache(tag, name, dc, parseConsulServiceEntries(ses))
}
}
func service(service, tag string, qo *api.QueryOptions) ([]*api.ServiceEntry, *api.QueryMeta, error) {
ses, qm, err := consul.Health().Service(service, tag, false, qo)
if err != nil {
return nil, nil, err
}
// izbacujem servise koji imaju check koji nije ni "passing" ni "warning"
var filteredSes []*api.ServiceEntry
loop:
for _, se := range ses {
for _, c := range se.Checks {
if c.Status != "passing" && c.Status != "warning" {
continue loop
}
}
filteredSes = append(filteredSes, se)
}
return filteredSes, qm, nil
}
func query(tag, name, dc string) (Addresses, error) {
qo := &api.QueryOptions{Datacenter: dc}
ses, qm, err := service(name, tag, qo)
if err != nil {
return nil, err
}
// if key exists in cache it means that monitor goroutine is already started
if !existsInCache(tag, name, dc) |
srvs := parseConsulServiceEntries(ses)
if len(srvs) == 0 {
return nil, ErrNotFound
}
updateCache(tag, name, dc, srvs)
return srvs, nil
}
func srvQuery(tag, name string, dc string) (Addresses, error) {
l.RLock()
srvs, ok := cache[cacheKey(tag, name, dc)]
l.RUnlock()
if ok && len(srvs) > 0 {
return srvs, nil
}
srvs, err := query(tag, name, dc)
if err != nil {
return nil, err
}
return srvs, nil
}
func srv(tag, name string, dc string) (Addresses, error) {
srvs, err := srvQuery(tag, name, dc)
if err == nil {
return srvs, nil
}
nameNomad := strings.Replace(name, "_", "-", -1)
srvs, err = srvQuery(tag, nameNomad, dc)
if err != nil {
return nil, err
}
return srvs, nil
}
// LocalServices returns all services registered in Consul in specifed, or if not set, local datacenter
func LocalServices(name string) (Addresses, error) {
sn, ldc := serviceName(name, domain)
srvs, err := srv("", sn, ldc)
return srvs, err
}
// Services returns all services registered in Consul from all of the datacenters
func Services(name string) (Addresses, error) {
return ServicesByTag(name, "")
}
// Services returns all services registered in Consul from all of the datacenters
func ServicesByTag(name, tag string) (Addresses, error) {
sn, _ := serviceName(name, domain)
srvs := []Address{}
for _, fdc := range federatedDcs {
s, err := srv(tag, sn, fdc)
if err == nil {
srvs = append(srvs, s...)
}
}
if len(srvs) == 0 {
return srvs, ErrNotFound
}
return srvs, nil
}
// Service will find one service in Consul cluster giving priority to local datacenter.
// Will randomly choose one if there are multiple register in Consul.
func Service(name string) (Address, error) {
srvs, err := servicesWithLocalPriority(name, "")
if err != nil {
return Address{}, err
}
return oneOf(srvs), nil
}
// ServiceByTag will find one service in Consul cluster giving priority to local datacenter.
// Will randomly choose one if there are multiple register in Consul.
func ServiceByTag(name, tag string) (Address, error) {
srvs, err := servicesWithLocalPriority(name, tag)
if err != nil {
return Address{}, err
}
return oneOf(srvs), nil
}
func oneOf(srvs []Address) Address {
if len(srvs) == 1 {
return srvs[0]
}
return srvs[rand.Intn(len(srvs))]
}
// returns services from one of the datacenters giving priority to the local dc
func servicesWithLocalPriority(name, tag string) (Addresses, error) {
sn, ldc := serviceName(name, domain)
srvs, err := srv(tag, sn, ldc)
if err == nil && len(srvs) != 0 {
return srvs, err
}
// loop through all datacenters until desired service is found
for _, fdc := range federatedDcs {
// skip local dc since it was already checked
if fdc == dc {
continue
}
srvs, err = srv(tag, sn, fdc)
if err == nil && len(srvs) != 0 {
break
}
}
return srvs, err
}
// ServiceInDc will find one service in Consul claster for specified datacenter
func ServiceInDc(name, dc string) (Address, error) {
return ServiceInDcByTag("", name, dc)
}
// ServiceInDcByTag will find one service in consul claster with tag for specified datacenter
func ServiceInDcByTag(tag, name, dc string) (Address, error) {
srvs, err := srv(tag, name, dc)
if err != nil {
return Address{}, err
}
srv := srvs[rand.Intn(len(srvs))]
return srv, nil
}
// AgentService finds service on this (local) agent.
func AgentService(name string) (Address, error) {
svcs, err := consul.Agent().Services()
if err != nil {
return Address{}, err
}
for _, svc := range svcs {
//fmt.Printf("\t %#v\n", svc)
if svc.Service == name {
addr := svc.Address
if addr == "" {
addr = consulAddr
}
return Address{Address: addr, Port: svc.Port}, nil
}
}
return Address{}, ErrNotFound
}
// Inspect Consul for configuration parameters.
func self() error {
s, err := consul.Agent().Self()
if err != nil {
return err
}
cfg := s["Config"]
version := cfg["Version"].(string)
dc = cfg["Datacenter"].(string)
nodeName = cfg["NodeName"].(string)
if strings.HasPrefix(version, "0.") {
domain = cfg["Domain"].(string)
} else {
if dcfg := s["DebugConfig"]; dcfg != nil {
domain = dcfg["DNSDomain"].(string)
}
}
return nil
}
// Call consul LockKey api function.
func LockKey(key string) (*api.Lock, error) {
opts := &api.LockOptions{
Key: key,
LockWaitTime: 5 * time.Second,
}
return consul.LockOpts(opts)
}
// NodeName returns Node name as defined in Consul.
func NodeName() string {
return nodeName
}
// Dc returns datacenter name.
func Dc() string {
return dc
}
// KV reads key from Consul key value storage.
func KV(key string) (string, error) {
kv := consul.KV()
pair, _, err := kv.Get(key, nil)
if err != nil {
return "", err
}
if pair == nil {
return "", ErrKeyNotFound
}
return string(pair.Value), nil
}
// KVs read keys from Consul key value storage.
func KVs(key string) (map[string]string, error) {
kv := consul.KV()
entries, _, err := kv.List(key, nil)
if err != nil {
return nil, err
}
if entries == nil {
return nil, ErrKeyNotFound
}
m := make(map[string]string)
for _, e := range entries {
k := strings.TrimPrefix(e.Key, key)
k = strings.TrimPrefix(k, "/")
m[k] = string(e.Value)
}
return m, nil
}
// URL discovers host from url.
// If there are multiple services will randomly choose one.
func URL(url string) string {
scheme, host, _, path, query := unpackURL(url)
// log.S("url", url).S("host", host).Debug(fmt.Sprintf("should discover: %v", shouldDiscoverHost(host)))
if !shouldDiscoverHost(host) {
return url
}
srvs, err := Services(host)
if err != nil {
log.Error(err)
return url
}
// log.I("len_srvs", len(srvs)).Debug("service entries")
if len(srvs) == 0 {
return url
}
srv := srvs[rand.Intn(len(srvs))]
return packURL(scheme, srv.String(), "", path, query)
}
// shouldDiscoverHost - ima li smisla pitati consul za service discovery
func shouldDiscoverHost(name string) bool {
parts := strings.Split(name, ".")
if len(parts) == 1 {
if parts[0] == "localhost" {
return false
}
return true
}
return parts[len(parts)-1] == domain
}
func unpackURL(s string) (scheme, host, port, path string, query url.Values) {
if strings.Contains(s, "//") {
u, err := url.Parse(s)
if err != nil {
return
}
scheme = u.Scheme
path = u.Path
host = u.Host
query = u.Query()
h, p, err := net.SplitHostPort(u.Host)
if err == nil {
host = h
port = p
}
return
}
host = s
h, p, err := net.SplitHostPort(s)
if err == nil {
host = h
port = p
}
return
}
func packURL(scheme, host, port, path string, query url.Values) (url string) {
if scheme != "" {
url = scheme + "://"
}
url += host
if port != "" {
url += ":" + port
}
url += path
if len(query) > 0 {
url += "?" + query.Encode()
}
return url
}
// Agent returns ref to consul agent.
// Only for use in sr package below.
func Agent() *api.Agent {
return consul.Agent()
}
// MustConnect connects to real consul.
// Useful in tests, when dcy is started in test mode to force to connect to real consul.
func MustConnect() {
mustConnect()
}
// Subscribe on service changes over all federated datacenters.
// Changes in Consul for service `name` will be passed to handler.
func Subscribe(name string, handler func(Addresses)) {
SubscribeByTag(name, "", handler)
}
// SubscribeByTag subscribes on service with specific tag
func SubscribeByTag(name, tag string, handler func(Addresses)) {
_, err := ServicesByTag(name, tag) // query for service in all of the datacenters so monitor goroutines start
if err != nil {
log.S("name", name).S("tag", tag).Error(err)
}
sn, _ := serviceName(name, domain)
l.Lock()
defer l.Unlock()
sn = cacheKey(tag, sn, "")
a := subscribers[sn]
if a == nil {
a = make([]func(Addresses), 0)
}
a = append(a, handler)
subscribers[sn] = a
}
func notify(name string, srvs Addresses) {
if s, ok := subscribers[name]; ok {
for _, h := range s {
h(srvs)
}
}
}
// Unsubscribe from service changes.
func Unsubscribe(name string, handler func(Addresses)) {
UnsubscribeByTag(name, "", handler)
}
func UnsubscribeByTag(name, tag string, handler func(Addresses)) {
sn, _ := serviceName(name, domain)
l.Lock()
defer l.Unlock()
sn = cacheKey(tag, sn, "")
a := subscribers[sn]
if a == nil {
return
}
for i, h := range a {
sf1 := reflect.ValueOf(h)
sf2 := reflect.ValueOf(handler)
if sf1.Pointer() == sf2.Pointer() {
a = append(a[:i], a[i+1:]...)
break
}
}
subscribers[sn] = a
}
func contains(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
| {
serviceExists := len(ses) != 0
// initialize cache key and start goroutine
initializeCacheKey(tag, name, dc)
go func() {
monitor(tag, name, dc, qm.LastIndex, serviceExists)
}()
} | conditional_block |
dcy.go | package dcy
import (
"errors"
"fmt"
"math/rand"
"net"
"net/url"
"os"
"reflect"
"regexp"
"strings"
"sync"
"time"
"github.com/minus5/svckit/env"
"github.com/minus5/svckit/log"
"github.com/minus5/svckit/signal"
"github.com/hashicorp/consul/api"
)
const (
// EnvConsul is location of the consul to use. If not defined local consul is used.
EnvConsul = "SVCKIT_DCY_CONSUL"
// EnvWait if defined dcy will not start until those services are not found in consul.
// Usefull in development environment to controll start order.
EnvWait = "SVCKIT_DCY_CHECK_SVCS"
// EnvFederatedDcs is list of all datacenters
EnvFederatedDcs = "SVCKIT_FEDERATED_DCS"
)
var (
ErrNotFound = errors.New("not found")
ErrKeyNotFound = errors.New("key not found")
)
const (
queryTimeoutSeconds = 30
queryRetries = 10
waitTimeMinutes = 10
localConsulAdr = "127.0.0.1:8500"
)
var (
consul *api.Client
l sync.RWMutex
cache = map[string]Addresses{}
subscribers = map[string][]func(Addresses){}
domain string
dc string
nodeName string
consulAddr = localConsulAdr
federatedDcs []string
)
// Address is service address returned from Consul.
type Address struct {
Address string
Port int
}
// String return address in host:port string.
func (a Address) String() string {
return fmt.Sprintf("%s:%d", a.Address, a.Port)
}
func (a Address) Equal(a2 Address) bool {
return a.Address == a2.Address && a.Port == a2.Port
}
// Addresses is array of service addresses.
type Addresses []Address
// String returns string array in host:port format.
func (a Addresses) String() []string {
addrs := []string{}
for _, addr := range a {
addrs = append(addrs, addr.String())
}
return addrs
}
func (a Addresses) Equal(a2 Addresses) bool {
if len(a) != len(a2) {
return false
}
for _, d := range a {
found := false
for _, d2 := range a2 {
if d.Equal(d2) {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func (a Addresses) Contains(a2 Address) bool {
for _, a1 := range a {
if a1.Equal(a2) {
return true
}
}
return false
}
func (a *Addresses) Append(as Addresses) {
s := []Address(*a)
for _, a1 := range []Address(as) {
if a.Contains(a1) {
continue
}
s = append(s, a1)
}
*a = Addresses(s)
}
// On including package it will try to find consul.
// Will BLOCK until consul is found.
// If not found it will raise fatal.
// To disable finding consul, and use it in test mode set EnvConsul to "-"
// If EnvWait is defined dcy will not start until those services are not found in consul. This is usefull for development environment where we start consul, and other applications which are using dcy.
func init() {
if e, ok := os.LookupEnv(EnvConsul); ok && e != "" {
consulAddr = e
}
if consulAddr == "--" {
return
}
if consulAddr == "-" || (env.InTest() && consulAddr == localConsulAdr) {
noConsulTestMode()
return
}
if _, _, err := net.SplitHostPort(consulAddr); err != nil {
consulAddr = consulAddr + ":8500"
}
if e, ok := os.LookupEnv(EnvFederatedDcs); ok {
federatedDcs = strings.Fields(e)
}
rand.Seed(time.Now().UTC().UnixNano())
mustConnect()
updateEnv()
}
func updateEnv() {
if dc != "" {
env.SetDc(dc)
}
if nodeName != "" {
env.SetNodeName(nodeName)
}
}
func noConsulTestMode() {
domain = "sd"
dc = "dev"
nodeName = "node01"
federatedDcs = []string{dc}
cache["test1"] = []Address{
{"127.0.0.1", 12345},
{"127.0.0.1", 12348},
}
cache["test2"] = []Address{
{"10.11.12.13", 1415},
}
cache["test3"] = []Address{
{"192.168.0.1", 12345},
{"10.0.13.0", 12347},
}
cache["syslog"] = []Address{
{"127.0.0.1", 9514},
}
cache["statsd"] = []Address{
{"127.0.0.1", 8125},
}
cache["mongo"] = []Address{
{"127.0.0.1", 27017},
{"192.168.10.123", 27017},
}
cache["nsqlookupd-http"] = []Address{
{"127.0.0.1", 4161},
}
// add federated service notation to cache for all existing services - {service-name}-{datacenter}
for k, v := range cache {
cache[fmt.Sprintf("%s-%s", k, dc)] = v
}
}
func mustConnect() {
if err := signal.WithExponentialBackoff(connect); err != nil {
log.Printf("Giving up connecting %s", consulAddr)
log.Fatal(err)
}
}
func connect() error {
config := api.DefaultConfig()
config.Address = consulAddr
c, err := api.NewClient(config)
if err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
consul = c
if err := self(); err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
// add local dc if it's not set
if !contains(federatedDcs, dc) {
federatedDcs = append(federatedDcs, dc)
}
// wait for dependencies to apear in consul
if e, ok := os.LookupEnv(EnvWait); ok && e != "" {
services := strings.Split(e, ",")
for _, s := range services {
if _, err := Services(s); err != nil {
log.S("addr", consulAddr).S("service", s).Error(err)
return err
}
}
}
return nil
}
func ConnectTo(addr string) error {
if consul != nil {
return nil
}
consulAddr = addr
return signal.WithExponentialBackoff(connect)
}
func serviceName(fqdn, domain string) (string, string) {
rx := regexp.MustCompile(fmt.Sprintf(`^(\S*)\.service\.*(\S*)*\.%s$`, domain))
ms := rx.FindStringSubmatch(fqdn)
if len(ms) < 2 {
return fqdn, ""
}
if len(ms) > 2 {
return ms[1], ms[2]
}
return ms[1], ""
}
func parseConsulServiceEntries(ses []*api.ServiceEntry) Addresses {
srvs := []Address{}
for _, se := range ses {
addr := se.Service.Address
if addr == "" {
addr = se.Node.Address
}
srvs = append(srvs, Address{
Address: addr,
Port: se.Service.Port,
})
}
return srvs
}
func updateCache(tag, name, ldc string, srvs Addresses) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, ldc)
if srvs2, ok := cache[key]; ok {
if srvs2.Equal(srvs) {
return
}
}
cache[key] = srvs
cdc := ldc
if cdc == "" { // if not set, local dc is default
cdc = dc
}
// cache is updated only with services from specific datacenter
// but when notifying subscribers services from all of the datacenters are used
allServices := make([]Address, len(srvs))
copy(allServices, srvs)
for _, fdc := range federatedDcs {
if fdc == cdc {
continue
}
services, _, err := service(name, tag, &api.QueryOptions{Datacenter: fdc})
if err != nil {
continue
}
allServices = append(allServices, parseConsulServiceEntries(services)...)
}
nn := cacheKey(tag, name, "")
notify(nn, allServices)
}
func initializeCacheKey(tag, name, dc string) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, dc)
cache[key] = Addresses{}
}
func invalidateCache(tag, name, dc string) {
l.Lock()
defer l.Unlock()
delete(cache, cacheKey(tag, name, dc))
}
func cacheKey(tag, name, dc string) string {
var key string
if tag != "" {
key = fmt.Sprintf("%s-", tag)
}
if dc == "" {
return fmt.Sprintf("%s%s", key, name)
}
return fmt.Sprintf("%s%s-%s", key, name, dc)
}
func existsInCache(tag, name, dc string) bool {
l.RLock()
defer l.RUnlock()
_, ok := cache[cacheKey(tag, name, dc)]
return ok
}
func monitor(tag, name, dc string, startIndex uint64, serviceExistedOnStart bool) {
wi := startIndex
tries := 0
for {
qo := &api.QueryOptions{
WaitIndex: wi,
WaitTime: time.Minute * waitTimeMinutes,
AllowStale: true,
RequireConsistent: false,
Datacenter: dc,
}
ses, qm, err := service(name, tag, qo)
if err != nil {
tries++
if tries == queryRetries {
invalidateCache(tag, name, dc)
return
}
time.Sleep(time.Second * queryTimeoutSeconds)
continue
}
tries = 0
wi = qm.LastIndex
// monitor routine might be started for service that still doesn't exist but is expected to show up
// in that case don't send updates for non existing service and instead wait for it to show up
if !serviceExistedOnStart && len(ses) == 0 {
continue
}
updateCache(tag, name, dc, parseConsulServiceEntries(ses))
}
}
func service(service, tag string, qo *api.QueryOptions) ([]*api.ServiceEntry, *api.QueryMeta, error) {
ses, qm, err := consul.Health().Service(service, tag, false, qo)
if err != nil {
return nil, nil, err
}
// izbacujem servise koji imaju check koji nije ni "passing" ni "warning"
var filteredSes []*api.ServiceEntry
loop:
for _, se := range ses {
for _, c := range se.Checks {
if c.Status != "passing" && c.Status != "warning" {
continue loop
}
}
filteredSes = append(filteredSes, se)
}
return filteredSes, qm, nil
}
func query(tag, name, dc string) (Addresses, error) {
qo := &api.QueryOptions{Datacenter: dc}
ses, qm, err := service(name, tag, qo)
if err != nil {
return nil, err
}
// if key exists in cache it means that monitor goroutine is already started
if !existsInCache(tag, name, dc) {
serviceExists := len(ses) != 0
// initialize cache key and start goroutine
initializeCacheKey(tag, name, dc)
go func() {
monitor(tag, name, dc, qm.LastIndex, serviceExists)
}()
}
srvs := parseConsulServiceEntries(ses)
if len(srvs) == 0 {
return nil, ErrNotFound
}
updateCache(tag, name, dc, srvs)
return srvs, nil
}
func srvQuery(tag, name string, dc string) (Addresses, error) {
l.RLock()
srvs, ok := cache[cacheKey(tag, name, dc)]
l.RUnlock()
if ok && len(srvs) > 0 {
return srvs, nil
}
srvs, err := query(tag, name, dc)
if err != nil {
return nil, err
}
return srvs, nil
}
func srv(tag, name string, dc string) (Addresses, error) {
srvs, err := srvQuery(tag, name, dc)
if err == nil {
return srvs, nil
}
nameNomad := strings.Replace(name, "_", "-", -1)
srvs, err = srvQuery(tag, nameNomad, dc)
if err != nil {
return nil, err
}
return srvs, nil
}
// LocalServices returns all services registered in Consul in specifed, or if not set, local datacenter
func LocalServices(name string) (Addresses, error) {
sn, ldc := serviceName(name, domain)
srvs, err := srv("", sn, ldc)
return srvs, err
}
// Services returns all services registered in Consul from all of the datacenters
func Services(name string) (Addresses, error) {
return ServicesByTag(name, "")
}
// Services returns all services registered in Consul from all of the datacenters
func ServicesByTag(name, tag string) (Addresses, error) {
sn, _ := serviceName(name, domain)
srvs := []Address{}
for _, fdc := range federatedDcs {
s, err := srv(tag, sn, fdc)
if err == nil {
srvs = append(srvs, s...)
}
}
if len(srvs) == 0 {
return srvs, ErrNotFound
}
return srvs, nil
}
// Service will find one service in Consul cluster giving priority to local datacenter.
// Will randomly choose one if there are multiple register in Consul.
func Service(name string) (Address, error) {
srvs, err := servicesWithLocalPriority(name, "")
if err != nil {
return Address{}, err
}
return oneOf(srvs), nil
}
// ServiceByTag will find one service in Consul cluster giving priority to local datacenter.
// Will randomly choose one if there are multiple register in Consul.
func ServiceByTag(name, tag string) (Address, error) {
srvs, err := servicesWithLocalPriority(name, tag)
if err != nil {
return Address{}, err
}
return oneOf(srvs), nil
}
func oneOf(srvs []Address) Address {
if len(srvs) == 1 {
return srvs[0]
}
return srvs[rand.Intn(len(srvs))]
}
// returns services from one of the datacenters giving priority to the local dc
func servicesWithLocalPriority(name, tag string) (Addresses, error) {
sn, ldc := serviceName(name, domain)
srvs, err := srv(tag, sn, ldc)
if err == nil && len(srvs) != 0 {
return srvs, err
}
// loop through all datacenters until desired service is found
for _, fdc := range federatedDcs {
// skip local dc since it was already checked
if fdc == dc {
continue
}
srvs, err = srv(tag, sn, fdc)
if err == nil && len(srvs) != 0 {
break
}
}
return srvs, err
}
// ServiceInDc will find one service in Consul claster for specified datacenter
func ServiceInDc(name, dc string) (Address, error) {
return ServiceInDcByTag("", name, dc)
}
// ServiceInDcByTag will find one service in consul claster with tag for specified datacenter
func ServiceInDcByTag(tag, name, dc string) (Address, error) {
srvs, err := srv(tag, name, dc)
if err != nil {
return Address{}, err
}
srv := srvs[rand.Intn(len(srvs))]
return srv, nil
}
// AgentService finds service on this (local) agent.
func AgentService(name string) (Address, error) {
svcs, err := consul.Agent().Services()
if err != nil {
return Address{}, err
}
for _, svc := range svcs {
//fmt.Printf("\t %#v\n", svc)
if svc.Service == name {
addr := svc.Address
if addr == "" {
addr = consulAddr
}
return Address{Address: addr, Port: svc.Port}, nil
}
}
return Address{}, ErrNotFound
}
// Inspect Consul for configuration parameters.
func self() error {
s, err := consul.Agent().Self()
if err != nil {
return err
}
cfg := s["Config"]
version := cfg["Version"].(string)
dc = cfg["Datacenter"].(string)
nodeName = cfg["NodeName"].(string)
if strings.HasPrefix(version, "0.") {
domain = cfg["Domain"].(string)
} else {
if dcfg := s["DebugConfig"]; dcfg != nil {
domain = dcfg["DNSDomain"].(string)
}
}
return nil
}
// Call consul LockKey api function.
func LockKey(key string) (*api.Lock, error) {
opts := &api.LockOptions{
Key: key,
LockWaitTime: 5 * time.Second,
}
return consul.LockOpts(opts)
}
// NodeName returns Node name as defined in Consul.
func NodeName() string {
return nodeName
}
// Dc returns datacenter name.
func Dc() string {
return dc
}
// KV reads key from Consul key value storage.
func KV(key string) (string, error) {
kv := consul.KV()
pair, _, err := kv.Get(key, nil)
if err != nil {
return "", err
}
if pair == nil {
return "", ErrKeyNotFound
}
return string(pair.Value), nil
}
// KVs read keys from Consul key value storage.
func KVs(key string) (map[string]string, error) {
kv := consul.KV()
entries, _, err := kv.List(key, nil)
if err != nil {
return nil, err
}
if entries == nil {
return nil, ErrKeyNotFound
}
m := make(map[string]string)
for _, e := range entries {
k := strings.TrimPrefix(e.Key, key)
k = strings.TrimPrefix(k, "/")
m[k] = string(e.Value)
}
return m, nil
}
// URL discovers host from url.
// If there are multiple services will randomly choose one.
func URL(url string) string {
scheme, host, _, path, query := unpackURL(url)
// log.S("url", url).S("host", host).Debug(fmt.Sprintf("should discover: %v", shouldDiscoverHost(host)))
if !shouldDiscoverHost(host) {
return url
}
srvs, err := Services(host)
if err != nil {
log.Error(err)
return url
}
// log.I("len_srvs", len(srvs)).Debug("service entries")
if len(srvs) == 0 {
return url
}
srv := srvs[rand.Intn(len(srvs))]
return packURL(scheme, srv.String(), "", path, query)
}
// shouldDiscoverHost - ima li smisla pitati consul za service discovery
func shouldDiscoverHost(name string) bool {
parts := strings.Split(name, ".")
if len(parts) == 1 {
if parts[0] == "localhost" {
return false
}
return true
}
return parts[len(parts)-1] == domain
}
func unpackURL(s string) (scheme, host, port, path string, query url.Values) {
if strings.Contains(s, "//") {
u, err := url.Parse(s)
if err != nil {
return
}
scheme = u.Scheme
path = u.Path
host = u.Host
query = u.Query()
h, p, err := net.SplitHostPort(u.Host)
if err == nil {
host = h
port = p
}
return
}
host = s
h, p, err := net.SplitHostPort(s)
if err == nil {
host = h
port = p
}
return
}
func packURL(scheme, host, port, path string, query url.Values) (url string) {
if scheme != "" {
url = scheme + "://"
}
url += host
if port != "" {
url += ":" + port
}
url += path
if len(query) > 0 {
url += "?" + query.Encode()
}
return url
}
// Agent returns ref to consul agent.
// Only for use in sr package below.
func Agent() *api.Agent {
return consul.Agent()
}
// MustConnect connects to real consul.
// Useful in tests, when dcy is started in test mode to force to connect to real consul.
func MustConnect() {
mustConnect()
}
// Subscribe on service changes over all federated datacenters.
// Changes in Consul for service `name` will be passed to handler.
func Subscribe(name string, handler func(Addresses)) {
SubscribeByTag(name, "", handler)
}
// SubscribeByTag subscribes on service with specific tag
func SubscribeByTag(name, tag string, handler func(Addresses)) {
_, err := ServicesByTag(name, tag) // query for service in all of the datacenters so monitor goroutines start
if err != nil {
log.S("name", name).S("tag", tag).Error(err)
}
sn, _ := serviceName(name, domain)
l.Lock()
defer l.Unlock()
sn = cacheKey(tag, sn, "")
a := subscribers[sn]
if a == nil {
a = make([]func(Addresses), 0)
}
a = append(a, handler)
subscribers[sn] = a
}
func notify(name string, srvs Addresses) {
if s, ok := subscribers[name]; ok {
for _, h := range s {
h(srvs)
}
}
}
// Unsubscribe from service changes.
func Unsubscribe(name string, handler func(Addresses)) {
UnsubscribeByTag(name, "", handler)
}
func UnsubscribeByTag(name, tag string, handler func(Addresses)) {
sn, _ := serviceName(name, domain)
l.Lock()
defer l.Unlock()
sn = cacheKey(tag, sn, "")
a := subscribers[sn]
if a == nil {
return
}
for i, h := range a {
sf1 := reflect.ValueOf(h)
sf2 := reflect.ValueOf(handler)
if sf1.Pointer() == sf2.Pointer() {
a = append(a[:i], a[i+1:]...)
break
}
}
subscribers[sn] = a
}
func contains(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
| shouldDiscoverHost | identifier_name |
dcy.go | package dcy
import (
"errors"
"fmt"
"math/rand"
"net"
"net/url"
"os"
"reflect"
"regexp"
"strings"
"sync"
"time"
"github.com/minus5/svckit/env"
"github.com/minus5/svckit/log"
"github.com/minus5/svckit/signal"
"github.com/hashicorp/consul/api"
)
const (
// EnvConsul is location of the consul to use. If not defined local consul is used.
EnvConsul = "SVCKIT_DCY_CONSUL"
// EnvWait if defined dcy will not start until those services are not found in consul.
// Usefull in development environment to controll start order.
EnvWait = "SVCKIT_DCY_CHECK_SVCS"
// EnvFederatedDcs is list of all datacenters
EnvFederatedDcs = "SVCKIT_FEDERATED_DCS"
)
var (
ErrNotFound = errors.New("not found")
ErrKeyNotFound = errors.New("key not found")
)
const (
queryTimeoutSeconds = 30
queryRetries = 10
waitTimeMinutes = 10
localConsulAdr = "127.0.0.1:8500"
)
var (
consul *api.Client
l sync.RWMutex
cache = map[string]Addresses{}
subscribers = map[string][]func(Addresses){}
domain string
dc string
nodeName string
consulAddr = localConsulAdr
federatedDcs []string
)
// Address is service address returned from Consul.
type Address struct {
Address string
Port int
}
// String return address in host:port string.
func (a Address) String() string {
return fmt.Sprintf("%s:%d", a.Address, a.Port)
}
func (a Address) Equal(a2 Address) bool {
return a.Address == a2.Address && a.Port == a2.Port
}
// Addresses is array of service addresses.
type Addresses []Address
// String returns string array in host:port format.
func (a Addresses) String() []string {
addrs := []string{}
for _, addr := range a {
addrs = append(addrs, addr.String())
}
return addrs
}
func (a Addresses) Equal(a2 Addresses) bool {
if len(a) != len(a2) {
return false
}
for _, d := range a {
found := false
for _, d2 := range a2 {
if d.Equal(d2) {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func (a Addresses) Contains(a2 Address) bool {
for _, a1 := range a {
if a1.Equal(a2) {
return true
}
}
return false
}
func (a *Addresses) Append(as Addresses) {
s := []Address(*a)
for _, a1 := range []Address(as) {
if a.Contains(a1) {
continue
}
s = append(s, a1)
}
*a = Addresses(s)
}
// On including package it will try to find consul.
// Will BLOCK until consul is found.
// If not found it will raise fatal.
// To disable finding consul, and use it in test mode set EnvConsul to "-"
// If EnvWait is defined dcy will not start until those services are not found in consul. This is usefull for development environment where we start consul, and other applications which are using dcy.
func init() {
if e, ok := os.LookupEnv(EnvConsul); ok && e != "" {
consulAddr = e
}
if consulAddr == "--" {
return
}
if consulAddr == "-" || (env.InTest() && consulAddr == localConsulAdr) {
noConsulTestMode()
return
}
if _, _, err := net.SplitHostPort(consulAddr); err != nil {
consulAddr = consulAddr + ":8500"
}
if e, ok := os.LookupEnv(EnvFederatedDcs); ok {
federatedDcs = strings.Fields(e)
}
rand.Seed(time.Now().UTC().UnixNano())
mustConnect()
updateEnv()
}
func updateEnv() {
if dc != "" {
env.SetDc(dc)
}
if nodeName != "" {
env.SetNodeName(nodeName)
}
}
func noConsulTestMode() {
domain = "sd"
dc = "dev"
nodeName = "node01"
federatedDcs = []string{dc}
cache["test1"] = []Address{
{"127.0.0.1", 12345},
{"127.0.0.1", 12348},
}
cache["test2"] = []Address{
{"10.11.12.13", 1415},
}
cache["test3"] = []Address{
{"192.168.0.1", 12345},
{"10.0.13.0", 12347},
}
cache["syslog"] = []Address{
{"127.0.0.1", 9514},
}
cache["statsd"] = []Address{
{"127.0.0.1", 8125},
}
cache["mongo"] = []Address{
{"127.0.0.1", 27017},
{"192.168.10.123", 27017},
}
cache["nsqlookupd-http"] = []Address{
{"127.0.0.1", 4161},
}
// add federated service notation to cache for all existing services - {service-name}-{datacenter}
for k, v := range cache {
cache[fmt.Sprintf("%s-%s", k, dc)] = v
}
}
func mustConnect() {
if err := signal.WithExponentialBackoff(connect); err != nil {
log.Printf("Giving up connecting %s", consulAddr)
log.Fatal(err)
}
}
func connect() error {
config := api.DefaultConfig()
config.Address = consulAddr
c, err := api.NewClient(config)
if err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
consul = c
if err := self(); err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
// add local dc if it's not set
if !contains(federatedDcs, dc) {
federatedDcs = append(federatedDcs, dc)
}
// wait for dependencies to apear in consul
if e, ok := os.LookupEnv(EnvWait); ok && e != "" {
services := strings.Split(e, ",")
for _, s := range services {
if _, err := Services(s); err != nil {
log.S("addr", consulAddr).S("service", s).Error(err)
return err
}
}
}
return nil
}
func ConnectTo(addr string) error {
if consul != nil {
return nil
}
consulAddr = addr
return signal.WithExponentialBackoff(connect)
}
func serviceName(fqdn, domain string) (string, string) {
rx := regexp.MustCompile(fmt.Sprintf(`^(\S*)\.service\.*(\S*)*\.%s$`, domain))
ms := rx.FindStringSubmatch(fqdn)
if len(ms) < 2 {
return fqdn, ""
}
if len(ms) > 2 {
return ms[1], ms[2]
}
return ms[1], ""
}
// parseConsulServiceEntries converts Consul health entries into plain
// Addresses, falling back to the node address when a service entry did
// not register an explicit service address.
func parseConsulServiceEntries(ses []*api.ServiceEntry) Addresses {
	out := make(Addresses, 0, len(ses))
	for _, entry := range ses {
		host := entry.Service.Address
		if host == "" {
			// service registered without an address: use the node's
			host = entry.Node.Address
		}
		out = append(out, Address{Address: host, Port: entry.Service.Port})
	}
	return out
}
func updateCache(tag, name, ldc string, srvs Addresses) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, ldc)
if srvs2, ok := cache[key]; ok {
if srvs2.Equal(srvs) {
return
}
}
cache[key] = srvs
cdc := ldc
if cdc == "" { // if not set, local dc is default
cdc = dc
}
// cache is updated only with services from specific datacenter
// but when notifying subscribers services from all of the datacenters are used
allServices := make([]Address, len(srvs))
copy(allServices, srvs)
for _, fdc := range federatedDcs {
if fdc == cdc {
continue
}
services, _, err := service(name, tag, &api.QueryOptions{Datacenter: fdc})
if err != nil {
continue
}
allServices = append(allServices, parseConsulServiceEntries(services)...)
}
nn := cacheKey(tag, name, "")
notify(nn, allServices)
}
func initializeCacheKey(tag, name, dc string) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, dc)
cache[key] = Addresses{}
}
func invalidateCache(tag, name, dc string) {
l.Lock()
defer l.Unlock()
delete(cache, cacheKey(tag, name, dc))
}
// cacheKey builds the map key for the service cache in the form
// "[tag-]name[-dc]"; empty tag and dc parts are omitted.
func cacheKey(tag, name, dc string) string {
	prefix := ""
	if tag != "" {
		prefix = tag + "-"
	}
	if dc == "" {
		return prefix + name
	}
	return prefix + name + "-" + dc
}
func existsInCache(tag, name, dc string) bool {
l.RLock()
defer l.RUnlock()
_, ok := cache[cacheKey(tag, name, dc)]
return ok
}
// monitor runs a Consul blocking-query loop for one service/tag in one
// datacenter, refreshing the cache on every index change. It exits (and
// removes the cache key) after queryRetries consecutive query failures,
// which lets a later query() start a fresh monitor goroutine.
func monitor(tag, name, dc string, startIndex uint64, serviceExistedOnStart bool) {
	wi := startIndex
	tries := 0
	for {
		// Blocking query: returns when Consul's index moves past wi or
		// the wait time expires.
		qo := &api.QueryOptions{
			WaitIndex:         wi,
			WaitTime:          time.Minute * waitTimeMinutes,
			AllowStale:        true,
			RequireConsistent: false,
			Datacenter:        dc,
		}
		ses, qm, err := service(name, tag, qo)
		if err != nil {
			tries++
			if tries == queryRetries {
				// give up after repeated failures; drop the cache key so
				// the next lookup restarts monitoring
				invalidateCache(tag, name, dc)
				return
			}
			time.Sleep(time.Second * queryTimeoutSeconds)
			continue
		}
		tries = 0
		wi = qm.LastIndex
		// monitor routine might be started for service that still doesn't exist but is expected to show up
		// in that case don't send updates for non existing service and instead wait for it to show up
		if !serviceExistedOnStart && len(ses) == 0 {
			continue
		}
		updateCache(tag, name, dc, parseConsulServiceEntries(ses))
	}
}
// service queries the Consul health API for entries of the given
// service/tag and keeps only entries whose checks are all either
// "passing" or "warning".
func service(service, tag string, qo *api.QueryOptions) ([]*api.ServiceEntry, *api.QueryMeta, error) {
	entries, meta, err := consul.Health().Service(service, tag, false, qo)
	if err != nil {
		return nil, nil, err
	}
	healthy := func(se *api.ServiceEntry) bool {
		for _, c := range se.Checks {
			if c.Status != "passing" && c.Status != "warning" {
				return false
			}
		}
		return true
	}
	// drop entries that have any check which is neither passing nor warning
	var kept []*api.ServiceEntry
	for _, se := range entries {
		if healthy(se) {
			kept = append(kept, se)
		}
	}
	return kept, meta, nil
}
func query(tag, name, dc string) (Addresses, error) {
qo := &api.QueryOptions{Datacenter: dc}
ses, qm, err := service(name, tag, qo)
if err != nil {
return nil, err
}
// if key exists in cache it means that monitor goroutine is already started
if !existsInCache(tag, name, dc) {
serviceExists := len(ses) != 0
// initialize cache key and start goroutine
initializeCacheKey(tag, name, dc)
go func() {
monitor(tag, name, dc, qm.LastIndex, serviceExists)
}()
}
srvs := parseConsulServiceEntries(ses)
if len(srvs) == 0 {
return nil, ErrNotFound
}
updateCache(tag, name, dc, srvs)
return srvs, nil
}
// srvQuery returns cached addresses when a non-empty cache entry exists,
// otherwise falls through to a live Consul query.
func srvQuery(tag, name string, dc string) (Addresses, error) {
	l.RLock()
	cached, hit := cache[cacheKey(tag, name, dc)]
	l.RUnlock()
	if hit && len(cached) > 0 {
		return cached, nil
	}
	return query(tag, name, dc)
}
// srv resolves service addresses, retrying with the Nomad-style name
// (underscores replaced by dashes) when the first lookup fails.
func srv(tag, name string, dc string) (Addresses, error) {
	if addrs, err := srvQuery(tag, name, dc); err == nil {
		return addrs, nil
	}
	nomadName := strings.Replace(name, "_", "-", -1)
	return srvQuery(tag, nomadName, dc)
}
// LocalServices returns all services registered in Consul in specifed, or if not set, local datacenter
func LocalServices(name string) (Addresses, error) {
sn, ldc := serviceName(name, domain)
srvs, err := srv("", sn, ldc)
return srvs, err
}
// Services returns all services registered in Consul from all of the datacenters
func Services(name string) (Addresses, error) {
return ServicesByTag(name, "")
}
// ServicesByTag returns all instances of the named service carrying the
// given tag, aggregated across every federated datacenter.
// Returns ErrNotFound when no datacenter yields any address; per-dc
// lookup errors are silently skipped.
func ServicesByTag(name, tag string) (Addresses, error) {
	sn, _ := serviceName(name, domain)
	srvs := []Address{}
	for _, fdc := range federatedDcs {
		s, err := srv(tag, sn, fdc)
		if err == nil {
			srvs = append(srvs, s...)
		}
	}
	if len(srvs) == 0 {
		return srvs, ErrNotFound
	}
	return srvs, nil
}
// Service will find one service in Consul cluster giving priority to local datacenter.
// Will randomly choose one if there are multiple register in Consul.
func Service(name string) (Address, error) {
srvs, err := servicesWithLocalPriority(name, "")
if err != nil {
return Address{}, err
}
return oneOf(srvs), nil
}
// ServiceByTag will find one service in Consul cluster giving priority to local datacenter.
// Will randomly choose one if there are multiple register in Consul.
func ServiceByTag(name, tag string) (Address, error) {
srvs, err := servicesWithLocalPriority(name, tag)
if err != nil {
return Address{}, err
}
return oneOf(srvs), nil
}
// oneOf picks a uniformly random address from srvs. Callers must pass a
// non-empty slice — rand.Intn panics on zero.
func oneOf(srvs []Address) Address {
	// rand.Intn(1) is always 0, so the single-element case needs no
	// special branch.
	return srvs[rand.Intn(len(srvs))]
}
// returns services from one of the datacenters giving priority to the local dc
func servicesWithLocalPriority(name, tag string) (Addresses, error) {
	sn, ldc := serviceName(name, domain)
	// first try the datacenter encoded in the name (empty ldc means the
	// local datacenter is used by the lower layers)
	srvs, err := srv(tag, sn, ldc)
	if err == nil && len(srvs) != 0 {
		return srvs, err
	}
	// loop through all datacenters until desired service is found
	for _, fdc := range federatedDcs {
		// skip local dc since it was already checked
		// NOTE(review): this skips the package-level local dc, while the
		// first lookup used ldc from the name; when ldc differs from dc,
		// the local dc is never checked here — confirm this is intended.
		if fdc == dc {
			continue
		}
		srvs, err = srv(tag, sn, fdc)
		if err == nil && len(srvs) != 0 {
			break
		}
	}
	// returns the last attempt's result; err may be non-nil when every
	// datacenter failed
	return srvs, err
}
// ServiceInDc will find one service in Consul claster for specified datacenter
func ServiceInDc(name, dc string) (Address, error) {
return ServiceInDcByTag("", name, dc)
}
// ServiceInDcByTag finds one instance of the named service carrying the
// given tag, registered in the Consul cluster in the specified
// datacenter. When multiple instances exist one is chosen at random.
// Returns ErrNotFound (via srv) when the service is unknown.
func ServiceInDcByTag(tag, name, dc string) (Address, error) {
	addrs, err := srv(tag, name, dc)
	if err != nil {
		return Address{}, err
	}
	// Reuse the shared random selection (consistent with Service and
	// ServiceByTag) instead of duplicating rand.Intn here; this also
	// avoids shadowing the package function srv with a local variable.
	return oneOf(addrs), nil
}
// AgentService finds service on this (local) agent.
func AgentService(name string) (Address, error) {
svcs, err := consul.Agent().Services()
if err != nil {
return Address{}, err
}
for _, svc := range svcs {
//fmt.Printf("\t %#v\n", svc)
if svc.Service == name {
addr := svc.Address
if addr == "" {
addr = consulAddr
}
return Address{Address: addr, Port: svc.Port}, nil
}
}
return Address{}, ErrNotFound
}
// Inspect Consul for configuration parameters.
// self reads the agent's self-info and stores the datacenter, node name
// and DNS domain into package globals. Consul 0.x exposes the domain
// under Config["Domain"]; newer versions expose it under
// DebugConfig["DNSDomain"].
// NOTE(review): the unchecked type assertions below panic on an
// unexpected agent payload — presumably acceptable during startup;
// confirm.
func self() error {
	s, err := consul.Agent().Self()
	if err != nil {
		return err
	}
	cfg := s["Config"]
	version := cfg["Version"].(string)
	dc = cfg["Datacenter"].(string)
	nodeName = cfg["NodeName"].(string)
	if strings.HasPrefix(version, "0.") {
		domain = cfg["Domain"].(string)
	} else {
		if dcfg := s["DebugConfig"]; dcfg != nil {
			domain = dcfg["DNSDomain"].(string)
		}
	}
	return nil
}
// Call consul LockKey api function.
func LockKey(key string) (*api.Lock, error) {
opts := &api.LockOptions{
Key: key,
LockWaitTime: 5 * time.Second,
}
return consul.LockOpts(opts)
}
// NodeName returns Node name as defined in Consul.
func NodeName() string {
return nodeName
}
// Dc returns datacenter name.
func Dc() string {
return dc
}
// KV reads a single key from the Consul key/value store.
// Returns ErrKeyNotFound when the key does not exist.
func KV(key string) (string, error) {
	pair, _, err := consul.KV().Get(key, nil)
	if err != nil {
		return "", err
	}
	if pair == nil {
		return "", ErrKeyNotFound
	}
	return string(pair.Value), nil
}
// KVs lists all keys under the given prefix in the Consul key/value
// store, returning a map of key (relative to the prefix, leading slash
// stripped) to value. Returns ErrKeyNotFound when nothing matches.
func KVs(key string) (map[string]string, error) {
	entries, _, err := consul.KV().List(key, nil)
	if err != nil {
		return nil, err
	}
	if entries == nil {
		return nil, ErrKeyNotFound
	}
	out := make(map[string]string, len(entries))
	for _, entry := range entries {
		rel := strings.TrimPrefix(strings.TrimPrefix(entry.Key, key), "/")
		out[rel] = string(entry.Value)
	}
	return out, nil
}
// URL discovers host from url.
// If the host part looks like a Consul service name, it is replaced with
// a randomly chosen registered instance (host:port); on any lookup
// failure the original url is returned unchanged.
func URL(url string) string {
	scheme, host, _, path, query := unpackURL(url)
	if !shouldDiscoverHost(host) {
		return url
	}
	srvs, err := Services(host)
	if err != nil {
		log.Error(err)
		return url
	}
	if len(srvs) == 0 {
		return url
	}
	// Use the shared helper for random selection (consistent with
	// Service/ServiceByTag) instead of duplicating rand.Intn here.
	return packURL(scheme, oneOf(srvs).String(), "", path, query)
}
// shouldDiscoverHost reports whether it makes sense to ask Consul for
// service discovery of this host name: a bare name other than
// "localhost", or a dotted name ending in the Consul DNS domain.
func shouldDiscoverHost(name string) bool {
	parts := strings.Split(name, ".")
	if len(parts) == 1 {
		return parts[0] != "localhost"
	}
	return parts[len(parts)-1] == domain
}
// unpackURL splits s into scheme, host, port, path and query parameters.
// A string without "//" is treated as a bare host[:port]; on a parse
// error all results are zero values.
func unpackURL(s string) (scheme, host, port, path string, query url.Values) {
	// fill host (and port when present) from a host[:port] string
	splitHostPort := func(hp string) {
		host = hp
		if h, p, err := net.SplitHostPort(hp); err == nil {
			host, port = h, p
		}
	}
	if !strings.Contains(s, "//") {
		splitHostPort(s)
		return
	}
	u, err := url.Parse(s)
	if err != nil {
		return
	}
	scheme, path, query = u.Scheme, u.Path, u.Query()
	splitHostPort(u.Host)
	return
}
func packURL(scheme, host, port, path string, query url.Values) (url string) {
if scheme != "" {
url = scheme + "://"
}
url += host
if port != "" {
url += ":" + port
}
url += path
if len(query) > 0 {
url += "?" + query.Encode()
}
return url
}
// Agent returns ref to consul agent.
// Only for use in sr package below.
func Agent() *api.Agent {
return consul.Agent()
}
// MustConnect connects to real consul.
// Useful in tests, when dcy is started in test mode to force to connect to real consul.
func MustConnect() {
mustConnect()
}
// Subscribe on service changes over all federated datacenters.
// Changes in Consul for service `name` will be passed to handler.
func Subscribe(name string, handler func(Addresses)) {
SubscribeByTag(name, "", handler)
}
// SubscribeByTag subscribes on service with specific tag
func SubscribeByTag(name, tag string, handler func(Addresses)) {
_, err := ServicesByTag(name, tag) // query for service in all of the datacenters so monitor goroutines start
if err != nil {
log.S("name", name).S("tag", tag).Error(err)
}
sn, _ := serviceName(name, domain)
l.Lock()
defer l.Unlock()
sn = cacheKey(tag, sn, "")
a := subscribers[sn]
if a == nil {
a = make([]func(Addresses), 0)
}
a = append(a, handler)
subscribers[sn] = a
}
func notify(name string, srvs Addresses) {
if s, ok := subscribers[name]; ok {
for _, h := range s {
h(srvs)
}
}
}
// Unsubscribe from service changes.
func Unsubscribe(name string, handler func(Addresses)) {
UnsubscribeByTag(name, "", handler)
}
func UnsubscribeByTag(name, tag string, handler func(Addresses)) {
sn, _ := serviceName(name, domain)
l.Lock()
defer l.Unlock()
sn = cacheKey(tag, sn, "")
a := subscribers[sn]
if a == nil {
return
}
for i, h := range a {
sf1 := reflect.ValueOf(h)
sf2 := reflect.ValueOf(handler)
if sf1.Pointer() == sf2.Pointer() {
a = append(a[:i], a[i+1:]...)
break
}
}
subscribers[sn] = a
}
// contains reports whether the slice s includes the string e.
func contains(s []string, e string) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
opendocument_html_xslt.py | # coding: utf-8
from logging import DEBUG
import zipfile
import os
import tempfile
import shutil
from StringIO import StringIO
from zope.interface import implements
from plone.transforms.interfaces import ITransform, IRankedTransform
from plone.transforms.message import PloneMessageFactory as _
from plone.transforms.transform import TransformResult
from plone.transforms.log import log
import plone.opendocument.utils as utils
HAS_LXML = True
try:
from lxml import etree
except ImportError:
HAS_LXML = False
class OpendocumentHtmlXsltTransform(object):
"""
XSL transform which transforms OpenDocument files into XHTML
"""
implements(ITransform, IRankedTransform)
inputs = ('application/vnd.oasis.opendocument.text',
'application/vnd.oasis.opendocument.text-template',
'application/vnd.oasis.opendocument.text-web',
'application/vnd.oasis.opendocument.spreadsheet',
'application/vnd.oasis.opendocument.spreadsheet-template',
'application/vnd.oasis.opendocument.presentation',
'application/vnd.oasis.opendocument.presentation-template',
)
output = 'text/html'
name = u'plone.opendocument.opendocument_html_xslt.OpendocumentHtmlXsltTransform'
title = _(u'title_opendocument_html_xslt',
default=u"OpenDocument to XHTML transform with XSL")
description = _(u'description_markdown_transform',
default=u"A transform which transforms OpenDocument files into XHTML \
with XSL")
available = False
rank = 1
xsl_stylesheet = os.path.join(os.getcwd(), os.path.dirname(__file__),\
'lib/odf2html/all-in-one.xsl')
xsl_stylesheet_param = {}
data = None
subobjects = {}
metadata = {}
errors = u''
_dataFiles = {}
_imageNames = {}
    def __init__(self):
        """Set up XSLT parameters and a temporary output file.

        The transform is only marked available when lxml was importable
        at module load time (HAS_LXML).
        """
        super(OpendocumentHtmlXsltTransform, self).__init__()
        if HAS_LXML:
            self.available = True
        # Parameters forwarded to the XSL stylesheet; values are strings
        # because they are passed through etree.XSLT.
        self.xsl_stylesheet_param = {
            'param_track_changes':"0",#display version changes
            'param_no_css':"0", #don't make css styles
            'scale':"1", #scale font size, (non zero integer value)
        }
        self.data = tempfile.NamedTemporaryFile()
def transform(self, data, options=None):
'''
Transforms data (OpenDocument file) to XHTML. It returns an
TransformResult object.
'''
if not self.available:
log(DEBUG, "The LXML library is required to use the %s transform "
% (self.name))
return None
self._prepareTrans(data)
if not self._dataFiles:
return None;
result = None
#XSL tranformation
try:
try:
etree.clearErrorLog()
parser = etree.XMLParser(remove_comments=True,\
remove_blank_text=True)
#concatenate all xml files
contentXML = etree.parse(self._concatDataFiles(), parser)
contentXML.xinclude()
#adjust file paths
root = contentXML.getroot()
images = root.xpath("//draw:image", {'draw' :\
'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0' })
for i in images:
imageName = i.get("{http://www.w3.org/1999/xlink}href")
imageName = os.path.basename(imageName)
if not self._imageNames.has_key(imageName):
self.errors = self.errors + u'''
Image file or OLE Object '%s' does not\
exist. Maybe it is\
not embedded in OpenDocument file?
''' % (imageName)
i.set("{http://www.w3.org/1999/xlink}href", imageName)
continue
imageName = self._imageNames[imageName]
i.set("{http://www.w3.org/1999/xlink}href", imageName)
#extract meta data
self._getMetaData(contentXML)
#xslt transformation
stylesheetXML = etree.parse(self.xsl_stylesheet, parser)
xslt = etree.XSLT(stylesheetXML)
resultXML = xslt(contentXML, **self.xsl_stylesheet_param)
docinfo = u'<?xml version=\'1.0\' encoding=\'utf-8\'?>\
\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
self._addCssSelectorPrefix(resultXML,'#odf_document ')
self.data.write(docinfo.encode('utf-8'))
resultXML.write(self.data, pretty_print=True)
self.data.seek(0)
#log non fatal errors and warnings
if parser.error_log:
self.errors = self.errors + u'''
Parse errors which are not fatal:
%s
''' % (parser.error_log)
if xslt.error_log:
self.errors = self.errors + u'''
XSLT errors which are not fatal:
%s
''' % (xslt.error_log)
for f in self._dataFiles.values():
f.close()
result = TransformResult(self.data,
subobjects=self.subobjects or {},
metadata=self.metadata or {},
errors=self.errors or None
)
except etree.LxmlError, e:
log(DEBUG,\
str(e) + ('\nlibxml error_log:\n') + str(e.error_log))
return None
except Exception, e:
log(DEBUG, str(e))
return None
finally:
self.data = tempfile.NamedTemporaryFile()
self.subobjects = {}
self.metadata = {}
self.errors = u''
self._dataFiles = {}
self._imageNames = {}
return result
def _prepareTrans(self, data):
'''
Extracts required files from data (opendocument file). They are stored
in self.subobjects and self._dataFiles.
'''
try:
#transform data to zip file object
data_ = tempfile.NamedTemporaryFile()
for chunk in data:
data_.write(chunk)
data_.seek(0)
dataZip = zipfile.ZipFile(data_)
dataIterator = utils.zipIterator(dataZip)
#extract content
for fileName, fileContent in dataIterator:
#getting data files
if (fileName == 'content.xml'):
content = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, content)
content.seek(0)
self._dataFiles['content'] = content
continue
if (fileName == 'styles.xml'):
styles = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, styles)
styles.seek(0)
self._dataFiles['styles'] = styles
continue
if (fileName == 'meta.xml'):
meta = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, meta)
meta.seek(0)
self._dataFiles['meta'] = meta
continue
#getting images
if ('Pictures/' in fileName):
imageName = os.path.basename(fileName)
imageContent = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, imageContent)
imageContent.seek(0)
fileContent.close()
#assert that the image is viewable with web browsers
imageName_, imageContent_ = utils.makeViewable((imageName, imageContent))
if not imageName_:
self.errors = self.errors + u'''
Image file '%s' could not be make viewable \
with web browser.
''' % (imageName)
imageName_ = imageName
imageContent_ = imageContent
#store image
self._imageNames[imageName] = imageName_
self.subobjects[imageName_] = imageContent_
dataZip.close()
except Exception, e:
self._dataFiles = None
self.subobjects = None
log(DEBUG, str(e))
def _concatDataFiles(self):
'''
Returns XML file object that concatenates all files stored in self._dataFiles
with xi:include.
'''
includeXML = lambda x: (x in self._dataFiles) and \
'<xi:include href="%s" />' % (self._dataFiles[x].name)
concat = StringIO(
'''<?xml version='1.0' encoding='UTF-8'?>
<office:document xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:office="urn:oasis:names:tc:opendocument:xmlns:office:1.0">
%s %s %s
</office:document>
'''
% (
includeXML('meta') or ' ',
includeXML('styles') or ' ',
includeXML('content') or ' ',
)
)
return concat
def _getMetaData(self, contentXML):
'''
Extracts all OpenDocument meta data from contentXML (ElementTree
object) and stores it in self.metadata.
'''
root = contentXML.getroot()
Elements = root.xpath("//office:meta", {'office'\
:'urn:oasis:names:tc:opendocument:xmlns:office:1.0'})
if not Elements:
self.errors = self.errors + u'''
There is no <office:meta> element to extract \
meta data.
'''
for element in Elements:
meta = u'{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}'
dc = u'{http://purl.org/dc/elements/1.1/}'
for m in element.iterchildren():
#regular elements
text = unicode(m.text).rstrip().lstrip()
prefix = unicode(m.prefix)
tag = unicode(m.tag)
tag = tag.replace(meta, u'')
tag = tag.replace(dc, u'')
#<meta:user-defined> elements
if tag.endswith('user-defined'):
tag = unicode(m.get(\
'{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}name'))
#<meta:document-statistic> elements
if tag.endswith('document-statistic'):
for tag_, text_ in m.items():
tag_ = unicode(tag_)
tag_ = tag_.replace(meta, u'')
text_ = unicode(text_).rstrip().lstrip()
self.metadata['meta:' + tag_] = text_
continue
#skip empty elements
if not m.text:
continue
self.metadata[prefix + ':' + tag] = text
def _addCssSelectorPrefix(self, htmlObject, selectorPrefix):
'''
This function takes the first style element of htmlObject (etree object)
and adds the selectorPrefix (string) to every CSS selector.
'''
style = None
style = htmlObject.find('.//{http://www.w3.org/1999/xhtml}style')
if not style is None:
text = style.text.replace('\n.', '\n' + selectorPrefix + '.')
style.text = text | random_line_split | |
opendocument_html_xslt.py | # coding: utf-8
from logging import DEBUG
import zipfile
import os
import tempfile
import shutil
from StringIO import StringIO
from zope.interface import implements
from plone.transforms.interfaces import ITransform, IRankedTransform
from plone.transforms.message import PloneMessageFactory as _
from plone.transforms.transform import TransformResult
from plone.transforms.log import log
import plone.opendocument.utils as utils
HAS_LXML = True
try:
from lxml import etree
except ImportError:
HAS_LXML = False
class OpendocumentHtmlXsltTransform(object):
"""
XSL transform which transforms OpenDocument files into XHTML
"""
implements(ITransform, IRankedTransform)
inputs = ('application/vnd.oasis.opendocument.text',
'application/vnd.oasis.opendocument.text-template',
'application/vnd.oasis.opendocument.text-web',
'application/vnd.oasis.opendocument.spreadsheet',
'application/vnd.oasis.opendocument.spreadsheet-template',
'application/vnd.oasis.opendocument.presentation',
'application/vnd.oasis.opendocument.presentation-template',
)
output = 'text/html'
name = u'plone.opendocument.opendocument_html_xslt.OpendocumentHtmlXsltTransform'
title = _(u'title_opendocument_html_xslt',
default=u"OpenDocument to XHTML transform with XSL")
description = _(u'description_markdown_transform',
default=u"A transform which transforms OpenDocument files into XHTML \
with XSL")
available = False
rank = 1
xsl_stylesheet = os.path.join(os.getcwd(), os.path.dirname(__file__),\
'lib/odf2html/all-in-one.xsl')
xsl_stylesheet_param = {}
data = None
subobjects = {}
metadata = {}
errors = u''
_dataFiles = {}
_imageNames = {}
def __init__(self):
super(OpendocumentHtmlXsltTransform, self).__init__()
if HAS_LXML:
self.available = True
self.xsl_stylesheet_param = {
'param_track_changes':"0",#display version changes
'param_no_css':"0", #don't make css styles
'scale':"1", #scale font size, (non zero integer value)
}
self.data = tempfile.NamedTemporaryFile()
def transform(self, data, options=None):
'''
Transforms data (OpenDocument file) to XHTML. It returns an
TransformResult object.
'''
if not self.available:
log(DEBUG, "The LXML library is required to use the %s transform "
% (self.name))
return None
self._prepareTrans(data)
if not self._dataFiles:
return None;
result = None
#XSL tranformation
try:
try:
etree.clearErrorLog()
parser = etree.XMLParser(remove_comments=True,\
remove_blank_text=True)
#concatenate all xml files
contentXML = etree.parse(self._concatDataFiles(), parser)
contentXML.xinclude()
#adjust file paths
root = contentXML.getroot()
images = root.xpath("//draw:image", {'draw' :\
'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0' })
for i in images:
imageName = i.get("{http://www.w3.org/1999/xlink}href")
imageName = os.path.basename(imageName)
if not self._imageNames.has_key(imageName):
self.errors = self.errors + u'''
Image file or OLE Object '%s' does not\
exist. Maybe it is\
not embedded in OpenDocument file?
''' % (imageName)
i.set("{http://www.w3.org/1999/xlink}href", imageName)
continue
imageName = self._imageNames[imageName]
i.set("{http://www.w3.org/1999/xlink}href", imageName)
#extract meta data
self._getMetaData(contentXML)
#xslt transformation
stylesheetXML = etree.parse(self.xsl_stylesheet, parser)
xslt = etree.XSLT(stylesheetXML)
resultXML = xslt(contentXML, **self.xsl_stylesheet_param)
docinfo = u'<?xml version=\'1.0\' encoding=\'utf-8\'?>\
\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
self._addCssSelectorPrefix(resultXML,'#odf_document ')
self.data.write(docinfo.encode('utf-8'))
resultXML.write(self.data, pretty_print=True)
self.data.seek(0)
#log non fatal errors and warnings
if parser.error_log:
self.errors = self.errors + u'''
Parse errors which are not fatal:
%s
''' % (parser.error_log)
if xslt.error_log:
self.errors = self.errors + u'''
XSLT errors which are not fatal:
%s
''' % (xslt.error_log)
for f in self._dataFiles.values():
f.close()
result = TransformResult(self.data,
subobjects=self.subobjects or {},
metadata=self.metadata or {},
errors=self.errors or None
)
except etree.LxmlError, e:
log(DEBUG,\
str(e) + ('\nlibxml error_log:\n') + str(e.error_log))
return None
except Exception, e:
log(DEBUG, str(e))
return None
finally:
self.data = tempfile.NamedTemporaryFile()
self.subobjects = {}
self.metadata = {}
self.errors = u''
self._dataFiles = {}
self._imageNames = {}
return result
def _prepareTrans(self, data):
|
def _concatDataFiles(self):
'''
Returns XML file object that concatenates all files stored in self._dataFiles
with xi:include.
'''
includeXML = lambda x: (x in self._dataFiles) and \
'<xi:include href="%s" />' % (self._dataFiles[x].name)
concat = StringIO(
'''<?xml version='1.0' encoding='UTF-8'?>
<office:document xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:office="urn:oasis:names:tc:opendocument:xmlns:office:1.0">
%s %s %s
</office:document>
'''
% (
includeXML('meta') or ' ',
includeXML('styles') or ' ',
includeXML('content') or ' ',
)
)
return concat
def _getMetaData(self, contentXML):
'''
Extracts all OpenDocument meta data from contentXML (ElementTree
object) and stores it in self.metadata.
'''
root = contentXML.getroot()
Elements = root.xpath("//office:meta", {'office'\
:'urn:oasis:names:tc:opendocument:xmlns:office:1.0'})
if not Elements:
self.errors = self.errors + u'''
There is no <office:meta> element to extract \
meta data.
'''
for element in Elements:
meta = u'{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}'
dc = u'{http://purl.org/dc/elements/1.1/}'
for m in element.iterchildren():
#regular elements
text = unicode(m.text).rstrip().lstrip()
prefix = unicode(m.prefix)
tag = unicode(m.tag)
tag = tag.replace(meta, u'')
tag = tag.replace(dc, u'')
#<meta:user-defined> elements
if tag.endswith('user-defined'):
tag = unicode(m.get(\
'{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}name'))
#<meta:document-statistic> elements
if tag.endswith('document-statistic'):
for tag_, text_ in m.items():
tag_ = unicode(tag_)
tag_ = tag_.replace(meta, u'')
text_ = unicode(text_).rstrip().lstrip()
self.metadata['meta:' + tag_] = text_
continue
#skip empty elements
if not m.text:
continue
self.metadata[prefix + ':' + tag] = text
def _addCssSelectorPrefix(self, htmlObject, selectorPrefix):
'''
This function takes the first style element of htmlObject (etree object)
and adds the selectorPrefix (string) to every CSS selector.
'''
style = None
style = htmlObject.find('.//{http://www.w3.org/1999/xhtml}style')
if not style is None:
text = style.text.replace('\n.', '\n' + selectorPrefix + '.')
style.text = text
| '''
Extracts required files from data (opendocument file). They are stored
in self.subobjects and self._dataFiles.
'''
try:
#transform data to zip file object
data_ = tempfile.NamedTemporaryFile()
for chunk in data:
data_.write(chunk)
data_.seek(0)
dataZip = zipfile.ZipFile(data_)
dataIterator = utils.zipIterator(dataZip)
#extract content
for fileName, fileContent in dataIterator:
#getting data files
if (fileName == 'content.xml'):
content = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, content)
content.seek(0)
self._dataFiles['content'] = content
continue
if (fileName == 'styles.xml'):
styles = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, styles)
styles.seek(0)
self._dataFiles['styles'] = styles
continue
if (fileName == 'meta.xml'):
meta = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, meta)
meta.seek(0)
self._dataFiles['meta'] = meta
continue
#getting images
if ('Pictures/' in fileName):
imageName = os.path.basename(fileName)
imageContent = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, imageContent)
imageContent.seek(0)
fileContent.close()
#assert that the image is viewable with web browsers
imageName_, imageContent_ = utils.makeViewable((imageName, imageContent))
if not imageName_:
self.errors = self.errors + u'''
Image file '%s' could not be make viewable \
with web browser.
''' % (imageName)
imageName_ = imageName
imageContent_ = imageContent
#store image
self._imageNames[imageName] = imageName_
self.subobjects[imageName_] = imageContent_
dataZip.close()
except Exception, e:
self._dataFiles = None
self.subobjects = None
log(DEBUG, str(e)) | identifier_body |
opendocument_html_xslt.py | # coding: utf-8
from logging import DEBUG
import zipfile
import os
import tempfile
import shutil
from StringIO import StringIO
from zope.interface import implements
from plone.transforms.interfaces import ITransform, IRankedTransform
from plone.transforms.message import PloneMessageFactory as _
from plone.transforms.transform import TransformResult
from plone.transforms.log import log
import plone.opendocument.utils as utils
HAS_LXML = True
try:
from lxml import etree
except ImportError:
HAS_LXML = False
class OpendocumentHtmlXsltTransform(object):
"""
XSL transform which transforms OpenDocument files into XHTML
"""
implements(ITransform, IRankedTransform)
inputs = ('application/vnd.oasis.opendocument.text',
'application/vnd.oasis.opendocument.text-template',
'application/vnd.oasis.opendocument.text-web',
'application/vnd.oasis.opendocument.spreadsheet',
'application/vnd.oasis.opendocument.spreadsheet-template',
'application/vnd.oasis.opendocument.presentation',
'application/vnd.oasis.opendocument.presentation-template',
)
output = 'text/html'
name = u'plone.opendocument.opendocument_html_xslt.OpendocumentHtmlXsltTransform'
title = _(u'title_opendocument_html_xslt',
default=u"OpenDocument to XHTML transform with XSL")
description = _(u'description_markdown_transform',
default=u"A transform which transforms OpenDocument files into XHTML \
with XSL")
available = False
rank = 1
xsl_stylesheet = os.path.join(os.getcwd(), os.path.dirname(__file__),\
'lib/odf2html/all-in-one.xsl')
xsl_stylesheet_param = {}
data = None
subobjects = {}
metadata = {}
errors = u''
_dataFiles = {}
_imageNames = {}
def __init__(self):
super(OpendocumentHtmlXsltTransform, self).__init__()
if HAS_LXML:
self.available = True
self.xsl_stylesheet_param = {
'param_track_changes':"0",#display version changes
'param_no_css':"0", #don't make css styles
'scale':"1", #scale font size, (non zero integer value)
}
self.data = tempfile.NamedTemporaryFile()
def transform(self, data, options=None):
'''
Transforms data (OpenDocument file) to XHTML. It returns an
TransformResult object.
'''
if not self.available:
log(DEBUG, "The LXML library is required to use the %s transform "
% (self.name))
return None
self._prepareTrans(data)
if not self._dataFiles:
return None;
result = None
#XSL tranformation
try:
try:
etree.clearErrorLog()
parser = etree.XMLParser(remove_comments=True,\
remove_blank_text=True)
#concatenate all xml files
contentXML = etree.parse(self._concatDataFiles(), parser)
contentXML.xinclude()
#adjust file paths
root = contentXML.getroot()
images = root.xpath("//draw:image", {'draw' :\
'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0' })
for i in images:
imageName = i.get("{http://www.w3.org/1999/xlink}href")
imageName = os.path.basename(imageName)
if not self._imageNames.has_key(imageName):
self.errors = self.errors + u'''
Image file or OLE Object '%s' does not\
exist. Maybe it is\
not embedded in OpenDocument file?
''' % (imageName)
i.set("{http://www.w3.org/1999/xlink}href", imageName)
continue
imageName = self._imageNames[imageName]
i.set("{http://www.w3.org/1999/xlink}href", imageName)
#extract meta data
self._getMetaData(contentXML)
#xslt transformation
stylesheetXML = etree.parse(self.xsl_stylesheet, parser)
xslt = etree.XSLT(stylesheetXML)
resultXML = xslt(contentXML, **self.xsl_stylesheet_param)
docinfo = u'<?xml version=\'1.0\' encoding=\'utf-8\'?>\
\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
self._addCssSelectorPrefix(resultXML,'#odf_document ')
self.data.write(docinfo.encode('utf-8'))
resultXML.write(self.data, pretty_print=True)
self.data.seek(0)
#log non fatal errors and warnings
if parser.error_log:
self.errors = self.errors + u'''
Parse errors which are not fatal:
%s
''' % (parser.error_log)
if xslt.error_log:
self.errors = self.errors + u'''
XSLT errors which are not fatal:
%s
''' % (xslt.error_log)
for f in self._dataFiles.values():
f.close()
result = TransformResult(self.data,
subobjects=self.subobjects or {},
metadata=self.metadata or {},
errors=self.errors or None
)
except etree.LxmlError, e:
log(DEBUG,\
str(e) + ('\nlibxml error_log:\n') + str(e.error_log))
return None
except Exception, e:
log(DEBUG, str(e))
return None
finally:
self.data = tempfile.NamedTemporaryFile()
self.subobjects = {}
self.metadata = {}
self.errors = u''
self._dataFiles = {}
self._imageNames = {}
return result
def _prepareTrans(self, data):
'''
Extracts required files from data (opendocument file). They are stored
in self.subobjects and self._dataFiles.
'''
try:
#transform data to zip file object
data_ = tempfile.NamedTemporaryFile()
for chunk in data:
|
data_.seek(0)
dataZip = zipfile.ZipFile(data_)
dataIterator = utils.zipIterator(dataZip)
#extract content
for fileName, fileContent in dataIterator:
#getting data files
if (fileName == 'content.xml'):
content = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, content)
content.seek(0)
self._dataFiles['content'] = content
continue
if (fileName == 'styles.xml'):
styles = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, styles)
styles.seek(0)
self._dataFiles['styles'] = styles
continue
if (fileName == 'meta.xml'):
meta = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, meta)
meta.seek(0)
self._dataFiles['meta'] = meta
continue
#getting images
if ('Pictures/' in fileName):
imageName = os.path.basename(fileName)
imageContent = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, imageContent)
imageContent.seek(0)
fileContent.close()
#assert that the image is viewable with web browsers
imageName_, imageContent_ = utils.makeViewable((imageName, imageContent))
if not imageName_:
self.errors = self.errors + u'''
Image file '%s' could not be make viewable \
with web browser.
''' % (imageName)
imageName_ = imageName
imageContent_ = imageContent
#store image
self._imageNames[imageName] = imageName_
self.subobjects[imageName_] = imageContent_
dataZip.close()
except Exception, e:
self._dataFiles = None
self.subobjects = None
log(DEBUG, str(e))
def _concatDataFiles(self):
'''
Returns XML file object that concatenates all files stored in self._dataFiles
with xi:include.
'''
includeXML = lambda x: (x in self._dataFiles) and \
'<xi:include href="%s" />' % (self._dataFiles[x].name)
concat = StringIO(
'''<?xml version='1.0' encoding='UTF-8'?>
<office:document xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:office="urn:oasis:names:tc:opendocument:xmlns:office:1.0">
%s %s %s
</office:document>
'''
% (
includeXML('meta') or ' ',
includeXML('styles') or ' ',
includeXML('content') or ' ',
)
)
return concat
def _getMetaData(self, contentXML):
'''
Extracts all OpenDocument meta data from contentXML (ElementTree
object) and stores it in self.metadata.
'''
root = contentXML.getroot()
Elements = root.xpath("//office:meta", {'office'\
:'urn:oasis:names:tc:opendocument:xmlns:office:1.0'})
if not Elements:
self.errors = self.errors + u'''
There is no <office:meta> element to extract \
meta data.
'''
for element in Elements:
meta = u'{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}'
dc = u'{http://purl.org/dc/elements/1.1/}'
for m in element.iterchildren():
#regular elements
text = unicode(m.text).rstrip().lstrip()
prefix = unicode(m.prefix)
tag = unicode(m.tag)
tag = tag.replace(meta, u'')
tag = tag.replace(dc, u'')
#<meta:user-defined> elements
if tag.endswith('user-defined'):
tag = unicode(m.get(\
'{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}name'))
#<meta:document-statistic> elements
if tag.endswith('document-statistic'):
for tag_, text_ in m.items():
tag_ = unicode(tag_)
tag_ = tag_.replace(meta, u'')
text_ = unicode(text_).rstrip().lstrip()
self.metadata['meta:' + tag_] = text_
continue
#skip empty elements
if not m.text:
continue
self.metadata[prefix + ':' + tag] = text
def _addCssSelectorPrefix(self, htmlObject, selectorPrefix):
'''
This function takes the first style element of htmlObject (etree object)
and adds the selectorPrefix (string) to every CSS selector.
'''
style = None
style = htmlObject.find('.//{http://www.w3.org/1999/xhtml}style')
if not style is None:
text = style.text.replace('\n.', '\n' + selectorPrefix + '.')
style.text = text
| data_.write(chunk) | conditional_block |
opendocument_html_xslt.py | # coding: utf-8
from logging import DEBUG
import zipfile
import os
import tempfile
import shutil
from StringIO import StringIO
from zope.interface import implements
from plone.transforms.interfaces import ITransform, IRankedTransform
from plone.transforms.message import PloneMessageFactory as _
from plone.transforms.transform import TransformResult
from plone.transforms.log import log
import plone.opendocument.utils as utils
HAS_LXML = True
try:
from lxml import etree
except ImportError:
HAS_LXML = False
class OpendocumentHtmlXsltTransform(object):
"""
XSL transform which transforms OpenDocument files into XHTML
"""
implements(ITransform, IRankedTransform)
inputs = ('application/vnd.oasis.opendocument.text',
'application/vnd.oasis.opendocument.text-template',
'application/vnd.oasis.opendocument.text-web',
'application/vnd.oasis.opendocument.spreadsheet',
'application/vnd.oasis.opendocument.spreadsheet-template',
'application/vnd.oasis.opendocument.presentation',
'application/vnd.oasis.opendocument.presentation-template',
)
output = 'text/html'
name = u'plone.opendocument.opendocument_html_xslt.OpendocumentHtmlXsltTransform'
title = _(u'title_opendocument_html_xslt',
default=u"OpenDocument to XHTML transform with XSL")
description = _(u'description_markdown_transform',
default=u"A transform which transforms OpenDocument files into XHTML \
with XSL")
available = False
rank = 1
xsl_stylesheet = os.path.join(os.getcwd(), os.path.dirname(__file__),\
'lib/odf2html/all-in-one.xsl')
xsl_stylesheet_param = {}
data = None
subobjects = {}
metadata = {}
errors = u''
_dataFiles = {}
_imageNames = {}
def __init__(self):
super(OpendocumentHtmlXsltTransform, self).__init__()
if HAS_LXML:
self.available = True
self.xsl_stylesheet_param = {
'param_track_changes':"0",#display version changes
'param_no_css':"0", #don't make css styles
'scale':"1", #scale font size, (non zero integer value)
}
self.data = tempfile.NamedTemporaryFile()
def | (self, data, options=None):
'''
Transforms data (OpenDocument file) to XHTML. It returns an
TransformResult object.
'''
if not self.available:
log(DEBUG, "The LXML library is required to use the %s transform "
% (self.name))
return None
self._prepareTrans(data)
if not self._dataFiles:
return None;
result = None
#XSL tranformation
try:
try:
etree.clearErrorLog()
parser = etree.XMLParser(remove_comments=True,\
remove_blank_text=True)
#concatenate all xml files
contentXML = etree.parse(self._concatDataFiles(), parser)
contentXML.xinclude()
#adjust file paths
root = contentXML.getroot()
images = root.xpath("//draw:image", {'draw' :\
'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0' })
for i in images:
imageName = i.get("{http://www.w3.org/1999/xlink}href")
imageName = os.path.basename(imageName)
if not self._imageNames.has_key(imageName):
self.errors = self.errors + u'''
Image file or OLE Object '%s' does not\
exist. Maybe it is\
not embedded in OpenDocument file?
''' % (imageName)
i.set("{http://www.w3.org/1999/xlink}href", imageName)
continue
imageName = self._imageNames[imageName]
i.set("{http://www.w3.org/1999/xlink}href", imageName)
#extract meta data
self._getMetaData(contentXML)
#xslt transformation
stylesheetXML = etree.parse(self.xsl_stylesheet, parser)
xslt = etree.XSLT(stylesheetXML)
resultXML = xslt(contentXML, **self.xsl_stylesheet_param)
docinfo = u'<?xml version=\'1.0\' encoding=\'utf-8\'?>\
\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
self._addCssSelectorPrefix(resultXML,'#odf_document ')
self.data.write(docinfo.encode('utf-8'))
resultXML.write(self.data, pretty_print=True)
self.data.seek(0)
#log non fatal errors and warnings
if parser.error_log:
self.errors = self.errors + u'''
Parse errors which are not fatal:
%s
''' % (parser.error_log)
if xslt.error_log:
self.errors = self.errors + u'''
XSLT errors which are not fatal:
%s
''' % (xslt.error_log)
for f in self._dataFiles.values():
f.close()
result = TransformResult(self.data,
subobjects=self.subobjects or {},
metadata=self.metadata or {},
errors=self.errors or None
)
except etree.LxmlError, e:
log(DEBUG,\
str(e) + ('\nlibxml error_log:\n') + str(e.error_log))
return None
except Exception, e:
log(DEBUG, str(e))
return None
finally:
self.data = tempfile.NamedTemporaryFile()
self.subobjects = {}
self.metadata = {}
self.errors = u''
self._dataFiles = {}
self._imageNames = {}
return result
def _prepareTrans(self, data):
'''
Extracts required files from data (opendocument file). They are stored
in self.subobjects and self._dataFiles.
'''
try:
#transform data to zip file object
data_ = tempfile.NamedTemporaryFile()
for chunk in data:
data_.write(chunk)
data_.seek(0)
dataZip = zipfile.ZipFile(data_)
dataIterator = utils.zipIterator(dataZip)
#extract content
for fileName, fileContent in dataIterator:
#getting data files
if (fileName == 'content.xml'):
content = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, content)
content.seek(0)
self._dataFiles['content'] = content
continue
if (fileName == 'styles.xml'):
styles = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, styles)
styles.seek(0)
self._dataFiles['styles'] = styles
continue
if (fileName == 'meta.xml'):
meta = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, meta)
meta.seek(0)
self._dataFiles['meta'] = meta
continue
#getting images
if ('Pictures/' in fileName):
imageName = os.path.basename(fileName)
imageContent = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, imageContent)
imageContent.seek(0)
fileContent.close()
#assert that the image is viewable with web browsers
imageName_, imageContent_ = utils.makeViewable((imageName, imageContent))
if not imageName_:
self.errors = self.errors + u'''
Image file '%s' could not be make viewable \
with web browser.
''' % (imageName)
imageName_ = imageName
imageContent_ = imageContent
#store image
self._imageNames[imageName] = imageName_
self.subobjects[imageName_] = imageContent_
dataZip.close()
except Exception, e:
self._dataFiles = None
self.subobjects = None
log(DEBUG, str(e))
def _concatDataFiles(self):
'''
Returns XML file object that concatenates all files stored in self._dataFiles
with xi:include.
'''
includeXML = lambda x: (x in self._dataFiles) and \
'<xi:include href="%s" />' % (self._dataFiles[x].name)
concat = StringIO(
'''<?xml version='1.0' encoding='UTF-8'?>
<office:document xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:office="urn:oasis:names:tc:opendocument:xmlns:office:1.0">
%s %s %s
</office:document>
'''
% (
includeXML('meta') or ' ',
includeXML('styles') or ' ',
includeXML('content') or ' ',
)
)
return concat
def _getMetaData(self, contentXML):
'''
Extracts all OpenDocument meta data from contentXML (ElementTree
object) and stores it in self.metadata.
'''
root = contentXML.getroot()
Elements = root.xpath("//office:meta", {'office'\
:'urn:oasis:names:tc:opendocument:xmlns:office:1.0'})
if not Elements:
self.errors = self.errors + u'''
There is no <office:meta> element to extract \
meta data.
'''
for element in Elements:
meta = u'{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}'
dc = u'{http://purl.org/dc/elements/1.1/}'
for m in element.iterchildren():
#regular elements
text = unicode(m.text).rstrip().lstrip()
prefix = unicode(m.prefix)
tag = unicode(m.tag)
tag = tag.replace(meta, u'')
tag = tag.replace(dc, u'')
#<meta:user-defined> elements
if tag.endswith('user-defined'):
tag = unicode(m.get(\
'{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}name'))
#<meta:document-statistic> elements
if tag.endswith('document-statistic'):
for tag_, text_ in m.items():
tag_ = unicode(tag_)
tag_ = tag_.replace(meta, u'')
text_ = unicode(text_).rstrip().lstrip()
self.metadata['meta:' + tag_] = text_
continue
#skip empty elements
if not m.text:
continue
self.metadata[prefix + ':' + tag] = text
def _addCssSelectorPrefix(self, htmlObject, selectorPrefix):
'''
This function takes the first style element of htmlObject (etree object)
and adds the selectorPrefix (string) to every CSS selector.
'''
style = None
style = htmlObject.find('.//{http://www.w3.org/1999/xhtml}style')
if not style is None:
text = style.text.replace('\n.', '\n' + selectorPrefix + '.')
style.text = text
| transform | identifier_name |
event.go | // Copyright 2014 Orchestrate, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gorc2
import (
"bytes"
"encoding/json"
"fmt"
"net/url"
"strconv"
"strings"
"time"
)
// Internal type that represents the reply form a JSON event fetch.
type jsonEvent struct {
Ordinal int64 `json:"ordinal"`
Path jsonPath `json:"path"`
Timestamp int64 `json:"timestamp"`
Value json.RawMessage `json:"value"`
}
//
// AddEvent
//
// Adds a new event to the collection with the given key, and type. The
// timestamp of the new event will be set by the Orchestrate server to the
// time that the request was processed. Unlike Create this function will
// created an event even if an event already exists with that tuple. The
// new event will be given a new Ordinal value. To update and existing
// Event use UpdateEvent() instead.
//
// Note that the key should exist otherwise this call will have unpredictable
// results.
func (c *Collection) AddEvent(
key, typ string, value interface{},
) (*Event, error) {
return c.innerAddEvent(key, typ, nil, value)
}
// Like AddEvent() except this lets you specify the timestamp that will be
// attached to the event.
func (c *Collection) AddEventWithTimestamp(
key, typ string, ts time.Time, value interface{},
) (*Event, error) {
return c.innerAddEvent(key, typ, &ts, value)
}
// Inner implementation of AddEvent*
func (c *Collection) innerAddEvent(
key, typ string, ts *time.Time, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual POST
headers := map[string]string{"Content-Type": "application/json"}
var path string
if ts != nil {
path = fmt.Sprintf("%s/%s/events/%s/%d", c.Name, key, typ,
ts.UnixNano()/1000000)
} else {
path = fmt.Sprintf("%s/%s/events/%s", c.Name, key, typ)
}
resp, err := c.client.emptyReply("POST", path, headers,
bytes.NewBuffer(event.Value), 201)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// DeleteEvent
//
// Removes an event from the collection. This succeeds even if the event did
// not exist prior to this call. Note that all event deletes are Final and can
// not be undone.
func (c *Collection) DeleteEvent(
key, typ string, ts time.Time, ordinal int64,
) error {
path := fmt.Sprintf("%s/%s/events/%s/%d/%d?purge=true",
c.Name, key, typ, ts.UnixNano()/1000000, ordinal)
_, err := c.client.emptyReply("DELETE", path, nil, nil, 204)
return err
}
//
// GetEvent
//
// Returns an individual event with the given details.
func (c *Collection) GetEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Perform the actual GET
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
var responseData jsonEvent
_, err := c.client.jsonReply("GET", path, nil, 200, &responseData)
if err != nil {
return nil, err
}
// Move the data from the returned values into the Event object.
event.Value = responseData.Value
event.Ref = responseData.Path.Ref
secs := responseData.Timestamp / 1000
nsecs := (responseData.Timestamp % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = responseData.Ordinal
// If the user provided us a place to unmarshal the 'value' field into
// we do that here.
if value != nil {
return event, event.Unmarshal(value)
}
// Success
return event, nil
}
//
// UpdateEvent
//
// Updates an event at the given location. In order for this to work the Event
// must exist prior to this call.
func (c *Collection) UpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
headers := map[string]string{"Content-Type": "application/json"}
return c.innerUpdateEvent(key, typ, ts, ordinal, value, headers)
}
// Inner implementation used in both UpdateEvent and Event.Update.
func (c *Collection) innerUpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
headers map[string]string,
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual PUT
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
resp, err := c.client.emptyReply("PUT", path, headers,
bytes.NewBuffer(event.Value), 204)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// ListEvents
//
//
// Search
//
// Provides optional searching parameters to a cal to ListEvents()
type ListEventsQuery struct {
// The number of results to return per call to Orchestrate. The default
// if this is not set is to return 10 at a time, the maximum that can be
// returned is 100.
Limit int
// This is the timestamp and ordinal that should be the oldest item
// included in the Event listing. Since Events a re listed newest to oldest
// this will be the last item returned (if it exists). The precision of
// the time value is miliseconds.
Start time.Time
StartOrdinal int64
// Events up to this timestamp will be included in the listing. Note that
// if EndOrdinal is not set then End behaves the same as Before. The time
// till be truncated to miliseconds.
End time.Time
EndOrdinal int64
// After the time/ordinal pairing which all events must be newer than in
// order to be included in the results. Leaving Ordinal at zero has the
// effect of including all events with the same timestamp (leaving after
// to work like Start). The time will be truncated to miliseconds for
// the search.
After time.Time
AfterOrdinal int64
// Only include listing before this time stamp. Optionally you can include
// an ordinal as well which will be used if an event exists at the exact
// same ms as Before. The precision of this time value is in miliseconds.
Before time.Time
BeforeOrdinal int64
}
// Sets up a Events listing. This does not actually perform the query, that is
// done on the first call to Next() in the iterator. If opts is nil then
// default listing parameters are used, which will return all events and
// limits the query to 10 items at a time.
func (c *Collection) ListEvents(
key, typ string, opts *ListEventsQuery,
) *Iterator {
var path string
// Build a query from the user provided values.
if opts != nil {
query := make(url.Values, 10)
if opts.Limit != 0 {
query.Add("limit", strconv.Itoa(opts.Limit))
}
var defaultTime time.Time
if opts.After != defaultTime {
if opts.AfterOrdinal != 0 {
query.Add("afterEvent", fmt.Sprintf("%d/%d",
opts.After.UnixNano()/1000000, opts.AfterOrdinal))
} else {
query.Add("afterEvent",
strconv.FormatInt(opts.After.UnixNano()/1000000, 10))
}
}
if opts.Before != defaultTime {
if opts.BeforeOrdinal != 0 {
query.Add("beforeEvent", fmt.Sprintf("%d/%d",
opts.Before.UnixNano()/1000000, opts.BeforeOrdinal))
} else {
query.Add("beforeEvent",
strconv.FormatInt(opts.Before.UnixNano()/1000000, 10))
}
}
if opts.End != defaultTime {
if opts.EndOrdinal != 0 {
query.Add("endEvent", fmt.Sprintf("%d/%d",
opts.End.UnixNano()/1000000, opts.EndOrdinal))
} else {
query.Add("endEvent",
strconv.FormatInt(opts.End.UnixNano()/1000000, 10))
}
}
if opts.Start != defaultTime |
// Encode the path
path = c.Name + "/" + key + "/events/" + typ + "?" + query.Encode()
} else {
path = c.Name + "/" + key + "/events/" + typ
}
return &Iterator{
client: c.client,
iteratingEvents: true,
next: path,
}
}
| {
if opts.StartOrdinal != 0 {
query.Add("startEvent", fmt.Sprintf("%d/%d",
opts.Start.UnixNano()/1000000, opts.StartOrdinal))
} else {
query.Add("startEvent",
strconv.FormatInt(opts.Start.UnixNano()/1000000, 10))
}
} | conditional_block |
event.go | // Copyright 2014 Orchestrate, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gorc2
import (
"bytes"
"encoding/json"
"fmt"
"net/url"
"strconv"
"strings"
"time"
)
// Internal type that represents the reply form a JSON event fetch.
type jsonEvent struct {
Ordinal int64 `json:"ordinal"`
Path jsonPath `json:"path"`
Timestamp int64 `json:"timestamp"`
Value json.RawMessage `json:"value"`
}
//
// AddEvent
//
// Adds a new event to the collection with the given key, and type. The
// timestamp of the new event will be set by the Orchestrate server to the
// time that the request was processed. Unlike Create this function will
// created an event even if an event already exists with that tuple. The
// new event will be given a new Ordinal value. To update and existing
// Event use UpdateEvent() instead.
//
// Note that the key should exist otherwise this call will have unpredictable
// results.
func (c *Collection) AddEvent(
key, typ string, value interface{},
) (*Event, error) {
return c.innerAddEvent(key, typ, nil, value)
}
// Like AddEvent() except this lets you specify the timestamp that will be
// attached to the event.
func (c *Collection) AddEventWithTimestamp(
key, typ string, ts time.Time, value interface{},
) (*Event, error) {
return c.innerAddEvent(key, typ, &ts, value)
}
// Inner implementation of AddEvent*
func (c *Collection) innerAddEvent(
key, typ string, ts *time.Time, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual POST
headers := map[string]string{"Content-Type": "application/json"}
var path string
if ts != nil {
path = fmt.Sprintf("%s/%s/events/%s/%d", c.Name, key, typ,
ts.UnixNano()/1000000)
} else {
path = fmt.Sprintf("%s/%s/events/%s", c.Name, key, typ)
}
resp, err := c.client.emptyReply("POST", path, headers,
bytes.NewBuffer(event.Value), 201)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// DeleteEvent
//
// Removes an event from the collection. This succeeds even if the event did
// not exist prior to this call. Note that all event deletes are Final and can
// not be undone.
func (c *Collection) DeleteEvent(
key, typ string, ts time.Time, ordinal int64,
) error {
path := fmt.Sprintf("%s/%s/events/%s/%d/%d?purge=true",
c.Name, key, typ, ts.UnixNano()/1000000, ordinal)
_, err := c.client.emptyReply("DELETE", path, nil, nil, 204)
return err
}
//
// GetEvent
//
// Returns an individual event with the given details.
func (c *Collection) | (
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Perform the actual GET
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
var responseData jsonEvent
_, err := c.client.jsonReply("GET", path, nil, 200, &responseData)
if err != nil {
return nil, err
}
// Move the data from the returned values into the Event object.
event.Value = responseData.Value
event.Ref = responseData.Path.Ref
secs := responseData.Timestamp / 1000
nsecs := (responseData.Timestamp % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = responseData.Ordinal
// If the user provided us a place to unmarshal the 'value' field into
// we do that here.
if value != nil {
return event, event.Unmarshal(value)
}
// Success
return event, nil
}
//
// UpdateEvent
//
// Updates an event at the given location. In order for this to work the Event
// must exist prior to this call.
func (c *Collection) UpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
headers := map[string]string{"Content-Type": "application/json"}
return c.innerUpdateEvent(key, typ, ts, ordinal, value, headers)
}
// Inner implementation used in both UpdateEvent and Event.Update.
func (c *Collection) innerUpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
headers map[string]string,
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual PUT
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
resp, err := c.client.emptyReply("PUT", path, headers,
bytes.NewBuffer(event.Value), 204)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// ListEvents
//
//
// Search
//
// Provides optional searching parameters to a cal to ListEvents()
type ListEventsQuery struct {
// The number of results to return per call to Orchestrate. The default
// if this is not set is to return 10 at a time, the maximum that can be
// returned is 100.
Limit int
// This is the timestamp and ordinal that should be the oldest item
// included in the Event listing. Since Events a re listed newest to oldest
// this will be the last item returned (if it exists). The precision of
// the time value is miliseconds.
Start time.Time
StartOrdinal int64
// Events up to this timestamp will be included in the listing. Note that
// if EndOrdinal is not set then End behaves the same as Before. The time
// till be truncated to miliseconds.
End time.Time
EndOrdinal int64
// After the time/ordinal pairing which all events must be newer than in
// order to be included in the results. Leaving Ordinal at zero has the
// effect of including all events with the same timestamp (leaving after
// to work like Start). The time will be truncated to miliseconds for
// the search.
After time.Time
AfterOrdinal int64
// Only include listing before this time stamp. Optionally you can include
// an ordinal as well which will be used if an event exists at the exact
// same ms as Before. The precision of this time value is in miliseconds.
Before time.Time
BeforeOrdinal int64
}
// Sets up a Events listing. This does not actually perform the query, that is
// done on the first call to Next() in the iterator. If opts is nil then
// default listing parameters are used, which will return all events and
// limits the query to 10 items at a time.
func (c *Collection) ListEvents(
key, typ string, opts *ListEventsQuery,
) *Iterator {
var path string
// Build a query from the user provided values.
if opts != nil {
query := make(url.Values, 10)
if opts.Limit != 0 {
query.Add("limit", strconv.Itoa(opts.Limit))
}
var defaultTime time.Time
if opts.After != defaultTime {
if opts.AfterOrdinal != 0 {
query.Add("afterEvent", fmt.Sprintf("%d/%d",
opts.After.UnixNano()/1000000, opts.AfterOrdinal))
} else {
query.Add("afterEvent",
strconv.FormatInt(opts.After.UnixNano()/1000000, 10))
}
}
if opts.Before != defaultTime {
if opts.BeforeOrdinal != 0 {
query.Add("beforeEvent", fmt.Sprintf("%d/%d",
opts.Before.UnixNano()/1000000, opts.BeforeOrdinal))
} else {
query.Add("beforeEvent",
strconv.FormatInt(opts.Before.UnixNano()/1000000, 10))
}
}
if opts.End != defaultTime {
if opts.EndOrdinal != 0 {
query.Add("endEvent", fmt.Sprintf("%d/%d",
opts.End.UnixNano()/1000000, opts.EndOrdinal))
} else {
query.Add("endEvent",
strconv.FormatInt(opts.End.UnixNano()/1000000, 10))
}
}
if opts.Start != defaultTime {
if opts.StartOrdinal != 0 {
query.Add("startEvent", fmt.Sprintf("%d/%d",
opts.Start.UnixNano()/1000000, opts.StartOrdinal))
} else {
query.Add("startEvent",
strconv.FormatInt(opts.Start.UnixNano()/1000000, 10))
}
}
// Encode the path
path = c.Name + "/" + key + "/events/" + typ + "?" + query.Encode()
} else {
path = c.Name + "/" + key + "/events/" + typ
}
return &Iterator{
client: c.client,
iteratingEvents: true,
next: path,
}
}
| GetEvent | identifier_name |
event.go | // Copyright 2014 Orchestrate, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gorc2
import (
"bytes"
"encoding/json"
"fmt"
"net/url"
"strconv"
"strings"
"time"
)
// Internal type that represents the reply form a JSON event fetch.
type jsonEvent struct {
Ordinal int64 `json:"ordinal"`
Path jsonPath `json:"path"`
Timestamp int64 `json:"timestamp"`
Value json.RawMessage `json:"value"`
}
//
// AddEvent
//
// Adds a new event to the collection with the given key, and type. The
// timestamp of the new event will be set by the Orchestrate server to the
// time that the request was processed. Unlike Create this function will
// created an event even if an event already exists with that tuple. The
// new event will be given a new Ordinal value. To update and existing
// Event use UpdateEvent() instead.
//
// Note that the key should exist otherwise this call will have unpredictable
// results.
func (c *Collection) AddEvent(
key, typ string, value interface{},
) (*Event, error) {
return c.innerAddEvent(key, typ, nil, value)
}
// Like AddEvent() except this lets you specify the timestamp that will be
// attached to the event.
func (c *Collection) AddEventWithTimestamp(
key, typ string, ts time.Time, value interface{},
) (*Event, error) {
return c.innerAddEvent(key, typ, &ts, value)
}
// Inner implementation of AddEvent*
func (c *Collection) innerAddEvent(
key, typ string, ts *time.Time, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key, | if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual POST
headers := map[string]string{"Content-Type": "application/json"}
var path string
if ts != nil {
path = fmt.Sprintf("%s/%s/events/%s/%d", c.Name, key, typ,
ts.UnixNano()/1000000)
} else {
path = fmt.Sprintf("%s/%s/events/%s", c.Name, key, typ)
}
resp, err := c.client.emptyReply("POST", path, headers,
bytes.NewBuffer(event.Value), 201)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// DeleteEvent
//
// Removes an event from the collection. This succeeds even if the event did
// not exist prior to this call. Note that all event deletes are Final and can
// not be undone.
func (c *Collection) DeleteEvent(
key, typ string, ts time.Time, ordinal int64,
) error {
path := fmt.Sprintf("%s/%s/events/%s/%d/%d?purge=true",
c.Name, key, typ, ts.UnixNano()/1000000, ordinal)
_, err := c.client.emptyReply("DELETE", path, nil, nil, 204)
return err
}
//
// GetEvent
//
// Returns an individual event with the given details.
func (c *Collection) GetEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Perform the actual GET
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
var responseData jsonEvent
_, err := c.client.jsonReply("GET", path, nil, 200, &responseData)
if err != nil {
return nil, err
}
// Move the data from the returned values into the Event object.
event.Value = responseData.Value
event.Ref = responseData.Path.Ref
secs := responseData.Timestamp / 1000
nsecs := (responseData.Timestamp % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = responseData.Ordinal
// If the user provided us a place to unmarshal the 'value' field into
// we do that here.
if value != nil {
return event, event.Unmarshal(value)
}
// Success
return event, nil
}
//
// UpdateEvent
//
// Updates an event at the given location. In order for this to work the Event
// must exist prior to this call.
func (c *Collection) UpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
headers := map[string]string{"Content-Type": "application/json"}
return c.innerUpdateEvent(key, typ, ts, ordinal, value, headers)
}
// Inner implementation used in both UpdateEvent and Event.Update.
func (c *Collection) innerUpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
headers map[string]string,
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual PUT
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
resp, err := c.client.emptyReply("PUT", path, headers,
bytes.NewBuffer(event.Value), 204)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// ListEvents
//
//
// Search
//
// Provides optional searching parameters to a cal to ListEvents()
type ListEventsQuery struct {
// The number of results to return per call to Orchestrate. The default
// if this is not set is to return 10 at a time, the maximum that can be
// returned is 100.
Limit int
// This is the timestamp and ordinal that should be the oldest item
// included in the Event listing. Since Events a re listed newest to oldest
// this will be the last item returned (if it exists). The precision of
// the time value is miliseconds.
Start time.Time
StartOrdinal int64
// Events up to this timestamp will be included in the listing. Note that
// if EndOrdinal is not set then End behaves the same as Before. The time
// till be truncated to miliseconds.
End time.Time
EndOrdinal int64
// After the time/ordinal pairing which all events must be newer than in
// order to be included in the results. Leaving Ordinal at zero has the
// effect of including all events with the same timestamp (leaving after
// to work like Start). The time will be truncated to miliseconds for
// the search.
After time.Time
AfterOrdinal int64
// Only include listing before this time stamp. Optionally you can include
// an ordinal as well which will be used if an event exists at the exact
// same ms as Before. The precision of this time value is in miliseconds.
Before time.Time
BeforeOrdinal int64
}
// Sets up a Events listing. This does not actually perform the query, that is
// done on the first call to Next() in the iterator. If opts is nil then
// default listing parameters are used, which will return all events and
// limits the query to 10 items at a time.
func (c *Collection) ListEvents(
key, typ string, opts *ListEventsQuery,
) *Iterator {
var path string
// Build a query from the user provided values.
if opts != nil {
query := make(url.Values, 10)
if opts.Limit != 0 {
query.Add("limit", strconv.Itoa(opts.Limit))
}
var defaultTime time.Time
if opts.After != defaultTime {
if opts.AfterOrdinal != 0 {
query.Add("afterEvent", fmt.Sprintf("%d/%d",
opts.After.UnixNano()/1000000, opts.AfterOrdinal))
} else {
query.Add("afterEvent",
strconv.FormatInt(opts.After.UnixNano()/1000000, 10))
}
}
if opts.Before != defaultTime {
if opts.BeforeOrdinal != 0 {
query.Add("beforeEvent", fmt.Sprintf("%d/%d",
opts.Before.UnixNano()/1000000, opts.BeforeOrdinal))
} else {
query.Add("beforeEvent",
strconv.FormatInt(opts.Before.UnixNano()/1000000, 10))
}
}
if opts.End != defaultTime {
if opts.EndOrdinal != 0 {
query.Add("endEvent", fmt.Sprintf("%d/%d",
opts.End.UnixNano()/1000000, opts.EndOrdinal))
} else {
query.Add("endEvent",
strconv.FormatInt(opts.End.UnixNano()/1000000, 10))
}
}
if opts.Start != defaultTime {
if opts.StartOrdinal != 0 {
query.Add("startEvent", fmt.Sprintf("%d/%d",
opts.Start.UnixNano()/1000000, opts.StartOrdinal))
} else {
query.Add("startEvent",
strconv.FormatInt(opts.Start.UnixNano()/1000000, 10))
}
}
// Encode the path
path = c.Name + "/" + key + "/events/" + typ + "?" + query.Encode()
} else {
path = c.Name + "/" + key + "/events/" + typ
}
return &Iterator{
client: c.client,
iteratingEvents: true,
next: path,
}
} | Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary. | random_line_split |
event.go | // Copyright 2014 Orchestrate, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gorc2
import (
"bytes"
"encoding/json"
"fmt"
"net/url"
"strconv"
"strings"
"time"
)
// Internal type that represents the reply form a JSON event fetch.
type jsonEvent struct {
Ordinal int64 `json:"ordinal"`
Path jsonPath `json:"path"`
Timestamp int64 `json:"timestamp"`
Value json.RawMessage `json:"value"`
}
//
// AddEvent
//
// Adds a new event to the collection with the given key, and type. The
// timestamp of the new event will be set by the Orchestrate server to the
// time that the request was processed. Unlike Create this function will
// created an event even if an event already exists with that tuple. The
// new event will be given a new Ordinal value. To update and existing
// Event use UpdateEvent() instead.
//
// Note that the key should exist otherwise this call will have unpredictable
// results.
func (c *Collection) AddEvent(
key, typ string, value interface{},
) (*Event, error) |
// Like AddEvent() except this lets you specify the timestamp that will be
// attached to the event.
func (c *Collection) AddEventWithTimestamp(
key, typ string, ts time.Time, value interface{},
) (*Event, error) {
return c.innerAddEvent(key, typ, &ts, value)
}
// Inner implementation of AddEvent*
func (c *Collection) innerAddEvent(
key, typ string, ts *time.Time, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual POST
headers := map[string]string{"Content-Type": "application/json"}
var path string
if ts != nil {
path = fmt.Sprintf("%s/%s/events/%s/%d", c.Name, key, typ,
ts.UnixNano()/1000000)
} else {
path = fmt.Sprintf("%s/%s/events/%s", c.Name, key, typ)
}
resp, err := c.client.emptyReply("POST", path, headers,
bytes.NewBuffer(event.Value), 201)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// DeleteEvent
//
// Removes an event from the collection. This succeeds even if the event did
// not exist prior to this call. Note that all event deletes are Final and can
// not be undone.
func (c *Collection) DeleteEvent(
key, typ string, ts time.Time, ordinal int64,
) error {
path := fmt.Sprintf("%s/%s/events/%s/%d/%d?purge=true",
c.Name, key, typ, ts.UnixNano()/1000000, ordinal)
_, err := c.client.emptyReply("DELETE", path, nil, nil, 204)
return err
}
//
// GetEvent
//
// Fetches the single event identified by key, type, timestamp and ordinal.
// When value is non-nil the event's JSON payload is additionally
// unmarshaled into it.
func (c *Collection) GetEvent(
	key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
	// Issue the GET against the exact event coordinates (ms precision).
	path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
		ts.UnixNano()/1000000, ordinal)
	var body jsonEvent
	if _, err := c.client.jsonReply("GET", path, nil, 200, &body); err != nil {
		return nil, err
	}
	// Build the Event from the server's reply; the returned timestamp and
	// ordinal take precedence over the caller-supplied values.
	ev := &Event{
		Collection: c,
		Key:        key,
		Type:       typ,
		Value:      body.Value,
		Ref:        body.Path.Ref,
		Ordinal:    body.Ordinal,
		Timestamp: time.Unix(body.Timestamp/1000,
			(body.Timestamp%1000)*1000000),
	}
	// Optionally decode the payload into the caller's destination.
	if value != nil {
		return ev, ev.Unmarshal(value)
	}
	// Success
	return ev, nil
}
//
// UpdateEvent
//
// Overwrites an existing event with the JSON encoding of value. The event
// must already exist for this call to succeed.
func (c *Collection) UpdateEvent(
	key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
	return c.innerUpdateEvent(key, typ, ts, ordinal, value,
		map[string]string{"Content-Type": "application/json"})
}
// Inner implementation used in both UpdateEvent and Event.Update.
//
// Marshals value to JSON, PUTs it at the event's exact coordinates, and then
// rebuilds the authoritative Timestamp/Ordinal/Ref from the response's
// Location and ETag headers. The headers map lets callers layer extra
// headers (e.g. conditionals) on top of the Content-Type.
func (c *Collection) innerUpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
headers map[string]string,
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual PUT. Timestamps on the wire are Unix milliseconds.
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
resp, err := c.client.emptyReply("PUT", path, headers,
bytes.NewBuffer(event.Value), 204)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal. The path must split into exactly 8 "/" segments, the last two
// being the millisecond timestamp and the ordinal. Note that the inner
// ts/ord deliberately shadow the function parameters.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
// Convert the millisecond timestamp back into a time.Time.
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header. The ref is the quoted middle part of
// the ETag value (splitting on '"' must yield exactly 3 parts).
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// ListEvents
//
//
// Search
//
// Provides optional searching parameters to a call to ListEvents()
type ListEventsQuery struct {
// The number of results to return per call to Orchestrate. The default
// if this is not set is to return 10 at a time, the maximum that can be
// returned is 100.
Limit int
// This is the timestamp and ordinal that should be the oldest item
// included in the Event listing. Since Events are listed newest to oldest
// this will be the last item returned (if it exists). The precision of
// the time value is milliseconds.
Start time.Time
StartOrdinal int64
// Events up to this timestamp will be included in the listing. Note that
// if EndOrdinal is not set then End behaves the same as Before. The time
// will be truncated to milliseconds.
End time.Time
EndOrdinal int64
// After the time/ordinal pairing which all events must be newer than in
// order to be included in the results. Leaving Ordinal at zero has the
// effect of including all events with the same timestamp (leaving after
// to work like Start). The time will be truncated to milliseconds for
// the search.
After time.Time
AfterOrdinal int64
// Only include listing before this time stamp. Optionally you can include
// an ordinal as well which will be used if an event exists at the exact
// same ms as Before. The precision of this time value is in milliseconds.
Before time.Time
BeforeOrdinal int64
}
// Sets up an Events listing. This does not actually perform the query, that
// is done on the first call to Next() in the iterator. If opts is nil then
// default listing parameters are used, which will return all events and
// limits the query to 10 items at a time.
func (c *Collection) ListEvents(
	key, typ string, opts *ListEventsQuery,
) *Iterator {
	path := c.Name + "/" + key + "/events/" + typ
	// Build a query from the user provided values.
	if opts != nil {
		query := make(url.Values, 10)
		if opts.Limit != 0 {
			query.Add("limit", strconv.Itoa(opts.Limit))
		}
		var defaultTime time.Time
		// All four time bounds share the same wire encoding: "<ms>" when
		// no ordinal is supplied, "<ms>/<ordinal>" otherwise. A zero time
		// value means "unset" and is skipped entirely.
		addBound := func(name string, t time.Time, ord int64) {
			if t == defaultTime {
				return
			}
			millis := t.UnixNano() / 1000000
			if ord != 0 {
				query.Add(name, fmt.Sprintf("%d/%d", millis, ord))
			} else {
				query.Add(name, strconv.FormatInt(millis, 10))
			}
		}
		addBound("afterEvent", opts.After, opts.AfterOrdinal)
		addBound("beforeEvent", opts.Before, opts.BeforeOrdinal)
		addBound("endEvent", opts.End, opts.EndOrdinal)
		addBound("startEvent", opts.Start, opts.StartOrdinal)
		// Encode the path. url.Values.Encode sorts by key, so the order of
		// the Add calls above does not affect the final URL.
		path += "?" + query.Encode()
	}
	return &Iterator{
		client:          c.client,
		iteratingEvents: true,
		next:            path,
	}
}
| {
return c.innerAddEvent(key, typ, nil, value)
} | identifier_body |
day15.js | /* eslint-disable no-console */
/* eslint-disable max-len */
const input = require('./input');
/**
* --- Day 15: Beverage Bandits ---
Having perfected their hot chocolate, the Elves have a new problem: the Goblins that live in these caves will do anything to steal it. Looks like they're here for a fight.
You scan the area, generating a map of the walls (#), open cavern (.), and starting position of every Goblin (G) and Elf (E) (your puzzle input).
Combat proceeds in rounds; in each round, each unit that is still alive takes a turn, resolving all of its actions before the next unit's turn begins. On each unit's turn, it tries to move into range of an enemy (if it isn't already) and then attack (if it is in range).
All units are very disciplined and always follow very strict combat rules. Units never move or attack diagonally, as doing so would be dishonorable. When multiple choices are equally valid, ties are broken in reading order: top-to-bottom, then left-to-right. For instance, the order in which units take their turns within a round is the reading order of their starting positions in that round, regardless of the type of unit or whether other units have moved after the round started. For example:
would take their
These units: turns in this order:
####### #######
#.G.E.# #.1.2.#
#E.G.E# #3.4.5#
#.G.E.# #.6.7.#
####### #######
Each unit begins its turn by identifying all possible targets (enemy units). If no targets remain, combat ends.
Then, the unit identifies all of the open squares (.) that are in range of each target; these are the squares which are adjacent (immediately up, down, left, or right) to any target and which aren't already occupied by a wall or another unit. Alternatively, the unit might already be in range of a target. If the unit is not already in range of a target, and there are no open squares which are in range of a target, the unit ends its turn.
If the unit is already in range of a target, it does not move, but continues its turn with an attack. Otherwise, since it is not in range of a target, it moves.
To move, the unit first considers the squares that are in range and determines which of those squares it could reach in the fewest steps. A step is a single movement to any adjacent (immediately up, down, left, or right) open (.) square. Units cannot move into walls or other units. The unit does this while considering the current positions of units and does not do any prediction about where units will be later. If the unit cannot reach (find an open path to) any of the squares that are in range, it ends its turn. If multiple squares are in range and tied for being reachable in the fewest steps, the step which is first in reading order is chosen. For example:
Targets: In range: Reachable: Nearest: Chosen:
####### ####### ####### ####### #######
#E..G.# #E.?G?# #E.@G.# #E.!G.# #E.+G.#
#...#.# --> #.?.#?# --> #.@.#.# --> #.!.#.# --> #...#.#
#.G.#G# #?G?#G# #@G@#G# #!G.#G# #.G.#G#
####### ####### ####### ####### #######
In the above scenario, the Elf has three targets (the three Goblins):
Each of the Goblins has open, adjacent squares which are in range (marked with a ? on the map).
Of those squares, four are reachable (marked @); the other two (on the right) would require moving through a wall or unit to reach.
Three of these reachable squares are nearest, requiring the fewest steps (only 2) to reach (marked !).
Of those, the square which is first in reading order is chosen (+).
The unit then takes a single step toward the chosen square along the shortest path to that square. If multiple steps would put the unit equally closer to its destination, the unit chooses the step which is first in reading order. (This requires knowing when there is more than one shortest path so that you can consider the first step of each such path.) For example:
In range: Nearest: Chosen: Distance: Step:
####### ####### ####### ####### #######
#.E...# #.E...# #.E...# #4E212# #..E..#
#...?.# --> #...!.# --> #...+.# --> #32101# --> #.....#
#..?G?# #..!G.# #...G.# #432G2# #...G.#
####### ####### ####### ####### #######
The Elf sees three squares in range of a target (?), two of which are nearest (!), and so the first in reading order is chosen (+). Under "Distance", each open square is marked with its distance from the destination square; the two squares to which the Elf could move on this turn (down and to the right) are both equally good moves and would leave the Elf 2 steps from being in range of the Goblin. Because the step which is first in reading order is chosen, the Elf moves right one square.
Here's a larger example of movement:
Initially:
#########
#G..G..G#
#.......#
#.......#
#G..E..G#
#.......#
#.......#
#G..G..G#
#########
After 1 round:
#########
#.G...G.#
#...G...#
#...E..G#
#.G.....#
#.......#
#G..G..G#
#.......#
#########
After 2 rounds:
#########
#..G.G..#
#...G...#
#.G.E.G.#
#.......#
#G..G..G#
#.......#
#.......#
#########
After 3 rounds:
#########
#.......#
#..GGG..#
#..GEG..#
#G..G...#
#......G#
#.......#
#.......#
#########
Once the Goblins and Elf reach the positions above, they all are either in range of a target or cannot find any square in range of a target, and so none of the units can move until a unit dies.
After moving (or if the unit began its turn in range of a target), the unit attacks.
To attack, the unit first determines all of the targets that are in range of it by being immediately adjacent to it. If there are no such targets, the unit ends its turn. Otherwise, the adjacent target with the fewest hit points is selected; in a tie, the adjacent target with the fewest hit points which is first in reading order is selected.
The unit deals damage equal to its attack power to the selected target, reducing its hit points by that amount. If this reduces its hit points to 0 or fewer, the selected target dies: its square becomes . and it takes no further turns.
Each unit, either Goblin or Elf, has 3 attack power and starts with 200 hit points.
For example, suppose the only Elf is about to attack:
HP: HP:
G.... 9 G.... 9
..G.. 4 ..G.. 4
..EG. 2 --> ..E..
..G.. 2 ..G.. 2
...G. 1 ...G. 1
The "HP" column shows the hit points of the Goblin to the left in the corresponding row. The Elf is in range of three targets: the Goblin above it (with 4 hit points), the Goblin to its right (with 2 hit points), and the Goblin below it (also with 2 hit points). Because three targets are in range, the ones with the lowest hit points are selected: the two Goblins with 2 hit points each (one to the right of the Elf and one below the Elf). Of those, the Goblin first in reading order (the one to the right of the Elf) is selected. The selected Goblin's hit points (2) are reduced by the Elf's attack power (3), reducing its hit points to -1, killing it.
After attacking, the unit's turn ends. Regardless of how the unit's turn ends, the next unit in the round takes its turn. If all units have taken turns in this round, the round ends, and a new round begins.
The Elves look quite outnumbered. You need to determine the outcome of the battle: the number of full rounds that were completed (not counting the round in which combat ends) multiplied by the sum of the hit points of all remaining units at the moment combat ends. (Combat only ends when a unit finds no targets during its turn.)
Below is an entire sample combat. Next to each map, each row's units' hit points are listed from left to right.
Initially:
#######
#.G...# G(200)
#...EG# E(200), G(200)
#.#.#G# G(200)
#..G#E# G(200), E(200)
#.....#
#######
After 1 round:
#######
#..G..# G(200)
#...EG# E(197), G(197)
#.#G#G# G(200), G(197)
#...#E# E(197)
#.....#
#######
After 2 rounds:
#######
#...G.# G(200)
#..GEG# G(200), E(188), G(194)
#.#.#G# G(194)
#...#E# E(194)
#.....#
#######
Combat ensues; eventually, the top Elf dies:
After 23 rounds:
#######
#...G.# G(200)
#..G.G# G(200), G(131)
#.#.#G# G(131)
#...#E# E(131)
#.....#
#######
After 24 rounds:
#######
#..G..# G(200)
#...G.# G(131)
#.#G#G# G(200), G(128)
#...#E# E(128)
#.....#
#######
After 25 rounds:
#######
#.G...# G(200)
#..G..# G(131)
#.#.#G# G(125)
#..G#E# G(200), E(125)
#.....#
#######
After 26 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(122)
#...#E# E(122)
#..G..# G(200)
#######
After 27 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(119)
#...#E# E(119)
#...G.# G(200)
#######
After 28 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(116)
#...#E# E(113)
#....G# G(200)
#######
More combat ensues; eventually, the bottom Elf dies:
After 47 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(59)
#...#.#
#....G# G(200)
#######
Before the 48th round can finish, the top-left Goblin finds that there are no targets remaining, and so combat ends. So, the number of full rounds that were completed is 47, and the sum of the hit points of all remaining units is 200+131+59+200 = 590. From these, the outcome of the battle is 47 * 590 = 27730.
Here are a few example summarized combats:
####### #######
#G..#E# #...#E# E(200)
#E#E.E# #E#...# E(197)
#G.##.# --> #.E##.# E(185)
#...#E# #E..#E# E(200), E(200)
#...E.# #.....#
####### #######
Combat ends after 37 full rounds
Elves win with 982 total hit points left
Outcome: 37 * 982 = 36334
####### #######
#E..EG# #.E.E.# E(164), E(197)
#.#G.E# #.#E..# E(200)
#E.##E# --> #E.##.# E(98)
#G..#.# #.E.#.# E(200)
#..E#.# #...#.#
####### #######
Combat ends after 46 full rounds
Elves win with 859 total hit points left
Outcome: 46 * 859 = 39514
####### #######
#E.G#.# #G.G#.# G(200), G(98)
#.#G..# #.#G..# G(200)
#G.#.G# --> #..#..#
#G..#.# #...#G# G(95)
#...E.# #...G.# G(200)
####### #######
Combat ends after 35 full rounds
Goblins win with 793 total hit points left
Outcome: 35 * 793 = 27755
####### #######
#.E...# #.....#
#.#..G# #.#G..# G(200)
#.###.# --> #.###.#
#E#G#G# #.#.#.#
#...#G# #G.G#G# G(98), G(38), G(200)
####### #######
Combat ends after 54 full rounds
Goblins win with 536 total hit points left
Outcome: 54 * 536 = 28944
######### #########
#G......# #.G.....# G(137)
#.E.#...# #G.G#...# G(200), G(200)
#..##..G# #.G##...# G(200)
#...##..# --> #...##..#
#...#...# #.G.#...# G(200)
#.G...G.# #.......#
#.....G.# #.......#
######### #########
Combat ends after 20 full rounds
Goblins win with 937 total hit points left
Outcome: 20 * 937 = 18740
What is the outcome of the combat described in your puzzle input?
*/
/**
* Could keep map as lines of strings like in Day 13?
* Would require repeated loops for target acquisition/distance/reading order determination?
* Create nodes linking each instead
* Create fighter class
* Distinguish friend and foe - probably just extend fighter
* -or just compare chars? though modifying elves/goblins separately might be needed for part 2
* Separate fighter round into phases
* -acquire target
* -attack
* -move
* Find shortest path
* Choose by reading order upon multiple
* Break out of round when any single fighter finds no enemies left or deduct 1 from final round count
*/
// Raw map rows from today's puzzle input, one string per row.
const mapStrings = input.day15.split('\n');
// One square of the combat map, linked to its four orthogonal neighbours.
class Node {
constructor(x, y, char) {
// Might not end up needing all of these
this.x = x;
this.y = y;
// Walls keep '#'; every other input char (including 'G'/'E') is stored
// as open cavern '.', since units are tracked via `occupant` instead.
this.char = char === '#' ? char : '.';
this.isTraversable = char !== '#';
this.occupant = null; // Need something like this to not traverse occupied nodes + acquire target
// Link these after map is generated?
this.left = null;
this.right = null;
this.up = null;
this.down = null;
// NOTE(review): puzzle reading order is up, left, right, down; this list
// is left-first — confirm the intended ordering before tie-breaking.
this.directions = ['left', 'up', 'right', 'down'];
}
// Unimplemented — currently always returns undefined (falsy).
isAdjacent(target) {
// this returns true if diagonal
// return Math.abs(this.x - target.x) === 1 || Math.abs(this.y - target.y) === 1;
}
// Manhattan distance to another node-like { x, y }.
getDistance({ x, y }) {
return Math.abs(this.x - x) + Math.abs(this.y - y);
}
// NOTE(review): dataset FIM marker — the method name (apparently
// "getPath") was elided from this row of the dump.
| ({ x, y }) {}
}
// Builds the grid of linked Nodes from the puzzle's lines of text.
// Each node is linked to its four orthogonal neighbours (null at the edges).
function generateMap(lines = mapStrings) {
  const nodes = lines.map((line, y) => [...line].map((char, x) => new Node(x, y, char)));
  nodes.forEach(row => row.forEach((node) => {
    // Bug fix: row (y) and column (x) indices were mixed up — left/right
    // were guarded by nodes[node.x ± 1] (a row lookup, not a column one),
    // and `down` read column node.x - 1 instead of node.x.
    if (nodes[node.y][node.x - 1]) node.left = nodes[node.y][node.x - 1];
    if (nodes[node.y - 1]) node.up = nodes[node.y - 1][node.x];
    if (nodes[node.y][node.x + 1]) node.right = nodes[node.y][node.x + 1];
    if (nodes[node.y + 1]) node.down = nodes[node.y + 1][node.x];
    // node.surroundings = [node.left, node.right, node.up, node.down];
  }));
  return nodes;
}
class Fighter {
// Use nodes instead of x,y for location?
constructor(x, y, map) {
this.location = map[y][x]; // Grab node with given x,y from however we're storing them
// this.char = char; // To be used to determine allegiance? or just set side based on char without storing char
this.hp = 200;
this.ap = 3;
this.isAlive = true;
this.location.occupant = this;
}
takeDamage(n) {
this.hp -= n;
if (this.hp <= 0) this.isAlive = false;
}
attack(target) {
target.takeDamage(this.ap);
}
// move(...directions) { // single items
// take in array of directions
move(directions) {
directions.forEach((direction) => {
if (this.location[direction].occupant) throw new Error('Trying to move into occupied spot');
this.location.occupant = null;
this.location = this.location[direction];
this.location.occupant = this;
});
}
// use this in acquireTarget instead?
getSurroundingDistances(target) {
return this.location.directions.map(direction => (target.location[direction].isTraversable && !target.location[direction].occupant
? { direction: this.location.getDistance(target.location[direction]) }
: null));
}
acquireTarget(enemies) {
// BFS to get closest target(s) ?
// todo get distance to nodes around the enemy, not the enemy location itself
const targetDistances = enemies
.map(enemy => ({ target: enemy, distance: this.location.getDistance(enemy.location) }))
.sort((x, y) => x.distance - y.distance);
if (targetDistances[0].distance < targetDistances[1].distance) return targetDistances[0].target;
const equidistantTargets = targetDistances.filter(
enemy => enemy.distance === targetDistances[0].distance,
);
// Determine reading order in case of multiple
}
attackPhase(target) {
// Target should already be acquired
// Subtract ap from targets hp
// todo revisit each phase's structure
}
}
// A goblin unit; its enemies are Elves. All behaviour lives in Fighter.
class Goblin extends Fighter {
constructor(x, y, map) {
super(x, y, map);
// Store the enemy constructor for allegiance checks.
// NOTE(review): Elf is declared later in the file — safe at runtime
// because this line only executes when a Goblin is constructed.
this.enemy = Elf;
}
}
// An elf unit; its enemies are Goblins. All behaviour lives in Fighter.
class Elf extends Fighter {
constructor(x, y, map) {
super(x, y, map);
// Store the enemy constructor for allegiance checks.
this.enemy = Goblin;
}
}
// NOTE(review): `surroundings` is commented out inside generateMap, so this
// currently prints `undefined` — looks like a debug leftover; confirm.
console.log(generateMap()[0][0].surroundings);
| getPath | identifier_name |
day15.js | /* eslint-disable no-console */
/* eslint-disable max-len */
const input = require('./input');
/**
* --- Day 15: Beverage Bandits ---
Having perfected their hot chocolate, the Elves have a new problem: the Goblins that live in these caves will do anything to steal it. Looks like they're here for a fight.
You scan the area, generating a map of the walls (#), open cavern (.), and starting position of every Goblin (G) and Elf (E) (your puzzle input).
Combat proceeds in rounds; in each round, each unit that is still alive takes a turn, resolving all of its actions before the next unit's turn begins. On each unit's turn, it tries to move into range of an enemy (if it isn't already) and then attack (if it is in range).
All units are very disciplined and always follow very strict combat rules. Units never move or attack diagonally, as doing so would be dishonorable. When multiple choices are equally valid, ties are broken in reading order: top-to-bottom, then left-to-right. For instance, the order in which units take their turns within a round is the reading order of their starting positions in that round, regardless of the type of unit or whether other units have moved after the round started. For example:
would take their
These units: turns in this order:
####### #######
#.G.E.# #.1.2.#
#E.G.E# #3.4.5#
#.G.E.# #.6.7.#
####### #######
Each unit begins its turn by identifying all possible targets (enemy units). If no targets remain, combat ends.
Then, the unit identifies all of the open squares (.) that are in range of each target; these are the squares which are adjacent (immediately up, down, left, or right) to any target and which aren't already occupied by a wall or another unit. Alternatively, the unit might already be in range of a target. If the unit is not already in range of a target, and there are no open squares which are in range of a target, the unit ends its turn.
If the unit is already in range of a target, it does not move, but continues its turn with an attack. Otherwise, since it is not in range of a target, it moves.
To move, the unit first considers the squares that are in range and determines which of those squares it could reach in the fewest steps. A step is a single movement to any adjacent (immediately up, down, left, or right) open (.) square. Units cannot move into walls or other units. The unit does this while considering the current positions of units and does not do any prediction about where units will be later. If the unit cannot reach (find an open path to) any of the squares that are in range, it ends its turn. If multiple squares are in range and tied for being reachable in the fewest steps, the step which is first in reading order is chosen. For example:
Targets: In range: Reachable: Nearest: Chosen:
####### ####### ####### ####### #######
#E..G.# #E.?G?# #E.@G.# #E.!G.# #E.+G.#
#...#.# --> #.?.#?# --> #.@.#.# --> #.!.#.# --> #...#.#
#.G.#G# #?G?#G# #@G@#G# #!G.#G# #.G.#G#
####### ####### ####### ####### #######
In the above scenario, the Elf has three targets (the three Goblins):
Each of the Goblins has open, adjacent squares which are in range (marked with a ? on the map).
Of those squares, four are reachable (marked @); the other two (on the right) would require moving through a wall or unit to reach.
Three of these reachable squares are nearest, requiring the fewest steps (only 2) to reach (marked !).
Of those, the square which is first in reading order is chosen (+).
The unit then takes a single step toward the chosen square along the shortest path to that square. If multiple steps would put the unit equally closer to its destination, the unit chooses the step which is first in reading order. (This requires knowing when there is more than one shortest path so that you can consider the first step of each such path.) For example:
In range: Nearest: Chosen: Distance: Step:
####### ####### ####### ####### #######
#.E...# #.E...# #.E...# #4E212# #..E..#
#...?.# --> #...!.# --> #...+.# --> #32101# --> #.....#
#..?G?# #..!G.# #...G.# #432G2# #...G.#
####### ####### ####### ####### #######
The Elf sees three squares in range of a target (?), two of which are nearest (!), and so the first in reading order is chosen (+). Under "Distance", each open square is marked with its distance from the destination square; the two squares to which the Elf could move on this turn (down and to the right) are both equally good moves and would leave the Elf 2 steps from being in range of the Goblin. Because the step which is first in reading order is chosen, the Elf moves right one square.
Here's a larger example of movement:
Initially:
#########
#G..G..G#
#.......#
#.......#
#G..E..G#
#.......#
#.......#
#G..G..G#
#########
After 1 round:
#########
#.G...G.#
#...G...#
#...E..G#
#.G.....#
#.......#
#G..G..G#
#.......#
#########
After 2 rounds:
#########
#..G.G..#
#...G...#
#.G.E.G.#
#.......#
#G..G..G#
#.......#
#.......#
#########
After 3 rounds:
#########
#.......#
#..GGG..#
#..GEG..#
#G..G...#
#......G#
#.......#
#.......#
#########
Once the Goblins and Elf reach the positions above, they all are either in range of a target or cannot find any square in range of a target, and so none of the units can move until a unit dies.
After moving (or if the unit began its turn in range of a target), the unit attacks.
To attack, the unit first determines all of the targets that are in range of it by being immediately adjacent to it. If there are no such targets, the unit ends its turn. Otherwise, the adjacent target with the fewest hit points is selected; in a tie, the adjacent target with the fewest hit points which is first in reading order is selected.
The unit deals damage equal to its attack power to the selected target, reducing its hit points by that amount. If this reduces its hit points to 0 or fewer, the selected target dies: its square becomes . and it takes no further turns.
Each unit, either Goblin or Elf, has 3 attack power and starts with 200 hit points.
For example, suppose the only Elf is about to attack:
HP: HP:
G.... 9 G.... 9
..G.. 4 ..G.. 4
..EG. 2 --> ..E..
..G.. 2 ..G.. 2
...G. 1 ...G. 1
The "HP" column shows the hit points of the Goblin to the left in the corresponding row. The Elf is in range of three targets: the Goblin above it (with 4 hit points), the Goblin to its right (with 2 hit points), and the Goblin below it (also with 2 hit points). Because three targets are in range, the ones with the lowest hit points are selected: the two Goblins with 2 hit points each (one to the right of the Elf and one below the Elf). Of those, the Goblin first in reading order (the one to the right of the Elf) is selected. The selected Goblin's hit points (2) are reduced by the Elf's attack power (3), reducing its hit points to -1, killing it.
After attacking, the unit's turn ends. Regardless of how the unit's turn ends, the next unit in the round takes its turn. If all units have taken turns in this round, the round ends, and a new round begins.
The Elves look quite outnumbered. You need to determine the outcome of the battle: the number of full rounds that were completed (not counting the round in which combat ends) multiplied by the sum of the hit points of all remaining units at the moment combat ends. (Combat only ends when a unit finds no targets during its turn.)
Below is an entire sample combat. Next to each map, each row's units' hit points are listed from left to right.
Initially:
#######
#.G...# G(200)
#...EG# E(200), G(200)
#.#.#G# G(200)
#..G#E# G(200), E(200)
#.....#
#######
After 1 round:
#######
#..G..# G(200)
#...EG# E(197), G(197)
#.#G#G# G(200), G(197)
#...#E# E(197)
#.....#
#######
After 2 rounds:
#######
#...G.# G(200)
#..GEG# G(200), E(188), G(194)
#.#.#G# G(194)
#...#E# E(194)
#.....#
#######
Combat ensues; eventually, the top Elf dies:
After 23 rounds:
#######
#...G.# G(200)
#..G.G# G(200), G(131)
#.#.#G# G(131)
#...#E# E(131)
#.....#
#######
After 24 rounds:
#######
#..G..# G(200)
#...G.# G(131)
#.#G#G# G(200), G(128)
#...#E# E(128)
#.....#
#######
After 25 rounds:
#######
#.G...# G(200)
#..G..# G(131)
#.#.#G# G(125)
#..G#E# G(200), E(125)
#.....#
#######
After 26 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(122)
#...#E# E(122)
#..G..# G(200)
#######
After 27 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(119)
#...#E# E(119)
#...G.# G(200)
#######
After 28 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(116)
#...#E# E(113)
#....G# G(200)
#######
More combat ensues; eventually, the bottom Elf dies:
After 47 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(59)
#...#.#
#....G# G(200)
#######
Before the 48th round can finish, the top-left Goblin finds that there are no targets remaining, and so combat ends. So, the number of full rounds that were completed is 47, and the sum of the hit points of all remaining units is 200+131+59+200 = 590. From these, the outcome of the battle is 47 * 590 = 27730.
Here are a few example summarized combats:
####### #######
#G..#E# #...#E# E(200)
#E#E.E# #E#...# E(197)
#G.##.# --> #.E##.# E(185)
#...#E# #E..#E# E(200), E(200)
#...E.# #.....#
####### #######
Combat ends after 37 full rounds
Elves win with 982 total hit points left
Outcome: 37 * 982 = 36334
####### #######
#E..EG# #.E.E.# E(164), E(197)
#.#G.E# #.#E..# E(200)
#E.##E# --> #E.##.# E(98)
#G..#.# #.E.#.# E(200)
#..E#.# #...#.#
####### #######
Combat ends after 46 full rounds
Elves win with 859 total hit points left
Outcome: 46 * 859 = 39514
####### #######
#E.G#.# #G.G#.# G(200), G(98)
#.#G..# #.#G..# G(200)
#G.#.G# --> #..#..#
#G..#.# #...#G# G(95)
#...E.# #...G.# G(200)
####### #######
Combat ends after 35 full rounds
Goblins win with 793 total hit points left
Outcome: 35 * 793 = 27755
####### #######
#.E...# #.....#
#.#..G# #.#G..# G(200)
#.###.# --> #.###.#
#E#G#G# #.#.#.#
#...#G# #G.G#G# G(98), G(38), G(200)
####### #######
Combat ends after 54 full rounds
Goblins win with 536 total hit points left
Outcome: 54 * 536 = 28944
######### #########
#G......# #.G.....# G(137)
#.E.#...# #G.G#...# G(200), G(200)
#..##..G# #.G##...# G(200)
#...##..# --> #...##..#
#...#...# #.G.#...# G(200)
#.G...G.# #.......#
#.....G.# #.......#
######### #########
Combat ends after 20 full rounds
Goblins win with 937 total hit points left
Outcome: 20 * 937 = 18740
What is the outcome of the combat described in your puzzle input?
*/
/**
* Could keep map as lines of strings like in Day 13?
* Would require repeated loops for target acquisition/distance/reading order determination?
* Create nodes linking each instead
* Create fighter class
* Distinguish friend and foe - probably just extend fighter
* -or just compare chars? though modifying elves/goblins separately might be needed for part 2
* Separate fighter round into phases
* -acquire target
* -attack
* -move
* Find shortest path
* Choose by reading order upon multiple
* Break out of round when any single fighter finds no enemies left or deduct 1 from final round count
*/
// Raw map rows from today's puzzle input, one string per row.
const mapStrings = input.day15.split('\n');
class Node {
constructor(x, y, char) {
// Might not end up needing all of these
this.x = x;
this.y = y;
this.char = char === '#' ? char : '.';
this.isTraversable = char !== '#';
this.occupant = null; // Need something like this to not traverse occupied nodes + acquire target
// Link these after map is generated?
this.left = null;
this.right = null;
this.up = null;
this.down = null;
this.directions = ['left', 'up', 'right', 'down'];
}
isAdjacent(target) {
// this returns true if diagonal
// return Math.abs(this.x - target.x) === 1 || Math.abs(this.y - target.y) === 1;
}
getDistance({ x, y }) {
return Math.abs(this.x - x) + Math.abs(this.y - y);
}
getPath({ x, y }) {}
}
function generateMap(lines = mapStrings) {
const nodes = lines.map((line, y) => [...line].map((char, x) => new Node(x, y, char)));
nodes.forEach(line => line.forEach((node) => {
if (nodes[node.x - 1]) node.left = nodes[node.y][node.x - 1];
if (nodes[node.y - 1]) node.up = nodes[node.y - 1][node.x];
if (nodes[node.x + 1]) node.right = nodes[node.y][node.x + 1];
if (nodes[node.y + 1]) node.down = nodes[node.y + 1][node.x - 1];
// node.surroundings = [node.left, node.right, node.up, node.down];
}));
return nodes;
}
class Fighter {
// Use nodes instead of x,y for location?
constructor(x, y, map) {
this.location = map[y][x]; // Grab node with given x,y from however we're storing them
// this.char = char; // To be used to determine allegiance? or just set side based on char without storing char
this.hp = 200;
this.ap = 3;
this.isAlive = true;
this.location.occupant = this;
}
takeDamage(n) {
this.hp -= n;
if (this.hp <= 0) this.isAlive = false;
}
attack(target) {
target.takeDamage(this.ap);
}
// move(...directions) { // single items
// take in array of directions
move(directions) {
directions.forEach((direction) => {
if (this.location[direction].occupant) throw new Error('Trying to move into occupied spot');
this.location.occupant = null;
this.location = this.location[direction];
this.location.occupant = this;
});
}
// use this in acquireTarget instead?
getSurroundingDistances(target) {
return this.location.directions.map(direction => (target.location[direction].isTraversable && !target.location[direction].occupant
? { direction: this.location.getDistance(target.location[direction]) }
: null));
}
acquireTarget(enemies) {
// BFS to get closest target(s) ?
// todo get distance to nodes around the enemy, not the enemy location itself
const targetDistances = enemies
.map(enemy => ({ target: enemy, distance: this.location.getDistance(enemy.location) }))
.sort((x, y) => x.distance - y.distance);
if (targetDistances[0].distance < targetDistances[1].distance) return targetDistances[0].target;
const equidistantTargets = targetDistances.filter(
enemy => enemy.distance === targetDistances[0].distance,
);
// Determine reading order in case of multiple
}
attackPhase(target) {
// Target should already be acquired
// Subtract ap from targets hp
// todo revisit each phase's structure
}
}
class Goblin extends Fighter {
constructor(x, y, map) {
super(x, y, map);
this.enemy = Elf;
}
}
class Elf extends Fighter {
constructor(x, y, map) |
}
console.log(generateMap()[0][0].surroundings);
| {
super(x, y, map);
this.enemy = Goblin;
} | identifier_body |
day15.js | /* eslint-disable no-console */
/* eslint-disable max-len */
const input = require('./input');
/**
* --- Day 15: Beverage Bandits ---
Having perfected their hot chocolate, the Elves have a new problem: the Goblins that live in these caves will do anything to steal it. Looks like they're here for a fight.
You scan the area, generating a map of the walls (#), open cavern (.), and starting position of every Goblin (G) and Elf (E) (your puzzle input).
Combat proceeds in rounds; in each round, each unit that is still alive takes a turn, resolving all of its actions before the next unit's turn begins. On each unit's turn, it tries to move into range of an enemy (if it isn't already) and then attack (if it is in range).
All units are very disciplined and always follow very strict combat rules. Units never move or attack diagonally, as doing so would be dishonorable. When multiple choices are equally valid, ties are broken in reading order: top-to-bottom, then left-to-right. For instance, the order in which units take their turns within a round is the reading order of their starting positions in that round, regardless of the type of unit or whether other units have moved after the round started. For example:
would take their
These units: turns in this order:
####### #######
#.G.E.# #.1.2.#
#E.G.E# #3.4.5#
#.G.E.# #.6.7.#
####### #######
Each unit begins its turn by identifying all possible targets (enemy units). If no targets remain, combat ends.
Then, the unit identifies all of the open squares (.) that are in range of each target; these are the squares which are adjacent (immediately up, down, left, or right) to any target and which aren't already occupied by a wall or another unit. Alternatively, the unit might already be in range of a target. If the unit is not already in range of a target, and there are no open squares which are in range of a target, the unit ends its turn.
If the unit is already in range of a target, it does not move, but continues its turn with an attack. Otherwise, since it is not in range of a target, it moves.
To move, the unit first considers the squares that are in range and determines which of those squares it could reach in the fewest steps. A step is a single movement to any adjacent (immediately up, down, left, or right) open (.) square. Units cannot move into walls or other units. The unit does this while considering the current positions of units and does not do any prediction about where units will be later. If the unit cannot reach (find an open path to) any of the squares that are in range, it ends its turn. If multiple squares are in range and tied for being reachable in the fewest steps, the step which is first in reading order is chosen. For example:
Targets: In range: Reachable: Nearest: Chosen:
####### ####### ####### ####### #######
#E..G.# #E.?G?# #E.@G.# #E.!G.# #E.+G.#
#...#.# --> #.?.#?# --> #.@.#.# --> #.!.#.# --> #...#.#
#.G.#G# #?G?#G# #@G@#G# #!G.#G# #.G.#G#
####### ####### ####### ####### #######
In the above scenario, the Elf has three targets (the three Goblins):
Each of the Goblins has open, adjacent squares which are in range (marked with a ? on the map).
Of those squares, four are reachable (marked @); the other two (on the right) would require moving through a wall or unit to reach.
Three of these reachable squares are nearest, requiring the fewest steps (only 2) to reach (marked !).
Of those, the square which is first in reading order is chosen (+).
The unit then takes a single step toward the chosen square along the shortest path to that square. If multiple steps would put the unit equally closer to its destination, the unit chooses the step which is first in reading order. (This requires knowing when there is more than one shortest path so that you can consider the first step of each such path.) For example:
In range: Nearest: Chosen: Distance: Step:
####### ####### ####### ####### #######
#.E...# #.E...# #.E...# #4E212# #..E..#
#...?.# --> #...!.# --> #...+.# --> #32101# --> #.....#
#..?G?# #..!G.# #...G.# #432G2# #...G.#
####### ####### ####### ####### #######
The Elf sees three squares in range of a target (?), two of which are nearest (!), and so the first in reading order is chosen (+). Under "Distance", each open square is marked with its distance from the destination square; the two squares to which the Elf could move on this turn (down and to the right) are both equally good moves and would leave the Elf 2 steps from being in range of the Goblin. Because the step which is first in reading order is chosen, the Elf moves right one square.
Here's a larger example of movement:
Initially:
#########
#G..G..G#
#.......#
#.......#
#G..E..G#
#.......#
#.......#
#G..G..G#
#########
After 1 round:
#########
#.G...G.#
#...G...#
#...E..G#
#.G.....#
#.......#
#G..G..G#
#.......#
#########
After 2 rounds:
#########
#..G.G..#
#...G...#
#.G.E.G.#
#.......#
#G..G..G#
#.......#
#.......#
#########
After 3 rounds:
#########
#.......#
#..GGG..#
#..GEG..#
#G..G...#
#......G#
#.......#
#.......#
#########
Once the Goblins and Elf reach the positions above, they all are either in range of a target or cannot find any square in range of a target, and so none of the units can move until a unit dies.
After moving (or if the unit began its turn in range of a target), the unit attacks.
To attack, the unit first determines all of the targets that are in range of it by being immediately adjacent to it. If there are no such targets, the unit ends its turn. Otherwise, the adjacent target with the fewest hit points is selected; in a tie, the adjacent target with the fewest hit points which is first in reading order is selected.
The unit deals damage equal to its attack power to the selected target, reducing its hit points by that amount. If this reduces its hit points to 0 or fewer, the selected target dies: its square becomes . and it takes no further turns.
Each unit, either Goblin or Elf, has 3 attack power and starts with 200 hit points.
For example, suppose the only Elf is about to attack:
HP: HP:
G.... 9 G.... 9
..G.. 4 ..G.. 4
..EG. 2 --> ..E..
..G.. 2 ..G.. 2
...G. 1 ...G. 1
The "HP" column shows the hit points of the Goblin to the left in the corresponding row. The Elf is in range of three targets: the Goblin above it (with 4 hit points), the Goblin to its right (with 2 hit points), and the Goblin below it (also with 2 hit points). Because three targets are in range, the ones with the lowest hit points are selected: the two Goblins with 2 hit points each (one to the right of the Elf and one below the Elf). Of those, the Goblin first in reading order (the one to the right of the Elf) is selected. The selected Goblin's hit points (2) are reduced by the Elf's attack power (3), reducing its hit points to -1, killing it.
After attacking, the unit's turn ends. Regardless of how the unit's turn ends, the next unit in the round takes its turn. If all units have taken turns in this round, the round ends, and a new round begins.
The Elves look quite outnumbered. You need to determine the outcome of the battle: the number of full rounds that were completed (not counting the round in which combat ends) multiplied by the sum of the hit points of all remaining units at the moment combat ends. (Combat only ends when a unit finds no targets during its turn.)
Below is an entire sample combat. Next to each map, each row's units' hit points are listed from left to right.
Initially:
#######
#.G...# G(200)
#...EG# E(200), G(200)
#.#.#G# G(200)
#..G#E# G(200), E(200)
#.....#
#######
After 1 round:
#######
#..G..# G(200)
#...EG# E(197), G(197)
#.#G#G# G(200), G(197)
#...#E# E(197)
#.....#
#######
After 2 rounds:
#######
#...G.# G(200)
#..GEG# G(200), E(188), G(194)
#.#.#G# G(194)
#...#E# E(194)
#.....#
#######
Combat ensues; eventually, the top Elf dies:
After 23 rounds:
#######
#...G.# G(200)
#..G.G# G(200), G(131)
#.#.#G# G(131)
#...#E# E(131)
#.....#
#######
After 24 rounds:
#######
#..G..# G(200)
#...G.# G(131)
#.#G#G# G(200), G(128)
#...#E# E(128)
#.....#
#######
After 25 rounds:
#######
#.G...# G(200)
#..G..# G(131)
#.#.#G# G(125)
#..G#E# G(200), E(125)
#.....#
#######
After 26 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(122)
#...#E# E(122)
#..G..# G(200)
#######
After 27 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(119)
#...#E# E(119)
#...G.# G(200)
#######
After 28 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(116)
#...#E# E(113)
#....G# G(200)
#######
More combat ensues; eventually, the bottom Elf dies:
After 47 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(59)
#...#.#
#....G# G(200) | ####### #######
#G..#E# #...#E# E(200)
#E#E.E# #E#...# E(197)
#G.##.# --> #.E##.# E(185)
#...#E# #E..#E# E(200), E(200)
#...E.# #.....#
####### #######
Combat ends after 37 full rounds
Elves win with 982 total hit points left
Outcome: 37 * 982 = 36334
####### #######
#E..EG# #.E.E.# E(164), E(197)
#.#G.E# #.#E..# E(200)
#E.##E# --> #E.##.# E(98)
#G..#.# #.E.#.# E(200)
#..E#.# #...#.#
####### #######
Combat ends after 46 full rounds
Elves win with 859 total hit points left
Outcome: 46 * 859 = 39514
####### #######
#E.G#.# #G.G#.# G(200), G(98)
#.#G..# #.#G..# G(200)
#G.#.G# --> #..#..#
#G..#.# #...#G# G(95)
#...E.# #...G.# G(200)
####### #######
Combat ends after 35 full rounds
Goblins win with 793 total hit points left
Outcome: 35 * 793 = 27755
####### #######
#.E...# #.....#
#.#..G# #.#G..# G(200)
#.###.# --> #.###.#
#E#G#G# #.#.#.#
#...#G# #G.G#G# G(98), G(38), G(200)
####### #######
Combat ends after 54 full rounds
Goblins win with 536 total hit points left
Outcome: 54 * 536 = 28944
######### #########
#G......# #.G.....# G(137)
#.E.#...# #G.G#...# G(200), G(200)
#..##..G# #.G##...# G(200)
#...##..# --> #...##..#
#...#...# #.G.#...# G(200)
#.G...G.# #.......#
#.....G.# #.......#
######### #########
Combat ends after 20 full rounds
Goblins win with 937 total hit points left
Outcome: 20 * 937 = 18740
What is the outcome of the combat described in your puzzle input?
*/
/**
* Could keep map as lines of strings like in Day 13?
* Would require repeated loops for target acquisition/distance/reading order determination?
* Create nodes linking each instead
* Create fighter class
* Distinguish friend and foe - probably just extend fighter
* -or just compare chars? though modifying elves/goblins separately might be needed for part 2
* Separate fighter round into phases
* -acquire target
* -attack
* -move
* Find shortest path
* Choose by reading order upon multiple
* Break out of round when any single fighter finds no enemies left or deduct 1 from final round count
*/
const mapStrings = input.day15.split('\n');
class Node {
constructor(x, y, char) {
// Might not end up needing all of these
this.x = x;
this.y = y;
this.char = char === '#' ? char : '.';
this.isTraversable = char !== '#';
this.occupant = null; // Need something like this to not traverse occupied nodes + acquire target
// Link these after map is generated?
this.left = null;
this.right = null;
this.up = null;
this.down = null;
this.directions = ['left', 'up', 'right', 'down'];
}
isAdjacent(target) {
// this returns true if diagonal
// return Math.abs(this.x - target.x) === 1 || Math.abs(this.y - target.y) === 1;
}
getDistance({ x, y }) {
return Math.abs(this.x - x) + Math.abs(this.y - y);
}
getPath({ x, y }) {}
}
function generateMap(lines = mapStrings) {
const nodes = lines.map((line, y) => [...line].map((char, x) => new Node(x, y, char)));
nodes.forEach(line => line.forEach((node) => {
if (nodes[node.x - 1]) node.left = nodes[node.y][node.x - 1];
if (nodes[node.y - 1]) node.up = nodes[node.y - 1][node.x];
if (nodes[node.x + 1]) node.right = nodes[node.y][node.x + 1];
if (nodes[node.y + 1]) node.down = nodes[node.y + 1][node.x - 1];
// node.surroundings = [node.left, node.right, node.up, node.down];
}));
return nodes;
}
class Fighter {
// Use nodes instead of x,y for location?
constructor(x, y, map) {
this.location = map[y][x]; // Grab node with given x,y from however we're storing them
// this.char = char; // To be used to determine allegiance? or just set side based on char without storing char
this.hp = 200;
this.ap = 3;
this.isAlive = true;
this.location.occupant = this;
}
takeDamage(n) {
this.hp -= n;
if (this.hp <= 0) this.isAlive = false;
}
attack(target) {
target.takeDamage(this.ap);
}
// move(...directions) { // single items
// take in array of directions
move(directions) {
directions.forEach((direction) => {
if (this.location[direction].occupant) throw new Error('Trying to move into occupied spot');
this.location.occupant = null;
this.location = this.location[direction];
this.location.occupant = this;
});
}
// use this in acquireTarget instead?
getSurroundingDistances(target) {
return this.location.directions.map(direction => (target.location[direction].isTraversable && !target.location[direction].occupant
? { direction: this.location.getDistance(target.location[direction]) }
: null));
}
acquireTarget(enemies) {
// BFS to get closest target(s) ?
// todo get distance to nodes around the enemy, not the enemy location itself
const targetDistances = enemies
.map(enemy => ({ target: enemy, distance: this.location.getDistance(enemy.location) }))
.sort((x, y) => x.distance - y.distance);
if (targetDistances[0].distance < targetDistances[1].distance) return targetDistances[0].target;
const equidistantTargets = targetDistances.filter(
enemy => enemy.distance === targetDistances[0].distance,
);
// Determine reading order in case of multiple
}
attackPhase(target) {
// Target should already be acquired
// Subtract ap from targets hp
// todo revisit each phase's structure
}
}
class Goblin extends Fighter {
constructor(x, y, map) {
super(x, y, map);
this.enemy = Elf;
}
}
class Elf extends Fighter {
constructor(x, y, map) {
super(x, y, map);
this.enemy = Goblin;
}
}
console.log(generateMap()[0][0].surroundings); | #######
Before the 48th round can finish, the top-left Goblin finds that there are no targets remaining, and so combat ends. So, the number of full rounds that were completed is 47, and the sum of the hit points of all remaining units is 200+131+59+200 = 590. From these, the outcome of the battle is 47 * 590 = 27730.
Here are a few example summarized combats:
| random_line_split |
producer.rs | // Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::super::ConnectorHarness;
use super::redpanda_container;
use crate::connectors::tests::free_port::find_free_tcp_port;
use crate::{connectors::impls::kafka, errors::Result, Event};
use futures::StreamExt;
use rdkafka::{
admin::{AdminClient, AdminOptions, NewTopic, TopicReplication},
config::FromClientConfig,
consumer::{CommitMode, Consumer, StreamConsumer},
message::Headers,
ClientConfig, Message,
};
use serial_test::serial;
use std::time::Duration;
use testcontainers::clients::Cli as DockerCli;
use tokio::time::timeout;
use tremor_common::ports::IN;
use tremor_pipeline::EventId;
use tremor_value::literal;
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn | () -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let docker = DockerCli::default();
let container = redpanda_container(&docker).await?;
let port = container.get_host_port_ipv4(9092);
let mut admin_config = ClientConfig::new();
let broker = format!("127.0.0.1:{port}");
let topic = "tremor_test";
let num_partitions = 3;
let num_replicas = 1;
admin_config
.set("client.id", "test-admin")
.set("bootstrap.servers", &broker);
let admin_client = AdminClient::from_config(&admin_config)?;
let options = AdminOptions::default();
let res = admin_client
.create_topics(
vec![&NewTopic::new(
topic,
num_partitions,
TopicReplication::Fixed(num_replicas),
)],
&options,
)
.await?;
for r in res {
match r {
Err((topic, err)) => {
error!("Error creating topic {}: {}", &topic, err);
}
Ok(topic) => {
info!("Created topic {}", topic);
}
}
}
let connector_config = literal!({
"reconnect": {
"retry": {
"interval_ms": 1000_u64,
"max_retries": 10_u64
}
},
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
// "debug": "all"
}
}
});
let mut harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
harness.start().await?;
harness.wait_for_connected().await?;
harness.consume_initial_sink_contraflow().await?;
let consumer = ClientConfig::new()
.set("bootstrap.servers", &broker)
.set("group.id", "connector_kafka_producer")
//.set("client.id", "my-client")
//.set("socket.timeout.ms", "2000")
.set("session.timeout.ms", "6000")
.set("auto.offset.reset", "earliest")
.set("enable.auto.commit", "false")
//.set("auto.commit.interval.ms", "100")
.set("enable.auto.offset.store", "false")
//.set("debug", "all")
.create::<StreamConsumer>()
.expect("Consumer creation error");
consumer
.subscribe(&[topic])
.expect("Can't subscribe to specified topic");
let mut message_stream = consumer.stream();
let data = literal!({
"snot": "badger"
});
let meta = literal!({});
let e1 = Event {
id: EventId::default(),
data: (data.clone(), meta).into(),
transactional: false,
..Event::default()
};
harness.send_to_sink(e1, IN).await?;
match timeout(Duration::from_secs(30), message_stream.next()) // first message, we might need to wait a little longer for the consumer to boot up and settle things with redpanda
.await?
{
Some(Ok(msg)) => {
assert_eq!(msg.key(), Some("snot".as_bytes()));
assert_eq!(msg.payload(), Some("{\"snot\":\"badger\"}".as_bytes()));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("Topic Stream unexpectedly finished.".into());
}
};
assert!(harness.get_pipe(IN)?.get_contraflow_events().is_empty());
let data2 = literal!([1, 2, 3]);
let meta2 = literal!({
"kafka_producer": {
"key": "badger",
"headers": {
"foo": "baz"
},
"timestamp": 123_000_000,
"partition": 0
}
});
let e2 = Event {
id: EventId::default(),
data: (data2, meta2).into(),
transactional: true,
..Event::default()
};
harness.send_to_sink(e2, IN).await?;
match timeout(Duration::from_secs(5), message_stream.next()).await? {
Some(Ok(msg)) => {
assert_eq!(Some("badger".as_bytes()), msg.key());
assert_eq!(Some("[1,2,3]".as_bytes()), msg.payload());
assert_eq!(0_i32, msg.partition());
assert_eq!(Some(123), msg.timestamp().to_millis());
let headers = msg.headers().expect("No headers found");
assert_eq!(1, headers.count());
let h = headers.get(0);
assert_eq!("foo", h.key);
assert_eq!("baz".as_bytes(), h.value.expect("no value"));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("EOF on kafka topic".into());
}
}
// batched event
let batched_data = literal!([{
"data": {
"value": {
"field1": 0.1,
"field3": []
},
"meta": {
"kafka_producer": {
"key": "nananananana: batchman!"
}
}
}
}, {
"data": {
"value": {
"field2": "just a string"
},
"meta": {}
}
}]);
let batched_meta = literal!({});
let batched_event = Event {
id: EventId::from_id(0, 0, 1),
data: (batched_data, batched_meta).into(),
transactional: true,
is_batch: true,
..Event::default()
};
harness.send_to_sink(batched_event, IN).await?;
let borrowed_batchman_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_batchman_msg, CommitMode::Sync)
.expect("commit failed");
let mut batchman_msg = borrowed_batchman_msg.detach();
drop(borrowed_batchman_msg);
let borrowed_snot_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_snot_msg, CommitMode::Sync)
.expect("commit failed");
let mut snot_msg = borrowed_snot_msg.detach();
drop(borrowed_snot_msg);
if batchman_msg.key().eq(&Some("snot".as_bytes())) {
core::mem::swap(&mut snot_msg, &mut batchman_msg);
}
assert_eq!(
Some("nananananana: batchman!".as_bytes()),
batchman_msg.key()
);
assert_eq!(
Some("{\"field1\":0.1,\"field3\":[]}".as_bytes()),
batchman_msg.payload()
);
assert!(batchman_msg.headers().is_none());
assert_eq!(Some("snot".as_bytes()), snot_msg.key());
assert_eq!(
Some("{\"field2\":\"just a string\"}".as_bytes()),
snot_msg.payload()
);
assert!(snot_msg.headers().is_none());
consumer.unsubscribe();
drop(message_stream);
drop(consumer);
// shutdown
let (out_events, err_events) = harness.stop().await?;
assert_eq!(out_events, vec![]);
assert_eq!(err_events, vec![]);
// cleanup
drop(container);
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unreachable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("127.0.0.1:{port}");
let topic = "unreachable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unresolvable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("i_do_not_resolve:{port}");
let topic = "unresolvable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
| connector_kafka_producer | identifier_name |
producer.rs | // Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::super::ConnectorHarness;
use super::redpanda_container;
use crate::connectors::tests::free_port::find_free_tcp_port;
use crate::{connectors::impls::kafka, errors::Result, Event};
use futures::StreamExt;
use rdkafka::{
admin::{AdminClient, AdminOptions, NewTopic, TopicReplication},
config::FromClientConfig,
consumer::{CommitMode, Consumer, StreamConsumer},
message::Headers,
ClientConfig, Message,
};
use serial_test::serial;
use std::time::Duration;
use testcontainers::clients::Cli as DockerCli;
use tokio::time::timeout;
use tremor_common::ports::IN;
use tremor_pipeline::EventId;
use tremor_value::literal;
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn connector_kafka_producer() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let docker = DockerCli::default();
let container = redpanda_container(&docker).await?;
let port = container.get_host_port_ipv4(9092);
let mut admin_config = ClientConfig::new();
let broker = format!("127.0.0.1:{port}");
let topic = "tremor_test";
let num_partitions = 3;
let num_replicas = 1;
admin_config
.set("client.id", "test-admin")
.set("bootstrap.servers", &broker);
let admin_client = AdminClient::from_config(&admin_config)?;
let options = AdminOptions::default();
let res = admin_client
.create_topics(
vec![&NewTopic::new(
topic,
num_partitions,
TopicReplication::Fixed(num_replicas),
)],
&options,
)
.await?;
for r in res {
match r {
Err((topic, err)) => {
error!("Error creating topic {}: {}", &topic, err);
}
Ok(topic) => {
info!("Created topic {}", topic);
}
}
}
let connector_config = literal!({
"reconnect": {
"retry": {
"interval_ms": 1000_u64,
"max_retries": 10_u64
}
},
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
// "debug": "all"
}
}
});
let mut harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
harness.start().await?;
harness.wait_for_connected().await?;
harness.consume_initial_sink_contraflow().await?;
let consumer = ClientConfig::new()
.set("bootstrap.servers", &broker)
.set("group.id", "connector_kafka_producer")
//.set("client.id", "my-client")
//.set("socket.timeout.ms", "2000")
.set("session.timeout.ms", "6000")
.set("auto.offset.reset", "earliest")
.set("enable.auto.commit", "false")
//.set("auto.commit.interval.ms", "100")
.set("enable.auto.offset.store", "false")
//.set("debug", "all")
.create::<StreamConsumer>()
.expect("Consumer creation error");
consumer
.subscribe(&[topic])
.expect("Can't subscribe to specified topic");
let mut message_stream = consumer.stream();
let data = literal!({
"snot": "badger"
});
let meta = literal!({});
let e1 = Event {
id: EventId::default(),
data: (data.clone(), meta).into(),
transactional: false,
..Event::default()
};
harness.send_to_sink(e1, IN).await?;
match timeout(Duration::from_secs(30), message_stream.next()) // first message, we might need to wait a little longer for the consumer to boot up and settle things with redpanda
.await?
{
Some(Ok(msg)) => {
assert_eq!(msg.key(), Some("snot".as_bytes()));
assert_eq!(msg.payload(), Some("{\"snot\":\"badger\"}".as_bytes()));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("Topic Stream unexpectedly finished.".into());
}
};
assert!(harness.get_pipe(IN)?.get_contraflow_events().is_empty());
let data2 = literal!([1, 2, 3]);
let meta2 = literal!({
"kafka_producer": {
"key": "badger",
"headers": {
"foo": "baz"
},
"timestamp": 123_000_000,
"partition": 0
}
});
let e2 = Event {
id: EventId::default(),
data: (data2, meta2).into(),
transactional: true,
..Event::default()
};
harness.send_to_sink(e2, IN).await?;
match timeout(Duration::from_secs(5), message_stream.next()).await? {
Some(Ok(msg)) => {
assert_eq!(Some("badger".as_bytes()), msg.key());
assert_eq!(Some("[1,2,3]".as_bytes()), msg.payload());
assert_eq!(0_i32, msg.partition());
assert_eq!(Some(123), msg.timestamp().to_millis());
let headers = msg.headers().expect("No headers found");
assert_eq!(1, headers.count());
let h = headers.get(0);
assert_eq!("foo", h.key);
assert_eq!("baz".as_bytes(), h.value.expect("no value"));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("EOF on kafka topic".into());
}
}
// batched event | "field1": 0.1,
"field3": []
},
"meta": {
"kafka_producer": {
"key": "nananananana: batchman!"
}
}
}
}, {
"data": {
"value": {
"field2": "just a string"
},
"meta": {}
}
}]);
let batched_meta = literal!({});
let batched_event = Event {
id: EventId::from_id(0, 0, 1),
data: (batched_data, batched_meta).into(),
transactional: true,
is_batch: true,
..Event::default()
};
harness.send_to_sink(batched_event, IN).await?;
let borrowed_batchman_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_batchman_msg, CommitMode::Sync)
.expect("commit failed");
let mut batchman_msg = borrowed_batchman_msg.detach();
drop(borrowed_batchman_msg);
let borrowed_snot_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_snot_msg, CommitMode::Sync)
.expect("commit failed");
let mut snot_msg = borrowed_snot_msg.detach();
drop(borrowed_snot_msg);
if batchman_msg.key().eq(&Some("snot".as_bytes())) {
core::mem::swap(&mut snot_msg, &mut batchman_msg);
}
assert_eq!(
Some("nananananana: batchman!".as_bytes()),
batchman_msg.key()
);
assert_eq!(
Some("{\"field1\":0.1,\"field3\":[]}".as_bytes()),
batchman_msg.payload()
);
assert!(batchman_msg.headers().is_none());
assert_eq!(Some("snot".as_bytes()), snot_msg.key());
assert_eq!(
Some("{\"field2\":\"just a string\"}".as_bytes()),
snot_msg.payload()
);
assert!(snot_msg.headers().is_none());
consumer.unsubscribe();
drop(message_stream);
drop(consumer);
// shutdown
let (out_events, err_events) = harness.stop().await?;
assert_eq!(out_events, vec![]);
assert_eq!(err_events, vec![]);
// cleanup
drop(container);
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unreachable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("127.0.0.1:{port}");
let topic = "unreachable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unresolvable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("i_do_not_resolve:{port}");
let topic = "unresolvable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
} | let batched_data = literal!([{
"data": {
"value": { | random_line_split |
producer.rs | // Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::super::ConnectorHarness;
use super::redpanda_container;
use crate::connectors::tests::free_port::find_free_tcp_port;
use crate::{connectors::impls::kafka, errors::Result, Event};
use futures::StreamExt;
use rdkafka::{
admin::{AdminClient, AdminOptions, NewTopic, TopicReplication},
config::FromClientConfig,
consumer::{CommitMode, Consumer, StreamConsumer},
message::Headers,
ClientConfig, Message,
};
use serial_test::serial;
use std::time::Duration;
use testcontainers::clients::Cli as DockerCli;
use tokio::time::timeout;
use tremor_common::ports::IN;
use tremor_pipeline::EventId;
use tremor_value::literal;
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn connector_kafka_producer() -> Result<()> |
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unreachable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("127.0.0.1:{port}");
let topic = "unreachable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unresolvable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("i_do_not_resolve:{port}");
let topic = "unresolvable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
| {
let _: std::result::Result<_, _> = env_logger::try_init();
let docker = DockerCli::default();
let container = redpanda_container(&docker).await?;
let port = container.get_host_port_ipv4(9092);
let mut admin_config = ClientConfig::new();
let broker = format!("127.0.0.1:{port}");
let topic = "tremor_test";
let num_partitions = 3;
let num_replicas = 1;
admin_config
.set("client.id", "test-admin")
.set("bootstrap.servers", &broker);
let admin_client = AdminClient::from_config(&admin_config)?;
let options = AdminOptions::default();
let res = admin_client
.create_topics(
vec![&NewTopic::new(
topic,
num_partitions,
TopicReplication::Fixed(num_replicas),
)],
&options,
)
.await?;
for r in res {
match r {
Err((topic, err)) => {
error!("Error creating topic {}: {}", &topic, err);
}
Ok(topic) => {
info!("Created topic {}", topic);
}
}
}
let connector_config = literal!({
"reconnect": {
"retry": {
"interval_ms": 1000_u64,
"max_retries": 10_u64
}
},
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
// "debug": "all"
}
}
});
let mut harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
harness.start().await?;
harness.wait_for_connected().await?;
harness.consume_initial_sink_contraflow().await?;
let consumer = ClientConfig::new()
.set("bootstrap.servers", &broker)
.set("group.id", "connector_kafka_producer")
//.set("client.id", "my-client")
//.set("socket.timeout.ms", "2000")
.set("session.timeout.ms", "6000")
.set("auto.offset.reset", "earliest")
.set("enable.auto.commit", "false")
//.set("auto.commit.interval.ms", "100")
.set("enable.auto.offset.store", "false")
//.set("debug", "all")
.create::<StreamConsumer>()
.expect("Consumer creation error");
consumer
.subscribe(&[topic])
.expect("Can't subscribe to specified topic");
let mut message_stream = consumer.stream();
let data = literal!({
"snot": "badger"
});
let meta = literal!({});
let e1 = Event {
id: EventId::default(),
data: (data.clone(), meta).into(),
transactional: false,
..Event::default()
};
harness.send_to_sink(e1, IN).await?;
match timeout(Duration::from_secs(30), message_stream.next()) // first message, we might need to wait a little longer for the consumer to boot up and settle things with redpanda
.await?
{
Some(Ok(msg)) => {
assert_eq!(msg.key(), Some("snot".as_bytes()));
assert_eq!(msg.payload(), Some("{\"snot\":\"badger\"}".as_bytes()));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("Topic Stream unexpectedly finished.".into());
}
};
assert!(harness.get_pipe(IN)?.get_contraflow_events().is_empty());
let data2 = literal!([1, 2, 3]);
let meta2 = literal!({
"kafka_producer": {
"key": "badger",
"headers": {
"foo": "baz"
},
"timestamp": 123_000_000,
"partition": 0
}
});
let e2 = Event {
id: EventId::default(),
data: (data2, meta2).into(),
transactional: true,
..Event::default()
};
harness.send_to_sink(e2, IN).await?;
match timeout(Duration::from_secs(5), message_stream.next()).await? {
Some(Ok(msg)) => {
assert_eq!(Some("badger".as_bytes()), msg.key());
assert_eq!(Some("[1,2,3]".as_bytes()), msg.payload());
assert_eq!(0_i32, msg.partition());
assert_eq!(Some(123), msg.timestamp().to_millis());
let headers = msg.headers().expect("No headers found");
assert_eq!(1, headers.count());
let h = headers.get(0);
assert_eq!("foo", h.key);
assert_eq!("baz".as_bytes(), h.value.expect("no value"));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("EOF on kafka topic".into());
}
}
// batched event
let batched_data = literal!([{
"data": {
"value": {
"field1": 0.1,
"field3": []
},
"meta": {
"kafka_producer": {
"key": "nananananana: batchman!"
}
}
}
}, {
"data": {
"value": {
"field2": "just a string"
},
"meta": {}
}
}]);
let batched_meta = literal!({});
let batched_event = Event {
id: EventId::from_id(0, 0, 1),
data: (batched_data, batched_meta).into(),
transactional: true,
is_batch: true,
..Event::default()
};
harness.send_to_sink(batched_event, IN).await?;
let borrowed_batchman_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_batchman_msg, CommitMode::Sync)
.expect("commit failed");
let mut batchman_msg = borrowed_batchman_msg.detach();
drop(borrowed_batchman_msg);
let borrowed_snot_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_snot_msg, CommitMode::Sync)
.expect("commit failed");
let mut snot_msg = borrowed_snot_msg.detach();
drop(borrowed_snot_msg);
if batchman_msg.key().eq(&Some("snot".as_bytes())) {
core::mem::swap(&mut snot_msg, &mut batchman_msg);
}
assert_eq!(
Some("nananananana: batchman!".as_bytes()),
batchman_msg.key()
);
assert_eq!(
Some("{\"field1\":0.1,\"field3\":[]}".as_bytes()),
batchman_msg.payload()
);
assert!(batchman_msg.headers().is_none());
assert_eq!(Some("snot".as_bytes()), snot_msg.key());
assert_eq!(
Some("{\"field2\":\"just a string\"}".as_bytes()),
snot_msg.payload()
);
assert!(snot_msg.headers().is_none());
consumer.unsubscribe();
drop(message_stream);
drop(consumer);
// shutdown
let (out_events, err_events) = harness.stop().await?;
assert_eq!(out_events, vec![]);
assert_eq!(err_events, vec![]);
// cleanup
drop(container);
Ok(())
} | identifier_body |
producer.rs | // Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::super::ConnectorHarness;
use super::redpanda_container;
use crate::connectors::tests::free_port::find_free_tcp_port;
use crate::{connectors::impls::kafka, errors::Result, Event};
use futures::StreamExt;
use rdkafka::{
admin::{AdminClient, AdminOptions, NewTopic, TopicReplication},
config::FromClientConfig,
consumer::{CommitMode, Consumer, StreamConsumer},
message::Headers,
ClientConfig, Message,
};
use serial_test::serial;
use std::time::Duration;
use testcontainers::clients::Cli as DockerCli;
use tokio::time::timeout;
use tremor_common::ports::IN;
use tremor_pipeline::EventId;
use tremor_value::literal;
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn connector_kafka_producer() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let docker = DockerCli::default();
let container = redpanda_container(&docker).await?;
let port = container.get_host_port_ipv4(9092);
let mut admin_config = ClientConfig::new();
let broker = format!("127.0.0.1:{port}");
let topic = "tremor_test";
let num_partitions = 3;
let num_replicas = 1;
admin_config
.set("client.id", "test-admin")
.set("bootstrap.servers", &broker);
let admin_client = AdminClient::from_config(&admin_config)?;
let options = AdminOptions::default();
let res = admin_client
.create_topics(
vec![&NewTopic::new(
topic,
num_partitions,
TopicReplication::Fixed(num_replicas),
)],
&options,
)
.await?;
for r in res {
match r {
Err((topic, err)) => |
Ok(topic) => {
info!("Created topic {}", topic);
}
}
}
let connector_config = literal!({
"reconnect": {
"retry": {
"interval_ms": 1000_u64,
"max_retries": 10_u64
}
},
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
// "debug": "all"
}
}
});
let mut harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
harness.start().await?;
harness.wait_for_connected().await?;
harness.consume_initial_sink_contraflow().await?;
let consumer = ClientConfig::new()
.set("bootstrap.servers", &broker)
.set("group.id", "connector_kafka_producer")
//.set("client.id", "my-client")
//.set("socket.timeout.ms", "2000")
.set("session.timeout.ms", "6000")
.set("auto.offset.reset", "earliest")
.set("enable.auto.commit", "false")
//.set("auto.commit.interval.ms", "100")
.set("enable.auto.offset.store", "false")
//.set("debug", "all")
.create::<StreamConsumer>()
.expect("Consumer creation error");
consumer
.subscribe(&[topic])
.expect("Can't subscribe to specified topic");
let mut message_stream = consumer.stream();
let data = literal!({
"snot": "badger"
});
let meta = literal!({});
let e1 = Event {
id: EventId::default(),
data: (data.clone(), meta).into(),
transactional: false,
..Event::default()
};
harness.send_to_sink(e1, IN).await?;
match timeout(Duration::from_secs(30), message_stream.next()) // first message, we might need to wait a little longer for the consumer to boot up and settle things with redpanda
.await?
{
Some(Ok(msg)) => {
assert_eq!(msg.key(), Some("snot".as_bytes()));
assert_eq!(msg.payload(), Some("{\"snot\":\"badger\"}".as_bytes()));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("Topic Stream unexpectedly finished.".into());
}
};
assert!(harness.get_pipe(IN)?.get_contraflow_events().is_empty());
let data2 = literal!([1, 2, 3]);
let meta2 = literal!({
"kafka_producer": {
"key": "badger",
"headers": {
"foo": "baz"
},
"timestamp": 123_000_000,
"partition": 0
}
});
let e2 = Event {
id: EventId::default(),
data: (data2, meta2).into(),
transactional: true,
..Event::default()
};
harness.send_to_sink(e2, IN).await?;
match timeout(Duration::from_secs(5), message_stream.next()).await? {
Some(Ok(msg)) => {
assert_eq!(Some("badger".as_bytes()), msg.key());
assert_eq!(Some("[1,2,3]".as_bytes()), msg.payload());
assert_eq!(0_i32, msg.partition());
assert_eq!(Some(123), msg.timestamp().to_millis());
let headers = msg.headers().expect("No headers found");
assert_eq!(1, headers.count());
let h = headers.get(0);
assert_eq!("foo", h.key);
assert_eq!("baz".as_bytes(), h.value.expect("no value"));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("EOF on kafka topic".into());
}
}
// batched event
let batched_data = literal!([{
"data": {
"value": {
"field1": 0.1,
"field3": []
},
"meta": {
"kafka_producer": {
"key": "nananananana: batchman!"
}
}
}
}, {
"data": {
"value": {
"field2": "just a string"
},
"meta": {}
}
}]);
let batched_meta = literal!({});
let batched_event = Event {
id: EventId::from_id(0, 0, 1),
data: (batched_data, batched_meta).into(),
transactional: true,
is_batch: true,
..Event::default()
};
harness.send_to_sink(batched_event, IN).await?;
let borrowed_batchman_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_batchman_msg, CommitMode::Sync)
.expect("commit failed");
let mut batchman_msg = borrowed_batchman_msg.detach();
drop(borrowed_batchman_msg);
let borrowed_snot_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_snot_msg, CommitMode::Sync)
.expect("commit failed");
let mut snot_msg = borrowed_snot_msg.detach();
drop(borrowed_snot_msg);
if batchman_msg.key().eq(&Some("snot".as_bytes())) {
core::mem::swap(&mut snot_msg, &mut batchman_msg);
}
assert_eq!(
Some("nananananana: batchman!".as_bytes()),
batchman_msg.key()
);
assert_eq!(
Some("{\"field1\":0.1,\"field3\":[]}".as_bytes()),
batchman_msg.payload()
);
assert!(batchman_msg.headers().is_none());
assert_eq!(Some("snot".as_bytes()), snot_msg.key());
assert_eq!(
Some("{\"field2\":\"just a string\"}".as_bytes()),
snot_msg.payload()
);
assert!(snot_msg.headers().is_none());
consumer.unsubscribe();
drop(message_stream);
drop(consumer);
// shutdown
let (out_events, err_events) = harness.stop().await?;
assert_eq!(out_events, vec![]);
assert_eq!(err_events, vec![]);
// cleanup
drop(container);
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unreachable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("127.0.0.1:{port}");
let topic = "unreachable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unresolvable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("i_do_not_resolve:{port}");
let topic = "unresolvable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
| {
error!("Error creating topic {}: {}", &topic, err);
} | conditional_block |
enforsbot.py | #!/usr/bin/env python3
"enforsbot.py by Christer Enfors (c) 2015, 2016, 2017"
from __future__ import print_function
import datetime
import re
import socket
import subprocess
import sqlite3
import eb_activity
import eb_config
import eb_cmds_loader
import eb_irc
import eb_math
import eb_message
import eb_parser
import eb_telegram
import eb_twitter
import eb_user
#twitter_thread = eb_twitter.TwitterThread()
SYSCOND_DIR = "/home/enfors/syscond"
class EnforsBot(object):
"The main class of the application."
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
# I see no problem with them.
def __init__(self):
self.config = eb_config.Config()
self.cmds_loader = eb_cmds_loader.CmdsLoader(["user", "admin"])
self.cmd_parser = eb_parser.CmdParser(self.cmds_loader)
# Responses are regexps.
self.responses = {
"ip" : self.respond_ip,
"what.* ip .*" : self.respond_ip,
"what.*address.*" : self.respond_ip,
".*good morning.*" : "Good morning!",
".*good afternoon.*" : "Good afternoon!",
".*good evening.*" : "Good evening!",
".*good night.*" : "Good night!",
"thank.*" : "You're welcome.",
"test" : "I am running and connected.",
"hello" : "Hello there!",
"hi" : "Hi there!",
"LocationUpdate .*" : self.handle_incoming_location_update,
"locate" : self.respond_location,
"syscond" : self.respond_syscond,
"status" : self.respond_status,
"lights out" : self.respond_lights_off,
"lights off" : self.respond_lights_off,
"lights on" : self.respond_lights_on,
}
# Incoming user messages can come from several different threads.
# When we get one, we keep track of which thread it's from, so
# we know which thread we should send the response to. For example,
# if we get a user message from TwitterStream, we should send the
# response to TwitterRest.
self.response_threads = {
#Incoming from Send response to
#=============== ================
"TwitterStreams" : "TwitterRest",
"Telegram" : "Telegram",
"IRC" : "IRC"
}
self.activity_cmds = {
"multi" : self.start_multi,
}
self.location = None
self.arrived = False
self.database = sqlite3.connect("enforsbot.db",
detect_types=sqlite3.PARSE_DECLTYPES)
self.user_handler = eb_user.UserHandler(self.config, self.database)
def start(self):
"Start the bot."
self.start_all_threads()
self.main_loop()
def main_loop(self):
"The main loop of the bot."
try:
while True:
message = self.config.recv_message("Main")
if message.msg_type == \
eb_message.MSG_TYPE_THREAD_STARTED:
print("Thread started: %s" % message.sender)
self.config.set_thread_state(message.sender,
"running")
elif message.msg_type == eb_message.MSG_TYPE_THREAD_STOPPED:
print("Thread stopped: %s" % message.sender)
self.config.set_thread_state(message.sender,
"stopped")
elif message.msg_type == eb_message.MSG_TYPE_USER_MESSAGE:
self.handle_incoming_user_message(message,
self.response_threads[message.sender])
elif message.msg_type == eb_message.MSG_TYPE_LOCATION_UPDATE:
self.handle_incoming_location_update(message)
elif message.msg_type == eb_message.MSG_TYPE_NOTIFY_USER:
self.handle_incoming_notify_user(message)
else:
print("Unsupported incoming message type: %d" % \
message.msg_type)
except (KeyboardInterrupt, SystemExit):
self.stop_all_threads()
return
def start_all_threads(self):
"Start all necessary threads."
# pylint: disable=not-context-manager
with self.config.lock:
twitter_thread = eb_twitter.TwitterThread("Twitter",
self.config)
self.config.threads["Twitter"] = twitter_thread
telegram_thread = eb_telegram.TelegramThread("Telegram",
self.config)
self.config.threads["Telegram"] = telegram_thread
irc_thread = eb_irc.IRCThread("IRC", self.config)
self.config.threads["IRC"] = irc_thread
self.config.set_thread_state("Twitter", "starting")
twitter_thread.start()
self.config.set_thread_state("Telegram", "starting")
telegram_thread.start()
self.config.set_thread_state("IRC", "starting")
irc_thread.start()
def stop_all_threads(self):
"Stop all threads."
print("") # Add a newline to get away from "^C" on screen
# pylint: disable=not-context-manager
with self.config.lock:
threads_to_stop = [thread for thread in self.config.threads if
self.config.thread_states[thread] == "running"]
print("Stopping threads: %s" % threads_to_stop)
for thread in threads_to_stop:
if thread not in self.config.threads:
print("ERROR: %s not in self.config.threads!" % thread)
self.stop_thread(thread)
print("ALL THREADS STOPPED.")
def stop_thread(self, thread):
"Stop one specific thread."
message = eb_message.Message("Main",
eb_message.MSG_TYPE_STOP_THREAD, {})
self.config.send_message(thread, message)
self.config.threads[thread].join()
def handle_incoming_user_message(self, message, response_thread):
"Handle an incoming message of type USER."
user_name = message.data["user"]
text = message.data["text"]
print("Main: Message from %s: '%s'" % (user_name, text))
protocol = response_thread
if protocol.startswith("Twitter"):
protocol = "Twitter"
user = self.user_handler.find_user_by_identifier(protocol,
user_name)
response = ""
choices = []
# If this is an IRC message:
if response_thread == "IRC":
# msg_type = message.data["msg_type"]
channel = message.data["channel"]
# But don't respond unless it's a private message.
if channel.lower() != "enforsbot" and \
channel.lower() != "enforstestbot":
return None
text = text.lower()
# If this is a command to start an activity:
# commented out - should be replaced with proper commands
# if text in self.activity_cmds.keys() and not user.current_activity():
# self.start_activity(user, text)
# If we don't have a name for the user, then insert
# a question about the user's name.
# Check if new unknown user
# =========================
if user.name is None and not user.current_activity():
self.start_ask_user_name(user, text)
# If no ongoing activity
# ======================
if not user.current_activity():
# Check patterns
# ==============
for pattern, pattern_response in self.responses.items():
|
# If no pattern match found, check commands
# =========================================
if response == "":
response, choices = self.cmd_parser.parse(text, user)
# Handle any ongoing activities
# =============================
if user.current_activity():
repeat = True
while repeat:
status = self.handle_activity(user, text)
response += status.output + " "
choices = status.choices
repeat = status.done and user.current_activity()
if repeat:
text = status.result
# Admit defeat
# ============
if response == "":
response = "I have no clue what you're talking about."
# Send response
# =============
response = response.strip() + "\n"
print(" - Response: %s" % response.replace("\n", " "))
message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": user_name,
"text": response,
"choices": choices})
self.config.send_message(response_thread, message)
def start_activity(self, user, text):
"""Check if text is a command to start an activity, and if so,
start it. Return True if started, otherwise False."""
text = text.strip().lower()
if text in self.activity_cmds.keys():
self.activity_cmds[text](user, text)
return True
return False
@staticmethod
def handle_activity(user, text):
"""Send user input to ongoing activity."""
activity = user.current_activity()
if not activity:
return None
status = activity.handle_text(text)
if status.done:
user.remove_activity()
return status
@staticmethod
def start_ask_user_name(user, text):
"""Ask the user for their name."""
activity = eb_activity.AskUserNameActivity(user)
user.insert_activity(activity)
@staticmethod
def start_multi(user, text):
"""Start multiplication practice activity."""
activity = eb_math.MathDrill(user)
user.push_activity(activity)
return True
@staticmethod
def respond_ip(message):
"Return our local IP address."
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(("gmail.com", 80)) # I'm abusing gmail.
response = "I'm currently running on IP address %s." % \
sock.getsockname()[0]
sock.close()
return response
def handle_incoming_location_update(self, message):
"Handle incoming request for our location."
user = "Enfors" # Hardcoded for now. Sue me.
location = message.data["location"]
arrived = message.data["arrived"]
print("Updating location: [%s:%s]" % (location, str(arrived)))
# pylint: disable=not-context-manager
with self.config.lock, self.database:
cur = self.database.cursor()
if arrived:
self.location = location
self.arrived = True
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'arrived', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location updated: %s" % self.location)
else: # if leaving
# If leaving the location I'm currently at (sometimes
# the "left source" message arrives AFTER "arrived at
# destination" message), skipping those.
if self.arrived is False or location == self.location:
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'left', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location left: %s" % location)
self.arrived = False
return None
def handle_incoming_notify_user(self, message):
"Send notification message through Twitter."
out_message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": message.data["user"],
"text": message.data["text"]})
self.config.send_message("TwitterRest", out_message)
def respond_location(self, message):
"Return our location."
with self.database:
cur = self.database.cursor()
cur.execute("select * from LOCATION_HISTORY "
"order by ROWID desc limit 1")
try:
(user, location, event, timestamp) = cur.fetchone()
except TypeError:
return "I have no information on that."
if event == "arrived":
return "%s %s at %s %s." % \
(user, event, location,
self.get_datetime_diff_string(timestamp,
datetime.datetime.now()))
return "%s %s %s %s." % \
(user, event, location,
self.get_datetime_diff_string(timestamp,
datetime.datetime.now()))
def respond_syscond(self, message):
"Return the SysCond status of the host."
return self.check_syscond()
def respond_status(self, message):
"Return threads status."
output = ""
for thread in self.config.threads:
output += "%s: %s\n" % (thread,
self.config.get_thread_state(thread))
return output
def respond_lights_on(self, message):
"Turn the lights on in my house."
subprocess.call(["lights", "on"])
return "Lights have been turned ON."
def respond_lights_off(self, message):
"Turn the lights out in my house."
subprocess.call(["lights", "off"])
return "Lights have been turned OFF."
def check_syscond(self):
"Check the SysCond status of the host."
try:
syscond_output = subprocess.Popen(["syscond", "status", "-n"],
stdout=subprocess.PIPE).\
communicate()[0]
return syscond_output.decode("utf-8")
except FileNotFoundError:
return "SysCond is not installed on this host."
def get_datetime_diff_string(self, date1, date2):
"Return the diff between two datetimes, in short format."
if date1 > date2:
return "in the future"
diff = date2 - date1
total_seconds = diff.total_seconds()
minutes = total_seconds // 60
hours = total_seconds // 60 // 60
days = total_seconds // 60 // 60 // 24
if days:
hours -= (days * 24)
return "%d %s, %d %s ago" % (days,
self.get_possible_plural("day", days),
hours,
self.get_possible_plural("hour",
hours))
elif hours:
minutes -= (hours * 60)
return "%d %s, %d %s ago" % (hours,
self.get_possible_plural("hour",
hours),
minutes,
self.get_possible_plural("minute",
minutes))
elif minutes:
return "%d %s ago" % (minutes, self.get_possible_plural("minute",
minutes))
else:
return "just now"
def get_possible_plural(self, word, num):
"Return word+s if num is plural, otherwise word."
if num == 1:
return word
else:
return word + "s"
def main():
"Start the application."
app = EnforsBot()
app.start()
if __name__ == "__main__":
main()
| pat = re.compile(pattern)
if pat.match(text):
response = pattern_response
if callable(response):
response = response(text) | conditional_block |
enforsbot.py | #!/usr/bin/env python3
"enforsbot.py by Christer Enfors (c) 2015, 2016, 2017"
from __future__ import print_function
import datetime
import re
import socket
import subprocess
import sqlite3
import eb_activity
import eb_config
import eb_cmds_loader
import eb_irc
import eb_math
import eb_message
import eb_parser
import eb_telegram
import eb_twitter
import eb_user
#twitter_thread = eb_twitter.TwitterThread()
SYSCOND_DIR = "/home/enfors/syscond"
class EnforsBot(object):
"The main class of the application."
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
# I see no problem with them.
def __init__(self):
self.config = eb_config.Config()
self.cmds_loader = eb_cmds_loader.CmdsLoader(["user", "admin"])
self.cmd_parser = eb_parser.CmdParser(self.cmds_loader)
# Responses are regexps.
self.responses = {
"ip" : self.respond_ip,
"what.* ip .*" : self.respond_ip,
"what.*address.*" : self.respond_ip,
".*good morning.*" : "Good morning!",
".*good afternoon.*" : "Good afternoon!",
".*good evening.*" : "Good evening!",
".*good night.*" : "Good night!",
"thank.*" : "You're welcome.",
"test" : "I am running and connected.",
"hello" : "Hello there!",
"hi" : "Hi there!",
"LocationUpdate .*" : self.handle_incoming_location_update,
"locate" : self.respond_location,
"syscond" : self.respond_syscond,
"status" : self.respond_status,
"lights out" : self.respond_lights_off,
"lights off" : self.respond_lights_off,
"lights on" : self.respond_lights_on,
}
# Incoming user messages can come from several different threads.
# When we get one, we keep track of which thread it's from, so
# we know which thread we should send the response to. For example,
# if we get a user message from TwitterStream, we should send the
# response to TwitterRest.
self.response_threads = {
#Incoming from Send response to
#=============== ================
"TwitterStreams" : "TwitterRest",
"Telegram" : "Telegram",
"IRC" : "IRC"
}
self.activity_cmds = {
"multi" : self.start_multi,
}
self.location = None
self.arrived = False
self.database = sqlite3.connect("enforsbot.db",
detect_types=sqlite3.PARSE_DECLTYPES)
self.user_handler = eb_user.UserHandler(self.config, self.database)
def start(self):
"Start the bot."
self.start_all_threads()
self.main_loop()
def main_loop(self):
"The main loop of the bot."
try:
while True:
message = self.config.recv_message("Main")
if message.msg_type == \
eb_message.MSG_TYPE_THREAD_STARTED:
print("Thread started: %s" % message.sender)
self.config.set_thread_state(message.sender,
"running")
elif message.msg_type == eb_message.MSG_TYPE_THREAD_STOPPED:
print("Thread stopped: %s" % message.sender)
self.config.set_thread_state(message.sender,
"stopped")
elif message.msg_type == eb_message.MSG_TYPE_USER_MESSAGE:
self.handle_incoming_user_message(message,
self.response_threads[message.sender])
elif message.msg_type == eb_message.MSG_TYPE_LOCATION_UPDATE:
self.handle_incoming_location_update(message)
elif message.msg_type == eb_message.MSG_TYPE_NOTIFY_USER:
self.handle_incoming_notify_user(message)
else:
print("Unsupported incoming message type: %d" % \
message.msg_type)
except (KeyboardInterrupt, SystemExit):
self.stop_all_threads()
return
def start_all_threads(self):
"Start all necessary threads."
# pylint: disable=not-context-manager
with self.config.lock:
twitter_thread = eb_twitter.TwitterThread("Twitter",
self.config)
self.config.threads["Twitter"] = twitter_thread
telegram_thread = eb_telegram.TelegramThread("Telegram",
self.config)
self.config.threads["Telegram"] = telegram_thread
irc_thread = eb_irc.IRCThread("IRC", self.config)
self.config.threads["IRC"] = irc_thread
self.config.set_thread_state("Twitter", "starting")
twitter_thread.start()
self.config.set_thread_state("Telegram", "starting")
telegram_thread.start()
self.config.set_thread_state("IRC", "starting")
irc_thread.start()
def stop_all_threads(self):
"Stop all threads."
print("") # Add a newline to get away from "^C" on screen
# pylint: disable=not-context-manager
with self.config.lock:
threads_to_stop = [thread for thread in self.config.threads if
self.config.thread_states[thread] == "running"]
print("Stopping threads: %s" % threads_to_stop)
for thread in threads_to_stop:
if thread not in self.config.threads:
print("ERROR: %s not in self.config.threads!" % thread)
self.stop_thread(thread)
print("ALL THREADS STOPPED.")
def stop_thread(self, thread):
"Stop one specific thread."
message = eb_message.Message("Main",
eb_message.MSG_TYPE_STOP_THREAD, {})
self.config.send_message(thread, message)
self.config.threads[thread].join()
def handle_incoming_user_message(self, message, response_thread):
"Handle an incoming message of type USER."
user_name = message.data["user"]
text = message.data["text"]
print("Main: Message from %s: '%s'" % (user_name, text))
protocol = response_thread
if protocol.startswith("Twitter"):
protocol = "Twitter"
user = self.user_handler.find_user_by_identifier(protocol,
user_name)
response = ""
choices = []
# If this is an IRC message:
if response_thread == "IRC":
# msg_type = message.data["msg_type"]
channel = message.data["channel"]
# But don't respond unless it's a private message.
if channel.lower() != "enforsbot" and \
channel.lower() != "enforstestbot":
return None
text = text.lower()
# If this is a command to start an activity:
# commented out - should be replaced with proper commands
# if text in self.activity_cmds.keys() and not user.current_activity():
# self.start_activity(user, text)
# If we don't have a name for the user, then insert
# a question about the user's name.
# Check if new unknown user
# =========================
if user.name is None and not user.current_activity():
self.start_ask_user_name(user, text)
# If no ongoing activity
# ======================
if not user.current_activity():
# Check patterns
# ==============
for pattern, pattern_response in self.responses.items():
pat = re.compile(pattern)
if pat.match(text):
response = pattern_response
if callable(response):
response = response(text)
# If no pattern match found, check commands
# =========================================
if response == "":
response, choices = self.cmd_parser.parse(text, user)
# Handle any ongoing activities
# =============================
if user.current_activity():
repeat = True
while repeat:
status = self.handle_activity(user, text)
response += status.output + " "
choices = status.choices
repeat = status.done and user.current_activity()
if repeat:
text = status.result
# Admit defeat
# ============
if response == "":
response = "I have no clue what you're talking about."
| message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": user_name,
"text": response,
"choices": choices})
self.config.send_message(response_thread, message)
def start_activity(self, user, text):
"""Check if text is a command to start an activity, and if so,
start it. Return True if started, otherwise False."""
text = text.strip().lower()
if text in self.activity_cmds.keys():
self.activity_cmds[text](user, text)
return True
return False
@staticmethod
def handle_activity(user, text):
"""Send user input to ongoing activity."""
activity = user.current_activity()
if not activity:
return None
status = activity.handle_text(text)
if status.done:
user.remove_activity()
return status
@staticmethod
def start_ask_user_name(user, text):
"""Ask the user for their name."""
activity = eb_activity.AskUserNameActivity(user)
user.insert_activity(activity)
@staticmethod
def start_multi(user, text):
"""Start multiplication practice activity."""
activity = eb_math.MathDrill(user)
user.push_activity(activity)
return True
@staticmethod
def respond_ip(message):
"Return our local IP address."
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(("gmail.com", 80)) # I'm abusing gmail.
response = "I'm currently running on IP address %s." % \
sock.getsockname()[0]
sock.close()
return response
def handle_incoming_location_update(self, message):
"Handle incoming request for our location."
user = "Enfors" # Hardcoded for now. Sue me.
location = message.data["location"]
arrived = message.data["arrived"]
print("Updating location: [%s:%s]" % (location, str(arrived)))
# pylint: disable=not-context-manager
with self.config.lock, self.database:
cur = self.database.cursor()
if arrived:
self.location = location
self.arrived = True
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'arrived', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location updated: %s" % self.location)
else: # if leaving
# If leaving the location I'm currently at (sometimes
# the "left source" message arrives AFTER "arrived at
# destination" message), skipping those.
if self.arrived is False or location == self.location:
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'left', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location left: %s" % location)
self.arrived = False
return None
def handle_incoming_notify_user(self, message):
"Send notification message through Twitter."
out_message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": message.data["user"],
"text": message.data["text"]})
self.config.send_message("TwitterRest", out_message)
def respond_location(self, message):
"Return our location."
with self.database:
cur = self.database.cursor()
cur.execute("select * from LOCATION_HISTORY "
"order by ROWID desc limit 1")
try:
(user, location, event, timestamp) = cur.fetchone()
except TypeError:
return "I have no information on that."
if event == "arrived":
return "%s %s at %s %s." % \
(user, event, location,
self.get_datetime_diff_string(timestamp,
datetime.datetime.now()))
return "%s %s %s %s." % \
(user, event, location,
self.get_datetime_diff_string(timestamp,
datetime.datetime.now()))
def respond_syscond(self, message):
"Return the SysCond status of the host."
return self.check_syscond()
def respond_status(self, message):
"Return threads status."
output = ""
for thread in self.config.threads:
output += "%s: %s\n" % (thread,
self.config.get_thread_state(thread))
return output
def respond_lights_on(self, message):
"Turn the lights on in my house."
subprocess.call(["lights", "on"])
return "Lights have been turned ON."
def respond_lights_off(self, message):
"Turn the lights out in my house."
subprocess.call(["lights", "off"])
return "Lights have been turned OFF."
def check_syscond(self):
"Check the SysCond status of the host."
try:
syscond_output = subprocess.Popen(["syscond", "status", "-n"],
stdout=subprocess.PIPE).\
communicate()[0]
return syscond_output.decode("utf-8")
except FileNotFoundError:
return "SysCond is not installed on this host."
def get_datetime_diff_string(self, date1, date2):
"Return the diff between two datetimes, in short format."
if date1 > date2:
return "in the future"
diff = date2 - date1
total_seconds = diff.total_seconds()
minutes = total_seconds // 60
hours = total_seconds // 60 // 60
days = total_seconds // 60 // 60 // 24
if days:
hours -= (days * 24)
return "%d %s, %d %s ago" % (days,
self.get_possible_plural("day", days),
hours,
self.get_possible_plural("hour",
hours))
elif hours:
minutes -= (hours * 60)
return "%d %s, %d %s ago" % (hours,
self.get_possible_plural("hour",
hours),
minutes,
self.get_possible_plural("minute",
minutes))
elif minutes:
return "%d %s ago" % (minutes, self.get_possible_plural("minute",
minutes))
else:
return "just now"
def get_possible_plural(self, word, num):
"Return word+s if num is plural, otherwise word."
if num == 1:
return word
else:
return word + "s"
def main():
"Start the application."
app = EnforsBot()
app.start()
if __name__ == "__main__":
main() | # Send response
# =============
response = response.strip() + "\n"
print(" - Response: %s" % response.replace("\n", " ")) | random_line_split |
enforsbot.py | #!/usr/bin/env python3
"enforsbot.py by Christer Enfors (c) 2015, 2016, 2017"
from __future__ import print_function
import datetime
import re
import socket
import subprocess
import sqlite3
import eb_activity
import eb_config
import eb_cmds_loader
import eb_irc
import eb_math
import eb_message
import eb_parser
import eb_telegram
import eb_twitter
import eb_user
#twitter_thread = eb_twitter.TwitterThread()
SYSCOND_DIR = "/home/enfors/syscond"
class EnforsBot(object):
"The main class of the application."
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
# I see no problem with them.
def __init__(self):
self.config = eb_config.Config()
self.cmds_loader = eb_cmds_loader.CmdsLoader(["user", "admin"])
self.cmd_parser = eb_parser.CmdParser(self.cmds_loader)
# Responses are regexps.
self.responses = {
"ip" : self.respond_ip,
"what.* ip .*" : self.respond_ip,
"what.*address.*" : self.respond_ip,
".*good morning.*" : "Good morning!",
".*good afternoon.*" : "Good afternoon!",
".*good evening.*" : "Good evening!",
".*good night.*" : "Good night!",
"thank.*" : "You're welcome.",
"test" : "I am running and connected.",
"hello" : "Hello there!",
"hi" : "Hi there!",
"LocationUpdate .*" : self.handle_incoming_location_update,
"locate" : self.respond_location,
"syscond" : self.respond_syscond,
"status" : self.respond_status,
"lights out" : self.respond_lights_off,
"lights off" : self.respond_lights_off,
"lights on" : self.respond_lights_on,
}
# Incoming user messages can come from several different threads.
# When we get one, we keep track of which thread it's from, so
# we know which thread we should send the response to. For example,
# if we get a user message from TwitterStream, we should send the
# response to TwitterRest.
self.response_threads = {
#Incoming from Send response to
#=============== ================
"TwitterStreams" : "TwitterRest",
"Telegram" : "Telegram",
"IRC" : "IRC"
}
self.activity_cmds = {
"multi" : self.start_multi,
}
self.location = None
self.arrived = False
self.database = sqlite3.connect("enforsbot.db",
detect_types=sqlite3.PARSE_DECLTYPES)
self.user_handler = eb_user.UserHandler(self.config, self.database)
def start(self):
"Start the bot."
self.start_all_threads()
self.main_loop()
def main_loop(self):
"The main loop of the bot."
try:
while True:
message = self.config.recv_message("Main")
if message.msg_type == \
eb_message.MSG_TYPE_THREAD_STARTED:
print("Thread started: %s" % message.sender)
self.config.set_thread_state(message.sender,
"running")
elif message.msg_type == eb_message.MSG_TYPE_THREAD_STOPPED:
print("Thread stopped: %s" % message.sender)
self.config.set_thread_state(message.sender,
"stopped")
elif message.msg_type == eb_message.MSG_TYPE_USER_MESSAGE:
self.handle_incoming_user_message(message,
self.response_threads[message.sender])
elif message.msg_type == eb_message.MSG_TYPE_LOCATION_UPDATE:
self.handle_incoming_location_update(message)
elif message.msg_type == eb_message.MSG_TYPE_NOTIFY_USER:
self.handle_incoming_notify_user(message)
else:
print("Unsupported incoming message type: %d" % \
message.msg_type)
except (KeyboardInterrupt, SystemExit):
self.stop_all_threads()
return
def start_all_threads(self):
"Start all necessary threads."
# pylint: disable=not-context-manager
with self.config.lock:
twitter_thread = eb_twitter.TwitterThread("Twitter",
self.config)
self.config.threads["Twitter"] = twitter_thread
telegram_thread = eb_telegram.TelegramThread("Telegram",
self.config)
self.config.threads["Telegram"] = telegram_thread
irc_thread = eb_irc.IRCThread("IRC", self.config)
self.config.threads["IRC"] = irc_thread
self.config.set_thread_state("Twitter", "starting")
twitter_thread.start()
self.config.set_thread_state("Telegram", "starting")
telegram_thread.start()
self.config.set_thread_state("IRC", "starting")
irc_thread.start()
def stop_all_threads(self):
"Stop all threads."
print("") # Add a newline to get away from "^C" on screen
# pylint: disable=not-context-manager
with self.config.lock:
threads_to_stop = [thread for thread in self.config.threads if
self.config.thread_states[thread] == "running"]
print("Stopping threads: %s" % threads_to_stop)
for thread in threads_to_stop:
if thread not in self.config.threads:
print("ERROR: %s not in self.config.threads!" % thread)
self.stop_thread(thread)
print("ALL THREADS STOPPED.")
def stop_thread(self, thread):
"Stop one specific thread."
message = eb_message.Message("Main",
eb_message.MSG_TYPE_STOP_THREAD, {})
self.config.send_message(thread, message)
self.config.threads[thread].join()
def handle_incoming_user_message(self, message, response_thread):
"Handle an incoming message of type USER."
user_name = message.data["user"]
text = message.data["text"]
print("Main: Message from %s: '%s'" % (user_name, text))
protocol = response_thread
if protocol.startswith("Twitter"):
protocol = "Twitter"
user = self.user_handler.find_user_by_identifier(protocol,
user_name)
response = ""
choices = []
# If this is an IRC message:
if response_thread == "IRC":
# msg_type = message.data["msg_type"]
channel = message.data["channel"]
# But don't respond unless it's a private message.
if channel.lower() != "enforsbot" and \
channel.lower() != "enforstestbot":
return None
text = text.lower()
# If this is a command to start an activity:
# commented out - should be replaced with proper commands
# if text in self.activity_cmds.keys() and not user.current_activity():
# self.start_activity(user, text)
# If we don't have a name for the user, then insert
# a question about the user's name.
# Check if new unknown user
# =========================
if user.name is None and not user.current_activity():
self.start_ask_user_name(user, text)
# If no ongoing activity
# ======================
if not user.current_activity():
# Check patterns
# ==============
for pattern, pattern_response in self.responses.items():
pat = re.compile(pattern)
if pat.match(text):
response = pattern_response
if callable(response):
response = response(text)
# If no pattern match found, check commands
# =========================================
if response == "":
response, choices = self.cmd_parser.parse(text, user)
# Handle any ongoing activities
# =============================
if user.current_activity():
repeat = True
while repeat:
status = self.handle_activity(user, text)
response += status.output + " "
choices = status.choices
repeat = status.done and user.current_activity()
if repeat:
text = status.result
# Admit defeat
# ============
if response == "":
response = "I have no clue what you're talking about."
# Send response
# =============
response = response.strip() + "\n"
print(" - Response: %s" % response.replace("\n", " "))
message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": user_name,
"text": response,
"choices": choices})
self.config.send_message(response_thread, message)
def start_activity(self, user, text):
"""Check if text is a command to start an activity, and if so,
start it. Return True if started, otherwise False."""
text = text.strip().lower()
if text in self.activity_cmds.keys():
self.activity_cmds[text](user, text)
return True
return False
@staticmethod
def | (user, text):
"""Send user input to ongoing activity."""
activity = user.current_activity()
if not activity:
return None
status = activity.handle_text(text)
if status.done:
user.remove_activity()
return status
@staticmethod
def start_ask_user_name(user, text):
"""Ask the user for their name."""
activity = eb_activity.AskUserNameActivity(user)
user.insert_activity(activity)
@staticmethod
def start_multi(user, text):
"""Start multiplication practice activity."""
activity = eb_math.MathDrill(user)
user.push_activity(activity)
return True
@staticmethod
def respond_ip(message):
"Return our local IP address."
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(("gmail.com", 80)) # I'm abusing gmail.
response = "I'm currently running on IP address %s." % \
sock.getsockname()[0]
sock.close()
return response
def handle_incoming_location_update(self, message):
"Handle incoming request for our location."
user = "Enfors" # Hardcoded for now. Sue me.
location = message.data["location"]
arrived = message.data["arrived"]
print("Updating location: [%s:%s]" % (location, str(arrived)))
# pylint: disable=not-context-manager
with self.config.lock, self.database:
cur = self.database.cursor()
if arrived:
self.location = location
self.arrived = True
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'arrived', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location updated: %s" % self.location)
else: # if leaving
# If leaving the location I'm currently at (sometimes
# the "left source" message arrives AFTER "arrived at
# destination" message), skipping those.
if self.arrived is False or location == self.location:
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'left', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location left: %s" % location)
self.arrived = False
return None
def handle_incoming_notify_user(self, message):
"Send notification message through Twitter."
out_message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": message.data["user"],
"text": message.data["text"]})
self.config.send_message("TwitterRest", out_message)
def respond_location(self, message):
"Return our location."
with self.database:
cur = self.database.cursor()
cur.execute("select * from LOCATION_HISTORY "
"order by ROWID desc limit 1")
try:
(user, location, event, timestamp) = cur.fetchone()
except TypeError:
return "I have no information on that."
if event == "arrived":
return "%s %s at %s %s." % \
(user, event, location,
self.get_datetime_diff_string(timestamp,
datetime.datetime.now()))
return "%s %s %s %s." % \
(user, event, location,
self.get_datetime_diff_string(timestamp,
datetime.datetime.now()))
def respond_syscond(self, message):
"Return the SysCond status of the host."
return self.check_syscond()
def respond_status(self, message):
"Return threads status."
output = ""
for thread in self.config.threads:
output += "%s: %s\n" % (thread,
self.config.get_thread_state(thread))
return output
def respond_lights_on(self, message):
"Turn the lights on in my house."
subprocess.call(["lights", "on"])
return "Lights have been turned ON."
def respond_lights_off(self, message):
"Turn the lights out in my house."
subprocess.call(["lights", "off"])
return "Lights have been turned OFF."
def check_syscond(self):
"Check the SysCond status of the host."
try:
syscond_output = subprocess.Popen(["syscond", "status", "-n"],
stdout=subprocess.PIPE).\
communicate()[0]
return syscond_output.decode("utf-8")
except FileNotFoundError:
return "SysCond is not installed on this host."
def get_datetime_diff_string(self, date1, date2):
"Return the diff between two datetimes, in short format."
if date1 > date2:
return "in the future"
diff = date2 - date1
total_seconds = diff.total_seconds()
minutes = total_seconds // 60
hours = total_seconds // 60 // 60
days = total_seconds // 60 // 60 // 24
if days:
hours -= (days * 24)
return "%d %s, %d %s ago" % (days,
self.get_possible_plural("day", days),
hours,
self.get_possible_plural("hour",
hours))
elif hours:
minutes -= (hours * 60)
return "%d %s, %d %s ago" % (hours,
self.get_possible_plural("hour",
hours),
minutes,
self.get_possible_plural("minute",
minutes))
elif minutes:
return "%d %s ago" % (minutes, self.get_possible_plural("minute",
minutes))
else:
return "just now"
def get_possible_plural(self, word, num):
"Return word+s if num is plural, otherwise word."
if num == 1:
return word
else:
return word + "s"
def main():
"Start the application."
app = EnforsBot()
app.start()
if __name__ == "__main__":
main()
| handle_activity | identifier_name |
enforsbot.py | #!/usr/bin/env python3
"enforsbot.py by Christer Enfors (c) 2015, 2016, 2017"
from __future__ import print_function
import datetime
import re
import socket
import subprocess
import sqlite3
import eb_activity
import eb_config
import eb_cmds_loader
import eb_irc
import eb_math
import eb_message
import eb_parser
import eb_telegram
import eb_twitter
import eb_user
#twitter_thread = eb_twitter.TwitterThread()
SYSCOND_DIR = "/home/enfors/syscond"
class EnforsBot(object):
"The main class of the application."
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
# I see no problem with them.
def __init__(self):
self.config = eb_config.Config()
self.cmds_loader = eb_cmds_loader.CmdsLoader(["user", "admin"])
self.cmd_parser = eb_parser.CmdParser(self.cmds_loader)
# Responses are regexps.
self.responses = {
"ip" : self.respond_ip,
"what.* ip .*" : self.respond_ip,
"what.*address.*" : self.respond_ip,
".*good morning.*" : "Good morning!",
".*good afternoon.*" : "Good afternoon!",
".*good evening.*" : "Good evening!",
".*good night.*" : "Good night!",
"thank.*" : "You're welcome.",
"test" : "I am running and connected.",
"hello" : "Hello there!",
"hi" : "Hi there!",
"LocationUpdate .*" : self.handle_incoming_location_update,
"locate" : self.respond_location,
"syscond" : self.respond_syscond,
"status" : self.respond_status,
"lights out" : self.respond_lights_off,
"lights off" : self.respond_lights_off,
"lights on" : self.respond_lights_on,
}
# Incoming user messages can come from several different threads.
# When we get one, we keep track of which thread it's from, so
# we know which thread we should send the response to. For example,
# if we get a user message from TwitterStream, we should send the
# response to TwitterRest.
self.response_threads = {
#Incoming from Send response to
#=============== ================
"TwitterStreams" : "TwitterRest",
"Telegram" : "Telegram",
"IRC" : "IRC"
}
self.activity_cmds = {
"multi" : self.start_multi,
}
self.location = None
self.arrived = False
self.database = sqlite3.connect("enforsbot.db",
detect_types=sqlite3.PARSE_DECLTYPES)
self.user_handler = eb_user.UserHandler(self.config, self.database)
def start(self):
"Start the bot."
self.start_all_threads()
self.main_loop()
def main_loop(self):
"The main loop of the bot."
try:
while True:
message = self.config.recv_message("Main")
if message.msg_type == \
eb_message.MSG_TYPE_THREAD_STARTED:
print("Thread started: %s" % message.sender)
self.config.set_thread_state(message.sender,
"running")
elif message.msg_type == eb_message.MSG_TYPE_THREAD_STOPPED:
print("Thread stopped: %s" % message.sender)
self.config.set_thread_state(message.sender,
"stopped")
elif message.msg_type == eb_message.MSG_TYPE_USER_MESSAGE:
self.handle_incoming_user_message(message,
self.response_threads[message.sender])
elif message.msg_type == eb_message.MSG_TYPE_LOCATION_UPDATE:
self.handle_incoming_location_update(message)
elif message.msg_type == eb_message.MSG_TYPE_NOTIFY_USER:
self.handle_incoming_notify_user(message)
else:
print("Unsupported incoming message type: %d" % \
message.msg_type)
except (KeyboardInterrupt, SystemExit):
self.stop_all_threads()
return
def start_all_threads(self):
"Start all necessary threads."
# pylint: disable=not-context-manager
with self.config.lock:
twitter_thread = eb_twitter.TwitterThread("Twitter",
self.config)
self.config.threads["Twitter"] = twitter_thread
telegram_thread = eb_telegram.TelegramThread("Telegram",
self.config)
self.config.threads["Telegram"] = telegram_thread
irc_thread = eb_irc.IRCThread("IRC", self.config)
self.config.threads["IRC"] = irc_thread
self.config.set_thread_state("Twitter", "starting")
twitter_thread.start()
self.config.set_thread_state("Telegram", "starting")
telegram_thread.start()
self.config.set_thread_state("IRC", "starting")
irc_thread.start()
def stop_all_threads(self):
"Stop all threads."
print("") # Add a newline to get away from "^C" on screen
# pylint: disable=not-context-manager
with self.config.lock:
threads_to_stop = [thread for thread in self.config.threads if
self.config.thread_states[thread] == "running"]
print("Stopping threads: %s" % threads_to_stop)
for thread in threads_to_stop:
if thread not in self.config.threads:
print("ERROR: %s not in self.config.threads!" % thread)
self.stop_thread(thread)
print("ALL THREADS STOPPED.")
def stop_thread(self, thread):
"Stop one specific thread."
message = eb_message.Message("Main",
eb_message.MSG_TYPE_STOP_THREAD, {})
self.config.send_message(thread, message)
self.config.threads[thread].join()
def handle_incoming_user_message(self, message, response_thread):
"Handle an incoming message of type USER."
user_name = message.data["user"]
text = message.data["text"]
print("Main: Message from %s: '%s'" % (user_name, text))
protocol = response_thread
if protocol.startswith("Twitter"):
protocol = "Twitter"
user = self.user_handler.find_user_by_identifier(protocol,
user_name)
response = ""
choices = []
# If this is an IRC message:
if response_thread == "IRC":
# msg_type = message.data["msg_type"]
channel = message.data["channel"]
# But don't respond unless it's a private message.
if channel.lower() != "enforsbot" and \
channel.lower() != "enforstestbot":
return None
text = text.lower()
# If this is a command to start an activity:
# commented out - should be replaced with proper commands
# if text in self.activity_cmds.keys() and not user.current_activity():
# self.start_activity(user, text)
# If we don't have a name for the user, then insert
# a question about the user's name.
# Check if new unknown user
# =========================
if user.name is None and not user.current_activity():
self.start_ask_user_name(user, text)
# If no ongoing activity
# ======================
if not user.current_activity():
# Check patterns
# ==============
for pattern, pattern_response in self.responses.items():
pat = re.compile(pattern)
if pat.match(text):
response = pattern_response
if callable(response):
response = response(text)
# If no pattern match found, check commands
# =========================================
if response == "":
response, choices = self.cmd_parser.parse(text, user)
# Handle any ongoing activities
# =============================
if user.current_activity():
repeat = True
while repeat:
status = self.handle_activity(user, text)
response += status.output + " "
choices = status.choices
repeat = status.done and user.current_activity()
if repeat:
text = status.result
# Admit defeat
# ============
if response == "":
response = "I have no clue what you're talking about."
# Send response
# =============
response = response.strip() + "\n"
print(" - Response: %s" % response.replace("\n", " "))
message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": user_name,
"text": response,
"choices": choices})
self.config.send_message(response_thread, message)
def start_activity(self, user, text):
"""Check if text is a command to start an activity, and if so,
start it. Return True if started, otherwise False."""
text = text.strip().lower()
if text in self.activity_cmds.keys():
self.activity_cmds[text](user, text)
return True
return False
@staticmethod
def handle_activity(user, text):
"""Send user input to ongoing activity."""
activity = user.current_activity()
if not activity:
return None
status = activity.handle_text(text)
if status.done:
user.remove_activity()
return status
@staticmethod
def start_ask_user_name(user, text):
"""Ask the user for their name."""
activity = eb_activity.AskUserNameActivity(user)
user.insert_activity(activity)
@staticmethod
def start_multi(user, text):
"""Start multiplication practice activity."""
activity = eb_math.MathDrill(user)
user.push_activity(activity)
return True
@staticmethod
def respond_ip(message):
"Return our local IP address."
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(("gmail.com", 80)) # I'm abusing gmail.
response = "I'm currently running on IP address %s." % \
sock.getsockname()[0]
sock.close()
return response
def handle_incoming_location_update(self, message):
"Handle incoming request for our location."
user = "Enfors" # Hardcoded for now. Sue me.
location = message.data["location"]
arrived = message.data["arrived"]
print("Updating location: [%s:%s]" % (location, str(arrived)))
# pylint: disable=not-context-manager
with self.config.lock, self.database:
cur = self.database.cursor()
if arrived:
self.location = location
self.arrived = True
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'arrived', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location updated: %s" % self.location)
else: # if leaving
# If leaving the location I'm currently at (sometimes
# the "left source" message arrives AFTER "arrived at
# destination" message), skipping those.
if self.arrived is False or location == self.location:
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'left', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location left: %s" % location)
self.arrived = False
return None
def handle_incoming_notify_user(self, message):
"Send notification message through Twitter."
out_message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": message.data["user"],
"text": message.data["text"]})
self.config.send_message("TwitterRest", out_message)
def respond_location(self, message):
"Return our location."
with self.database:
cur = self.database.cursor()
cur.execute("select * from LOCATION_HISTORY "
"order by ROWID desc limit 1")
try:
(user, location, event, timestamp) = cur.fetchone()
except TypeError:
return "I have no information on that."
if event == "arrived":
return "%s %s at %s %s." % \
(user, event, location,
self.get_datetime_diff_string(timestamp,
datetime.datetime.now()))
return "%s %s %s %s." % \
(user, event, location,
self.get_datetime_diff_string(timestamp,
datetime.datetime.now()))
def respond_syscond(self, message):
"Return the SysCond status of the host."
return self.check_syscond()
def respond_status(self, message):
|
def respond_lights_on(self, message):
"Turn the lights on in my house."
subprocess.call(["lights", "on"])
return "Lights have been turned ON."
def respond_lights_off(self, message):
"Turn the lights out in my house."
subprocess.call(["lights", "off"])
return "Lights have been turned OFF."
def check_syscond(self):
"Check the SysCond status of the host."
try:
syscond_output = subprocess.Popen(["syscond", "status", "-n"],
stdout=subprocess.PIPE).\
communicate()[0]
return syscond_output.decode("utf-8")
except FileNotFoundError:
return "SysCond is not installed on this host."
def get_datetime_diff_string(self, date1, date2):
"Return the diff between two datetimes, in short format."
if date1 > date2:
return "in the future"
diff = date2 - date1
total_seconds = diff.total_seconds()
minutes = total_seconds // 60
hours = total_seconds // 60 // 60
days = total_seconds // 60 // 60 // 24
if days:
hours -= (days * 24)
return "%d %s, %d %s ago" % (days,
self.get_possible_plural("day", days),
hours,
self.get_possible_plural("hour",
hours))
elif hours:
minutes -= (hours * 60)
return "%d %s, %d %s ago" % (hours,
self.get_possible_plural("hour",
hours),
minutes,
self.get_possible_plural("minute",
minutes))
elif minutes:
return "%d %s ago" % (minutes, self.get_possible_plural("minute",
minutes))
else:
return "just now"
def get_possible_plural(self, word, num):
"Return word+s if num is plural, otherwise word."
if num == 1:
return word
else:
return word + "s"
def main():
"Start the application."
app = EnforsBot()
app.start()
if __name__ == "__main__":
main()
| "Return threads status."
output = ""
for thread in self.config.threads:
output += "%s: %s\n" % (thread,
self.config.get_thread_state(thread))
return output | identifier_body |
index.ts | import { GameProfile } from "@xmcl/common";
import { fetchBuffer, fetchJson, got } from "@xmcl/net";
import { vfs } from "@xmcl/util";
import ByteBuffer from "bytebuffer";
import * as crypto from "crypto";
import * as https from "https";
import * as queryString from "querystring";
import * as url from "url";
import { deprecate } from "util";
export { GameProfile } from "@xmcl/common";
function parseTexturesInfo(profile: GameProfile): GameProfile.TexturesInfo | undefined {
if (!profile.properties || !profile.properties.textures) { return undefined; }
return JSON.parse(Buffer.from(profile.properties.textures, "base64").toString());
}
export namespace ProfileService {
export interface API {
/**
* The PEM public key
*/
publicKey?: string;
/**
* Full url to query profile by uuid. Place the uuid as `${uuid}` in this url
*/
profile: string;
/**
* Full url to query profile by name. Place the name as `${name}` in this url
*/
profileByName: string;
/**
* Full url to set texture by profile uuid and texture type. Place uuid as `${uuid}` and type as `${type}`
*/
texture: string;
}
export namespace API {
/**
* Replace `${uuid}` string into uuid param
* @param api The api
* @param uuid The uuid will be replaced
*/
export function getProfileUrl(api: API, uuid: string) {
return api.profile.replace("${uuid}", uuid);
}
/**
* Replace `${name}` string into name param
* @param api The api
* @param name The name will be replaced
*/
export function getProfileByNameUrl(api: API, name: string) {
return api.profileByName.replace("${name}", name);
}
/**
* Replace uuid string into `${uuid}`, and type string into `${type}`
* @param api The api
* @param uuid The uuid string
* @param type The type string
*/
export function getTextureUrl(api: API, uuid: string, type: string) {
return api.texture.replace("${uuid}", uuid).replace("${type}", type);
}
}
/**
* The default Mojang API
*/
export const API_MOJANG: API = {
publicKey: `-----BEGIN PUBLIC KEY-----
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAylB4B6m5lz7jwrcFz6Fd
/fnfUhcvlxsTSn5kIK/2aGG1C3kMy4VjhwlxF6BFUSnfxhNswPjh3ZitkBxEAFY2
5uzkJFRwHwVA9mdwjashXILtR6OqdLXXFVyUPIURLOSWqGNBtb08EN5fMnG8iFLg
EJIBMxs9BvF3s3/FhuHyPKiVTZmXY0WY4ZyYqvoKR+XjaTRPPvBsDa4WI2u1zxXM
eHlodT3lnCzVvyOYBLXL6CJgByuOxccJ8hnXfF9yY4F0aeL080Jz/3+EBNG8RO4B
yhtBf4Ny8NQ6stWsjfeUIvH7bU/4zCYcYOq4WrInXHqS8qruDmIl7P5XXGcabuzQ
stPf/h2CRAUpP/PlHXcMlvewjmGU6MfDK+lifScNYwjPxRo4nKTGFZf/0aqHCh/E
AsQyLKrOIYRE0lDG3bzBh8ogIMLAugsAfBb6M3mqCqKaTMAf/VAjh5FFJnjS+7bE
+bZEV0qwax1CEoPPJL1fIQjOS8zj086gjpGRCtSy9+bTPTfTR/SJ+VUB5G2IeCIt
kNHpJX2ygojFZ9n5Fnj7R9ZnOM+L8nyIjPu3aePvtcrXlyLhH/hvOfIOjPxOlqW+
O5QwSFP4OEcyLAUgDdUgyW36Z5mB285uKW/ighzZsOTevVUG2QwDItObIV6i8RCx
FbN2oDHyPaO5j1tTaBNyVt8CAwEAAQ==
-----END PUBLIC KEY-----`,
texture: "https://api.mojang.com/user/profile/${uuid}/${type}",
profile: "https://sessionserver.mojang.com/session/minecraft/profile/${uuid}",
profileByName: "https://api.mojang.com/users/profiles/minecraft/${name}",
};
function checkSign(value: string, signature: string, pemKey: string) {
return crypto.createVerify("SHA1").update(value, "utf8").verify(pemKey, signature, "base64");
}
async function fetchProfile(target: string, pemPubKey?: string, payload?: object) {
const { body: obj, statusCode, statusMessage } = await fetchJson(target, { body: payload });
if (statusCode !== 200) {
throw new Error(statusMessage);
}
function parseProfile(o: any) {
if (typeof o.id !== "string" || typeof o.name !== "string") {
throw new Error(`Corrupted profile response ${JSON.stringify(o)}`);
}
if (o.properties && o.properties instanceof Array) {
const properties = o.properties as Array<{ name: string; value: string; signature: string; }>;
const to: { [key: string]: string } = {};
for (const prop of properties) {
if (prop.signature && pemPubKey && !checkSign(prop.value, prop.signature, pemPubKey.toString())) {
console.warn(`Discard corrupted prop ${prop.name}: ${prop.value} as the signature mismatched!`);
} else {
to[prop.name] = prop.value;
}
}
o.properties = to;
}
return o as GameProfile;
}
if (obj instanceof Array) {
return obj.map(parseProfile);
} else {
return parseProfile(obj);
}
}
export function fetchTexture(texture: GameProfile.Texture, dest: string): Promise<void>;
export function fetchTexture(texture: GameProfile.Texture): Promise<Buffer>;
/**
* Fetch the texture into disk or memory
*/
export async function fetchTexture(texture: GameProfile.Texture, dest?: string): Promise<void | Buffer> {
if (dest) {
await vfs.waitStream(got.stream(texture.url)
.pipe(vfs.createWriteStream(dest)));
} else {
const { body } = await fetchBuffer(texture.url);
return body;
}
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
*/
export async function cacheTexturesAsUri(tex: GameProfile.TexturesInfo) {
if (!tex) { return Promise.reject("No textures"); }
async function cache(texture: GameProfile.Texture): Promise<GameProfile.Texture> {
if (new URL(texture.url).protocol === "data;") { return texture; }
texture.url = await fetchBuffer(texture.url)
.then((resp) => resp.body)
.then((b) => b.toString("base64"))
.then((s) => `data:image/png;base64,${s}`);
return texture;
}
if (tex.textures.SKIN) {
tex.textures.SKIN = await cache(tex.textures.SKIN);
}
if (tex.textures.CAPE) {
tex.textures.CAPE = await cache(tex.textures.CAPE);
}
if (tex.textures.ELYTRA) {
tex.textures.ELYTRA = await cache(tex.textures.ELYTRA);
}
return tex;
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
* @deprecated
*/
export async function cacheTextures(tex: GameProfile.TexturesInfo) {
return cacheTexturesAsUri(tex);
}
/**
* Get all the textures of this GameProfile and cache them.
*
* @param profile The game profile from the profile service
* @param cache Should we cache the texture into url? Default is `true`.
*/
export async function getTextures(profile: GameProfile, cache: boolean = true): Promise<GameProfile.TexturesInfo> {
const texture = parseTexturesInfo(profile);
if (texture) { return cache ? cacheTextures(texture) : texture; }
return Promise.reject(`No texture for user ${profile.id}.`);
}
/**
* Fetch the GameProfile by uuid.
*
* @param uuid The unique id of user/player
* @param option the options for this function
*/
export function | (uuid: string, option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
return fetchProfile(API.getProfileUrl(api, uuid) + "?" + queryString.stringify({
unsigned: false,
}), api.publicKey).then((p) => p as GameProfile);
}
/**
* Look up the GameProfile by username in game.
* @param name The username in game.
* @param option the options of this function
*/
export function lookup(name: string, option: { api?: API, timestamp?: number } = {}) {
const api = option.api || API_MOJANG;
const time: number = option.timestamp || 0;
let target = API.getProfileByNameUrl(api, name);
if (time) {
target += "?" + queryString.stringify({
at: (time / 1000),
});
}
return fetchProfile(target, api.publicKey).then((p) => p as GameProfile);
}
/**
* Look up all names by api
* @param names The names will go through
* @param option The option with api
*/
export function lookUpAll(names: string[], option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
let target = API.getProfileByNameUrl(api, "");
target = target.substring(0, target.length - 1);
return fetchProfile(target, api.publicKey, names).then((r) => r as Array<GameProfile | undefined>);
}
/**
* Set texture by access token and uuid. If the texture is undefined, it will clear the texture to default steve.
*
* @param option
* @param api
*/
export async function setTexture(option: {
accessToken: string,
uuid: string,
type: "skin" | "cape" | "elytra",
texture?: GameProfile.Texture,
data?: Buffer,
}, api: API = API_MOJANG): Promise<void> {
const textUrl = url.parse(API.getTextureUrl(api, option.uuid, option.type));
const headers: any = { Authorization: `Bearer: ${option.accessToken}` };
const requireEmpty = (httpOption: https.RequestOptions, content?: string | Buffer) =>
new Promise<void>((resolve, reject) => {
const req = https.request(httpOption, (inc) => {
let d = "";
inc.on("error", (e) => { reject(e); });
inc.on("data", (b) => d += b.toString());
inc.on("end", () => {
if (d === "" && inc.statusCode === 204) { resolve(); } else { reject(JSON.parse(d)); }
});
});
req.on("error", (e) => reject(e));
if (content) { req.write(content); }
req.end();
});
if (!option.texture) {
return requireEmpty({
method: "DELETE",
path: textUrl.path,
host: textUrl.host,
headers,
});
} else if (option.data) {
let status = 0;
const boundary = `----------------------${crypto.randomBytes(8).toString("hex")}`;
let buff: ByteBuffer = new ByteBuffer();
const diposition = (key: string, value: string) => {
if (status === 0) {
buff.writeUTF8String(`--${boundary}\r\nContent-Disposition: form-data`);
status = 1;
}
buff.writeUTF8String(`; ${key}="${value}"`);
};
const header = (key: string, value: string) => {
if (status === 1) {
buff.writeUTF8String("\r\n");
status = 2;
}
buff.writeUTF8String(`${key}:${value}\r\n`);
};
const content = (payload: Buffer) => {
if (status === 1) {
buff.writeUTF8String("\r\n");
}
status = 0;
buff.writeUTF8String("\r\n");
buff = buff.append(payload);
buff.writeUTF8String("\r\n");
};
const finish = () => {
buff.writeUTF8String(`--${boundary}--\r\n`);
};
if (option.texture.metadata) {
for (const key in option.texture.metadata) {
diposition("name", key);
content(option.texture.metadata[key]);
}
}
diposition("name", "file");
header("Content-Type", "image/png");
content(option.data);
finish();
buff.flip();
const out = Buffer.from(buff.toArrayBuffer());
headers["Content-Type"] = `multipart/form-data; boundary=${boundary}`;
headers["Content-Length"] = out.byteLength;
return requireEmpty({
method: "PUT",
host: textUrl.host,
path: textUrl.path,
headers,
}, out);
} else if (option.texture.url) {
const param = new url.URLSearchParams(Object.assign({ url: option.texture.url }, option.texture.metadata)).toString();
headers["Content-Type"] = "x-www-form-urlencoded";
headers["Content-Length"] = param.length;
return requireEmpty({
method: "POST",
host: textUrl.host,
path: textUrl.path,
headers,
}, param);
} else {
throw new Error("Illegal Option Format!");
}
}
}
ProfileService.cacheTextures = deprecate(ProfileService.cacheTextures, "Use cacheTexturesUri instead");
| fetch | identifier_name |
index.ts | import { GameProfile } from "@xmcl/common";
import { fetchBuffer, fetchJson, got } from "@xmcl/net";
import { vfs } from "@xmcl/util";
import ByteBuffer from "bytebuffer";
import * as crypto from "crypto";
import * as https from "https";
import * as queryString from "querystring";
import * as url from "url";
import { deprecate } from "util";
export { GameProfile } from "@xmcl/common";
function parseTexturesInfo(profile: GameProfile): GameProfile.TexturesInfo | undefined {
if (!profile.properties || !profile.properties.textures) { return undefined; }
return JSON.parse(Buffer.from(profile.properties.textures, "base64").toString());
}
export namespace ProfileService {
export interface API {
/**
* The PEM public key
*/
publicKey?: string;
/**
* Full url to query profile by uuid. Place the uuid as `${uuid}` in this url
*/
profile: string;
/**
* Full url to query profile by name. Place the name as `${name}` in this url
*/
profileByName: string;
/**
* Full url to set texture by profile uuid and texture type. Place uuid as `${uuid}` and type as `${type}`
*/
texture: string;
}
export namespace API {
/**
* Replace `${uuid}` string into uuid param
* @param api The api
* @param uuid The uuid will be replaced
*/
export function getProfileUrl(api: API, uuid: string) {
return api.profile.replace("${uuid}", uuid);
}
/**
* Replace `${name}` string into name param
* @param api The api
* @param name The name will be replaced
*/
export function getProfileByNameUrl(api: API, name: string) {
return api.profileByName.replace("${name}", name);
}
/**
* Replace uuid string into `${uuid}`, and type string into `${type}`
* @param api The api
* @param uuid The uuid string
* @param type The type string
*/
export function getTextureUrl(api: API, uuid: string, type: string) {
return api.texture.replace("${uuid}", uuid).replace("${type}", type);
}
}
/**
* The default Mojang API
*/
export const API_MOJANG: API = {
publicKey: `-----BEGIN PUBLIC KEY-----
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAylB4B6m5lz7jwrcFz6Fd
/fnfUhcvlxsTSn5kIK/2aGG1C3kMy4VjhwlxF6BFUSnfxhNswPjh3ZitkBxEAFY2
5uzkJFRwHwVA9mdwjashXILtR6OqdLXXFVyUPIURLOSWqGNBtb08EN5fMnG8iFLg
EJIBMxs9BvF3s3/FhuHyPKiVTZmXY0WY4ZyYqvoKR+XjaTRPPvBsDa4WI2u1zxXM
eHlodT3lnCzVvyOYBLXL6CJgByuOxccJ8hnXfF9yY4F0aeL080Jz/3+EBNG8RO4B
yhtBf4Ny8NQ6stWsjfeUIvH7bU/4zCYcYOq4WrInXHqS8qruDmIl7P5XXGcabuzQ
stPf/h2CRAUpP/PlHXcMlvewjmGU6MfDK+lifScNYwjPxRo4nKTGFZf/0aqHCh/E
AsQyLKrOIYRE0lDG3bzBh8ogIMLAugsAfBb6M3mqCqKaTMAf/VAjh5FFJnjS+7bE
+bZEV0qwax1CEoPPJL1fIQjOS8zj086gjpGRCtSy9+bTPTfTR/SJ+VUB5G2IeCIt
kNHpJX2ygojFZ9n5Fnj7R9ZnOM+L8nyIjPu3aePvtcrXlyLhH/hvOfIOjPxOlqW+
O5QwSFP4OEcyLAUgDdUgyW36Z5mB285uKW/ighzZsOTevVUG2QwDItObIV6i8RCx
FbN2oDHyPaO5j1tTaBNyVt8CAwEAAQ==
-----END PUBLIC KEY-----`,
texture: "https://api.mojang.com/user/profile/${uuid}/${type}",
profile: "https://sessionserver.mojang.com/session/minecraft/profile/${uuid}",
profileByName: "https://api.mojang.com/users/profiles/minecraft/${name}",
};
function checkSign(value: string, signature: string, pemKey: string) {
return crypto.createVerify("SHA1").update(value, "utf8").verify(pemKey, signature, "base64");
}
async function fetchProfile(target: string, pemPubKey?: string, payload?: object) {
const { body: obj, statusCode, statusMessage } = await fetchJson(target, { body: payload });
if (statusCode !== 200) {
throw new Error(statusMessage);
}
function parseProfile(o: any) {
if (typeof o.id !== "string" || typeof o.name !== "string") {
throw new Error(`Corrupted profile response ${JSON.stringify(o)}`);
}
if (o.properties && o.properties instanceof Array) {
const properties = o.properties as Array<{ name: string; value: string; signature: string; }>;
const to: { [key: string]: string } = {};
for (const prop of properties) {
if (prop.signature && pemPubKey && !checkSign(prop.value, prop.signature, pemPubKey.toString())) {
console.warn(`Discard corrupted prop ${prop.name}: ${prop.value} as the signature mismatched!`);
} else {
to[prop.name] = prop.value;
}
}
o.properties = to;
}
return o as GameProfile;
}
if (obj instanceof Array) {
return obj.map(parseProfile);
} else {
return parseProfile(obj);
}
}
export function fetchTexture(texture: GameProfile.Texture, dest: string): Promise<void>;
export function fetchTexture(texture: GameProfile.Texture): Promise<Buffer>;
/**
* Fetch the texture into disk or memory
*/
export async function fetchTexture(texture: GameProfile.Texture, dest?: string): Promise<void | Buffer> {
if (dest) {
await vfs.waitStream(got.stream(texture.url)
.pipe(vfs.createWriteStream(dest)));
} else {
const { body } = await fetchBuffer(texture.url);
return body;
}
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
*/
export async function cacheTexturesAsUri(tex: GameProfile.TexturesInfo) {
if (!tex) { return Promise.reject("No textures"); }
async function cache(texture: GameProfile.Texture): Promise<GameProfile.Texture> {
if (new URL(texture.url).protocol === "data;") { return texture; }
texture.url = await fetchBuffer(texture.url)
.then((resp) => resp.body)
.then((b) => b.toString("base64"))
.then((s) => `data:image/png;base64,${s}`);
return texture;
}
if (tex.textures.SKIN) {
tex.textures.SKIN = await cache(tex.textures.SKIN);
}
if (tex.textures.CAPE) {
tex.textures.CAPE = await cache(tex.textures.CAPE);
}
if (tex.textures.ELYTRA) {
tex.textures.ELYTRA = await cache(tex.textures.ELYTRA);
}
return tex;
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
* @deprecated
*/
export async function cacheTextures(tex: GameProfile.TexturesInfo) {
return cacheTexturesAsUri(tex);
}
/**
* Get all the textures of this GameProfile and cache them.
*
* @param profile The game profile from the profile service
* @param cache Should we cache the texture into url? Default is `true`.
*/
export async function getTextures(profile: GameProfile, cache: boolean = true): Promise<GameProfile.TexturesInfo> |
/**
* Fetch the GameProfile by uuid.
*
* @param uuid The unique id of user/player
* @param option the options for this function
*/
export function fetch(uuid: string, option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
return fetchProfile(API.getProfileUrl(api, uuid) + "?" + queryString.stringify({
unsigned: false,
}), api.publicKey).then((p) => p as GameProfile);
}
/**
* Look up the GameProfile by username in game.
* @param name The username in game.
* @param option the options of this function
*/
export function lookup(name: string, option: { api?: API, timestamp?: number } = {}) {
const api = option.api || API_MOJANG;
const time: number = option.timestamp || 0;
let target = API.getProfileByNameUrl(api, name);
if (time) {
target += "?" + queryString.stringify({
at: (time / 1000),
});
}
return fetchProfile(target, api.publicKey).then((p) => p as GameProfile);
}
/**
* Look up all names by api
* @param names The names will go through
* @param option The option with api
*/
export function lookUpAll(names: string[], option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
let target = API.getProfileByNameUrl(api, "");
target = target.substring(0, target.length - 1);
return fetchProfile(target, api.publicKey, names).then((r) => r as Array<GameProfile | undefined>);
}
/**
* Set texture by access token and uuid. If the texture is undefined, it will clear the texture to default steve.
*
* @param option
* @param api
*/
export async function setTexture(option: {
accessToken: string,
uuid: string,
type: "skin" | "cape" | "elytra",
texture?: GameProfile.Texture,
data?: Buffer,
}, api: API = API_MOJANG): Promise<void> {
const textUrl = url.parse(API.getTextureUrl(api, option.uuid, option.type));
const headers: any = { Authorization: `Bearer: ${option.accessToken}` };
const requireEmpty = (httpOption: https.RequestOptions, content?: string | Buffer) =>
new Promise<void>((resolve, reject) => {
const req = https.request(httpOption, (inc) => {
let d = "";
inc.on("error", (e) => { reject(e); });
inc.on("data", (b) => d += b.toString());
inc.on("end", () => {
if (d === "" && inc.statusCode === 204) { resolve(); } else { reject(JSON.parse(d)); }
});
});
req.on("error", (e) => reject(e));
if (content) { req.write(content); }
req.end();
});
if (!option.texture) {
return requireEmpty({
method: "DELETE",
path: textUrl.path,
host: textUrl.host,
headers,
});
} else if (option.data) {
let status = 0;
const boundary = `----------------------${crypto.randomBytes(8).toString("hex")}`;
let buff: ByteBuffer = new ByteBuffer();
const diposition = (key: string, value: string) => {
if (status === 0) {
buff.writeUTF8String(`--${boundary}\r\nContent-Disposition: form-data`);
status = 1;
}
buff.writeUTF8String(`; ${key}="${value}"`);
};
const header = (key: string, value: string) => {
if (status === 1) {
buff.writeUTF8String("\r\n");
status = 2;
}
buff.writeUTF8String(`${key}:${value}\r\n`);
};
const content = (payload: Buffer) => {
if (status === 1) {
buff.writeUTF8String("\r\n");
}
status = 0;
buff.writeUTF8String("\r\n");
buff = buff.append(payload);
buff.writeUTF8String("\r\n");
};
const finish = () => {
buff.writeUTF8String(`--${boundary}--\r\n`);
};
if (option.texture.metadata) {
for (const key in option.texture.metadata) {
diposition("name", key);
content(option.texture.metadata[key]);
}
}
diposition("name", "file");
header("Content-Type", "image/png");
content(option.data);
finish();
buff.flip();
const out = Buffer.from(buff.toArrayBuffer());
headers["Content-Type"] = `multipart/form-data; boundary=${boundary}`;
headers["Content-Length"] = out.byteLength;
return requireEmpty({
method: "PUT",
host: textUrl.host,
path: textUrl.path,
headers,
}, out);
} else if (option.texture.url) {
const param = new url.URLSearchParams(Object.assign({ url: option.texture.url }, option.texture.metadata)).toString();
headers["Content-Type"] = "x-www-form-urlencoded";
headers["Content-Length"] = param.length;
return requireEmpty({
method: "POST",
host: textUrl.host,
path: textUrl.path,
headers,
}, param);
} else {
throw new Error("Illegal Option Format!");
}
}
}
ProfileService.cacheTextures = deprecate(ProfileService.cacheTextures, "Use cacheTexturesUri instead");
| {
const texture = parseTexturesInfo(profile);
if (texture) { return cache ? cacheTextures(texture) : texture; }
return Promise.reject(`No texture for user ${profile.id}.`);
} | identifier_body |
index.ts | import { GameProfile } from "@xmcl/common";
import { fetchBuffer, fetchJson, got } from "@xmcl/net";
import { vfs } from "@xmcl/util";
import ByteBuffer from "bytebuffer";
import * as crypto from "crypto";
import * as https from "https";
import * as queryString from "querystring";
import * as url from "url";
import { deprecate } from "util";
export { GameProfile } from "@xmcl/common";
function parseTexturesInfo(profile: GameProfile): GameProfile.TexturesInfo | undefined {
if (!profile.properties || !profile.properties.textures) { return undefined; }
return JSON.parse(Buffer.from(profile.properties.textures, "base64").toString());
}
export namespace ProfileService {
export interface API {
/**
* The PEM public key
*/
publicKey?: string;
/**
* Full url to query profile by uuid. Place the uuid as `${uuid}` in this url
*/
profile: string;
/**
* Full url to query profile by name. Place the name as `${name}` in this url
*/
profileByName: string;
/**
* Full url to set texture by profile uuid and texture type. Place uuid as `${uuid}` and type as `${type}`
*/
texture: string;
}
export namespace API {
/**
* Replace `${uuid}` string into uuid param
* @param api The api
* @param uuid The uuid will be replaced
*/
export function getProfileUrl(api: API, uuid: string) {
return api.profile.replace("${uuid}", uuid);
}
/**
* Replace `${name}` string into name param
* @param api The api
* @param name The name will be replaced
*/
export function getProfileByNameUrl(api: API, name: string) {
return api.profileByName.replace("${name}", name);
}
/**
* Replace uuid string into `${uuid}`, and type string into `${type}`
* @param api The api
* @param uuid The uuid string
* @param type The type string
*/
export function getTextureUrl(api: API, uuid: string, type: string) {
return api.texture.replace("${uuid}", uuid).replace("${type}", type);
}
}
/**
* The default Mojang API
*/
export const API_MOJANG: API = {
publicKey: `-----BEGIN PUBLIC KEY-----
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAylB4B6m5lz7jwrcFz6Fd
/fnfUhcvlxsTSn5kIK/2aGG1C3kMy4VjhwlxF6BFUSnfxhNswPjh3ZitkBxEAFY2
5uzkJFRwHwVA9mdwjashXILtR6OqdLXXFVyUPIURLOSWqGNBtb08EN5fMnG8iFLg
EJIBMxs9BvF3s3/FhuHyPKiVTZmXY0WY4ZyYqvoKR+XjaTRPPvBsDa4WI2u1zxXM
eHlodT3lnCzVvyOYBLXL6CJgByuOxccJ8hnXfF9yY4F0aeL080Jz/3+EBNG8RO4B
yhtBf4Ny8NQ6stWsjfeUIvH7bU/4zCYcYOq4WrInXHqS8qruDmIl7P5XXGcabuzQ
stPf/h2CRAUpP/PlHXcMlvewjmGU6MfDK+lifScNYwjPxRo4nKTGFZf/0aqHCh/E
AsQyLKrOIYRE0lDG3bzBh8ogIMLAugsAfBb6M3mqCqKaTMAf/VAjh5FFJnjS+7bE
+bZEV0qwax1CEoPPJL1fIQjOS8zj086gjpGRCtSy9+bTPTfTR/SJ+VUB5G2IeCIt
kNHpJX2ygojFZ9n5Fnj7R9ZnOM+L8nyIjPu3aePvtcrXlyLhH/hvOfIOjPxOlqW+
O5QwSFP4OEcyLAUgDdUgyW36Z5mB285uKW/ighzZsOTevVUG2QwDItObIV6i8RCx
FbN2oDHyPaO5j1tTaBNyVt8CAwEAAQ==
-----END PUBLIC KEY-----`,
texture: "https://api.mojang.com/user/profile/${uuid}/${type}",
profile: "https://sessionserver.mojang.com/session/minecraft/profile/${uuid}",
profileByName: "https://api.mojang.com/users/profiles/minecraft/${name}",
};
function checkSign(value: string, signature: string, pemKey: string) {
return crypto.createVerify("SHA1").update(value, "utf8").verify(pemKey, signature, "base64");
}
async function fetchProfile(target: string, pemPubKey?: string, payload?: object) {
const { body: obj, statusCode, statusMessage } = await fetchJson(target, { body: payload });
if (statusCode !== 200) {
throw new Error(statusMessage);
}
function parseProfile(o: any) {
if (typeof o.id !== "string" || typeof o.name !== "string") {
throw new Error(`Corrupted profile response ${JSON.stringify(o)}`);
}
if (o.properties && o.properties instanceof Array) {
const properties = o.properties as Array<{ name: string; value: string; signature: string; }>;
const to: { [key: string]: string } = {};
for (const prop of properties) {
if (prop.signature && pemPubKey && !checkSign(prop.value, prop.signature, pemPubKey.toString())) {
console.warn(`Discard corrupted prop ${prop.name}: ${prop.value} as the signature mismatched!`);
} else {
to[prop.name] = prop.value;
}
}
o.properties = to;
}
return o as GameProfile;
}
if (obj instanceof Array) {
return obj.map(parseProfile);
} else {
return parseProfile(obj);
}
}
export function fetchTexture(texture: GameProfile.Texture, dest: string): Promise<void>;
export function fetchTexture(texture: GameProfile.Texture): Promise<Buffer>;
/**
* Fetch the texture into disk or memory
*/
export async function fetchTexture(texture: GameProfile.Texture, dest?: string): Promise<void | Buffer> {
if (dest) {
await vfs.waitStream(got.stream(texture.url)
.pipe(vfs.createWriteStream(dest)));
} else {
const { body } = await fetchBuffer(texture.url);
return body;
}
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
*/
export async function cacheTexturesAsUri(tex: GameProfile.TexturesInfo) {
if (!tex) { return Promise.reject("No textures"); }
async function cache(texture: GameProfile.Texture): Promise<GameProfile.Texture> {
if (new URL(texture.url).protocol === "data;") { return texture; }
texture.url = await fetchBuffer(texture.url)
.then((resp) => resp.body)
.then((b) => b.toString("base64"))
.then((s) => `data:image/png;base64,${s}`);
return texture;
}
if (tex.textures.SKIN) {
tex.textures.SKIN = await cache(tex.textures.SKIN);
}
if (tex.textures.CAPE) {
tex.textures.CAPE = await cache(tex.textures.CAPE);
}
if (tex.textures.ELYTRA) {
tex.textures.ELYTRA = await cache(tex.textures.ELYTRA);
}
return tex;
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
* @deprecated
*/
export async function cacheTextures(tex: GameProfile.TexturesInfo) {
return cacheTexturesAsUri(tex);
}
/**
* Get all the textures of this GameProfile and cache them.
*
* @param profile The game profile from the profile service
* @param cache Should we cache the texture into url? Default is `true`.
*/
export async function getTextures(profile: GameProfile, cache: boolean = true): Promise<GameProfile.TexturesInfo> {
const texture = parseTexturesInfo(profile);
if (texture) { return cache ? cacheTextures(texture) : texture; }
return Promise.reject(`No texture for user ${profile.id}.`);
}
/**
* Fetch the GameProfile by uuid.
*
* @param uuid The unique id of user/player
* @param option the options for this function
*/
export function fetch(uuid: string, option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
return fetchProfile(API.getProfileUrl(api, uuid) + "?" + queryString.stringify({
unsigned: false,
}), api.publicKey).then((p) => p as GameProfile);
}
/**
* Look up the GameProfile by username in game.
* @param name The username in game.
* @param option the options of this function
*/
export function lookup(name: string, option: { api?: API, timestamp?: number } = {}) {
const api = option.api || API_MOJANG;
const time: number = option.timestamp || 0;
let target = API.getProfileByNameUrl(api, name);
if (time) {
target += "?" + queryString.stringify({
at: (time / 1000),
});
}
return fetchProfile(target, api.publicKey).then((p) => p as GameProfile);
}
/**
* Look up all names by api
* @param names The names will go through
* @param option The option with api
*/
export function lookUpAll(names: string[], option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
let target = API.getProfileByNameUrl(api, "");
target = target.substring(0, target.length - 1);
return fetchProfile(target, api.publicKey, names).then((r) => r as Array<GameProfile | undefined>);
}
/**
* Set texture by access token and uuid. If the texture is undefined, it will clear the texture to default steve.
*
* @param option
* @param api
*/
export async function setTexture(option: {
accessToken: string,
uuid: string,
type: "skin" | "cape" | "elytra",
texture?: GameProfile.Texture,
data?: Buffer,
}, api: API = API_MOJANG): Promise<void> {
const textUrl = url.parse(API.getTextureUrl(api, option.uuid, option.type));
const headers: any = { Authorization: `Bearer: ${option.accessToken}` };
const requireEmpty = (httpOption: https.RequestOptions, content?: string | Buffer) =>
new Promise<void>((resolve, reject) => {
const req = https.request(httpOption, (inc) => {
let d = "";
inc.on("error", (e) => { reject(e); });
inc.on("data", (b) => d += b.toString());
inc.on("end", () => {
if (d === "" && inc.statusCode === 204) { resolve(); } else { reject(JSON.parse(d)); }
});
});
req.on("error", (e) => reject(e));
if (content) { req.write(content); }
req.end();
});
if (!option.texture) {
return requireEmpty({
method: "DELETE",
path: textUrl.path,
host: textUrl.host,
headers,
});
} else if (option.data) {
let status = 0;
const boundary = `----------------------${crypto.randomBytes(8).toString("hex")}`;
let buff: ByteBuffer = new ByteBuffer();
const diposition = (key: string, value: string) => {
if (status === 0) {
buff.writeUTF8String(`--${boundary}\r\nContent-Disposition: form-data`);
status = 1;
}
buff.writeUTF8String(`; ${key}="${value}"`);
};
const header = (key: string, value: string) => {
if (status === 1) {
buff.writeUTF8String("\r\n");
status = 2;
}
buff.writeUTF8String(`${key}:${value}\r\n`);
};
const content = (payload: Buffer) => {
if (status === 1) {
buff.writeUTF8String("\r\n");
}
status = 0; | };
const finish = () => {
buff.writeUTF8String(`--${boundary}--\r\n`);
};
if (option.texture.metadata) {
for (const key in option.texture.metadata) {
diposition("name", key);
content(option.texture.metadata[key]);
}
}
diposition("name", "file");
header("Content-Type", "image/png");
content(option.data);
finish();
buff.flip();
const out = Buffer.from(buff.toArrayBuffer());
headers["Content-Type"] = `multipart/form-data; boundary=${boundary}`;
headers["Content-Length"] = out.byteLength;
return requireEmpty({
method: "PUT",
host: textUrl.host,
path: textUrl.path,
headers,
}, out);
} else if (option.texture.url) {
const param = new url.URLSearchParams(Object.assign({ url: option.texture.url }, option.texture.metadata)).toString();
headers["Content-Type"] = "x-www-form-urlencoded";
headers["Content-Length"] = param.length;
return requireEmpty({
method: "POST",
host: textUrl.host,
path: textUrl.path,
headers,
}, param);
} else {
throw new Error("Illegal Option Format!");
}
}
}
ProfileService.cacheTextures = deprecate(ProfileService.cacheTextures, "Use cacheTexturesUri instead"); | buff.writeUTF8String("\r\n");
buff = buff.append(payload);
buff.writeUTF8String("\r\n"); | random_line_split |
index.ts | import { GameProfile } from "@xmcl/common";
import { fetchBuffer, fetchJson, got } from "@xmcl/net";
import { vfs } from "@xmcl/util";
import ByteBuffer from "bytebuffer";
import * as crypto from "crypto";
import * as https from "https";
import * as queryString from "querystring";
import * as url from "url";
import { deprecate } from "util";
export { GameProfile } from "@xmcl/common";
function parseTexturesInfo(profile: GameProfile): GameProfile.TexturesInfo | undefined {
if (!profile.properties || !profile.properties.textures) { return undefined; }
return JSON.parse(Buffer.from(profile.properties.textures, "base64").toString());
}
export namespace ProfileService {
export interface API {
/**
* The PEM public key
*/
publicKey?: string;
/**
* Full url to query profile by uuid. Place the uuid as `${uuid}` in this url
*/
profile: string;
/**
* Full url to query profile by name. Place the name as `${name}` in this url
*/
profileByName: string;
/**
* Full url to set texture by profile uuid and texture type. Place uuid as `${uuid}` and type as `${type}`
*/
texture: string;
}
export namespace API {
/**
* Replace `${uuid}` string into uuid param
* @param api The api
* @param uuid The uuid will be replaced
*/
export function getProfileUrl(api: API, uuid: string) {
return api.profile.replace("${uuid}", uuid);
}
/**
* Replace `${name}` string into name param
* @param api The api
* @param name The name will be replaced
*/
export function getProfileByNameUrl(api: API, name: string) {
return api.profileByName.replace("${name}", name);
}
/**
* Replace uuid string into `${uuid}`, and type string into `${type}`
* @param api The api
* @param uuid The uuid string
* @param type The type string
*/
export function getTextureUrl(api: API, uuid: string, type: string) {
return api.texture.replace("${uuid}", uuid).replace("${type}", type);
}
}
/**
* The default Mojang API
*/
export const API_MOJANG: API = {
publicKey: `-----BEGIN PUBLIC KEY-----
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAylB4B6m5lz7jwrcFz6Fd
/fnfUhcvlxsTSn5kIK/2aGG1C3kMy4VjhwlxF6BFUSnfxhNswPjh3ZitkBxEAFY2
5uzkJFRwHwVA9mdwjashXILtR6OqdLXXFVyUPIURLOSWqGNBtb08EN5fMnG8iFLg
EJIBMxs9BvF3s3/FhuHyPKiVTZmXY0WY4ZyYqvoKR+XjaTRPPvBsDa4WI2u1zxXM
eHlodT3lnCzVvyOYBLXL6CJgByuOxccJ8hnXfF9yY4F0aeL080Jz/3+EBNG8RO4B
yhtBf4Ny8NQ6stWsjfeUIvH7bU/4zCYcYOq4WrInXHqS8qruDmIl7P5XXGcabuzQ
stPf/h2CRAUpP/PlHXcMlvewjmGU6MfDK+lifScNYwjPxRo4nKTGFZf/0aqHCh/E
AsQyLKrOIYRE0lDG3bzBh8ogIMLAugsAfBb6M3mqCqKaTMAf/VAjh5FFJnjS+7bE
+bZEV0qwax1CEoPPJL1fIQjOS8zj086gjpGRCtSy9+bTPTfTR/SJ+VUB5G2IeCIt
kNHpJX2ygojFZ9n5Fnj7R9ZnOM+L8nyIjPu3aePvtcrXlyLhH/hvOfIOjPxOlqW+
O5QwSFP4OEcyLAUgDdUgyW36Z5mB285uKW/ighzZsOTevVUG2QwDItObIV6i8RCx
FbN2oDHyPaO5j1tTaBNyVt8CAwEAAQ==
-----END PUBLIC KEY-----`,
texture: "https://api.mojang.com/user/profile/${uuid}/${type}",
profile: "https://sessionserver.mojang.com/session/minecraft/profile/${uuid}",
profileByName: "https://api.mojang.com/users/profiles/minecraft/${name}",
};
function checkSign(value: string, signature: string, pemKey: string) {
return crypto.createVerify("SHA1").update(value, "utf8").verify(pemKey, signature, "base64");
}
async function fetchProfile(target: string, pemPubKey?: string, payload?: object) {
const { body: obj, statusCode, statusMessage } = await fetchJson(target, { body: payload });
if (statusCode !== 200) {
throw new Error(statusMessage);
}
function parseProfile(o: any) {
if (typeof o.id !== "string" || typeof o.name !== "string") {
throw new Error(`Corrupted profile response ${JSON.stringify(o)}`);
}
if (o.properties && o.properties instanceof Array) {
const properties = o.properties as Array<{ name: string; value: string; signature: string; }>;
const to: { [key: string]: string } = {};
for (const prop of properties) {
if (prop.signature && pemPubKey && !checkSign(prop.value, prop.signature, pemPubKey.toString())) {
console.warn(`Discard corrupted prop ${prop.name}: ${prop.value} as the signature mismatched!`);
} else {
to[prop.name] = prop.value;
}
}
o.properties = to;
}
return o as GameProfile;
}
if (obj instanceof Array) {
return obj.map(parseProfile);
} else {
return parseProfile(obj);
}
}
export function fetchTexture(texture: GameProfile.Texture, dest: string): Promise<void>;
export function fetchTexture(texture: GameProfile.Texture): Promise<Buffer>;
/**
* Fetch the texture into disk or memory
*/
export async function fetchTexture(texture: GameProfile.Texture, dest?: string): Promise<void | Buffer> {
if (dest) {
await vfs.waitStream(got.stream(texture.url)
.pipe(vfs.createWriteStream(dest)));
} else {
const { body } = await fetchBuffer(texture.url);
return body;
}
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
*/
export async function cacheTexturesAsUri(tex: GameProfile.TexturesInfo) {
if (!tex) { return Promise.reject("No textures"); }
async function cache(texture: GameProfile.Texture): Promise<GameProfile.Texture> {
if (new URL(texture.url).protocol === "data;") { return texture; }
texture.url = await fetchBuffer(texture.url)
.then((resp) => resp.body)
.then((b) => b.toString("base64"))
.then((s) => `data:image/png;base64,${s}`);
return texture;
}
if (tex.textures.SKIN) |
if (tex.textures.CAPE) {
tex.textures.CAPE = await cache(tex.textures.CAPE);
}
if (tex.textures.ELYTRA) {
tex.textures.ELYTRA = await cache(tex.textures.ELYTRA);
}
return tex;
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
* @deprecated
*/
export async function cacheTextures(tex: GameProfile.TexturesInfo) {
return cacheTexturesAsUri(tex);
}
/**
* Get all the textures of this GameProfile and cache them.
*
* @param profile The game profile from the profile service
* @param cache Should we cache the texture into url? Default is `true`.
*/
export async function getTextures(profile: GameProfile, cache: boolean = true): Promise<GameProfile.TexturesInfo> {
const texture = parseTexturesInfo(profile);
if (texture) { return cache ? cacheTextures(texture) : texture; }
return Promise.reject(`No texture for user ${profile.id}.`);
}
/**
* Fetch the GameProfile by uuid.
*
* @param uuid The unique id of user/player
* @param option the options for this function
*/
export function fetch(uuid: string, option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
return fetchProfile(API.getProfileUrl(api, uuid) + "?" + queryString.stringify({
unsigned: false,
}), api.publicKey).then((p) => p as GameProfile);
}
/**
* Look up the GameProfile by username in game.
* @param name The username in game.
* @param option the options of this function
*/
export function lookup(name: string, option: { api?: API, timestamp?: number } = {}) {
const api = option.api || API_MOJANG;
const time: number = option.timestamp || 0;
let target = API.getProfileByNameUrl(api, name);
if (time) {
target += "?" + queryString.stringify({
at: (time / 1000),
});
}
return fetchProfile(target, api.publicKey).then((p) => p as GameProfile);
}
/**
* Look up all names by api
* @param names The names will go through
* @param option The option with api
*/
export function lookUpAll(names: string[], option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
let target = API.getProfileByNameUrl(api, "");
target = target.substring(0, target.length - 1);
return fetchProfile(target, api.publicKey, names).then((r) => r as Array<GameProfile | undefined>);
}
/**
* Set texture by access token and uuid. If the texture is undefined, it will clear the texture to default steve.
*
* @param option
* @param api
*/
export async function setTexture(option: {
accessToken: string,
uuid: string,
type: "skin" | "cape" | "elytra",
texture?: GameProfile.Texture,
data?: Buffer,
}, api: API = API_MOJANG): Promise<void> {
const textUrl = url.parse(API.getTextureUrl(api, option.uuid, option.type));
const headers: any = { Authorization: `Bearer: ${option.accessToken}` };
const requireEmpty = (httpOption: https.RequestOptions, content?: string | Buffer) =>
new Promise<void>((resolve, reject) => {
const req = https.request(httpOption, (inc) => {
let d = "";
inc.on("error", (e) => { reject(e); });
inc.on("data", (b) => d += b.toString());
inc.on("end", () => {
if (d === "" && inc.statusCode === 204) { resolve(); } else { reject(JSON.parse(d)); }
});
});
req.on("error", (e) => reject(e));
if (content) { req.write(content); }
req.end();
});
if (!option.texture) {
return requireEmpty({
method: "DELETE",
path: textUrl.path,
host: textUrl.host,
headers,
});
} else if (option.data) {
let status = 0;
const boundary = `----------------------${crypto.randomBytes(8).toString("hex")}`;
let buff: ByteBuffer = new ByteBuffer();
const diposition = (key: string, value: string) => {
if (status === 0) {
buff.writeUTF8String(`--${boundary}\r\nContent-Disposition: form-data`);
status = 1;
}
buff.writeUTF8String(`; ${key}="${value}"`);
};
const header = (key: string, value: string) => {
if (status === 1) {
buff.writeUTF8String("\r\n");
status = 2;
}
buff.writeUTF8String(`${key}:${value}\r\n`);
};
const content = (payload: Buffer) => {
if (status === 1) {
buff.writeUTF8String("\r\n");
}
status = 0;
buff.writeUTF8String("\r\n");
buff = buff.append(payload);
buff.writeUTF8String("\r\n");
};
const finish = () => {
buff.writeUTF8String(`--${boundary}--\r\n`);
};
if (option.texture.metadata) {
for (const key in option.texture.metadata) {
diposition("name", key);
content(option.texture.metadata[key]);
}
}
diposition("name", "file");
header("Content-Type", "image/png");
content(option.data);
finish();
buff.flip();
const out = Buffer.from(buff.toArrayBuffer());
headers["Content-Type"] = `multipart/form-data; boundary=${boundary}`;
headers["Content-Length"] = out.byteLength;
return requireEmpty({
method: "PUT",
host: textUrl.host,
path: textUrl.path,
headers,
}, out);
} else if (option.texture.url) {
const param = new url.URLSearchParams(Object.assign({ url: option.texture.url }, option.texture.metadata)).toString();
headers["Content-Type"] = "x-www-form-urlencoded";
headers["Content-Length"] = param.length;
return requireEmpty({
method: "POST",
host: textUrl.host,
path: textUrl.path,
headers,
}, param);
} else {
throw new Error("Illegal Option Format!");
}
}
}
ProfileService.cacheTextures = deprecate(ProfileService.cacheTextures, "Use cacheTexturesUri instead");
| {
tex.textures.SKIN = await cache(tex.textures.SKIN);
} | conditional_block |
weginfos.js | const ETAPPEN = [
// "Nummer": "Tournummer",
// "Land": "Wanderregion",
// "Berg": "Attraktion",
// "Beschreibung": "Tourbeschreibung",
// "Tourname": "Tourname",
// "Schwierigkeit": "Schwierigkeitsgrad",
// "Dauer": "Dauer",
// "KM": "Länge",
// "Aufstieg": "Aufstieg",
// "Abstieg": "Abstieg"},
// }, {
// "Nummer": "1",
// "Land": "Deutschland",
// "Berg": "Herzogstand",
// "Beschreibung": "Vom Herzogstand bis Heimgarten führt eine herrliche Gratüberschreitung. Mit grandiosen Aussichten in das Bayerische Alpenvorland darf als die wohl schönste Foto-Wanderung in den Münchner Hausbergen bezeichnet werden! Eine wahre Genusstour für Bergliebhaber mit einer einfachen Variante für Gelegenheitswanderer.",
// "Tourname": "",
// "Schwierigkeit": "schwer",
// "Dauer": "7",
// "KM": "14,2",
// "Aufstieg": 1186,
// "Abstieg": 1186
// },
// {
// "Nummer": "2",
// "Land": "Deutschland",
// "Berg": "Schrecksee",
// "Beschreibung": "Der Schrecksee ist wohl unbestritten der schönste Bergsee der deutschen Alpen. Eingebettet in Wiesenhänge und dekoriert mit einer kleinen Insel, ist der Schrecksee ein besonderes Idyll in den Allgäuer Alpen.",
// "Tourname": "",
// "Schwierigkeit": "schwer",
// "Dauer": "9",
// "KM": "20",
// "Aufstieg": 1600,
// "Abstieg": 1600
// },
// {
// "Nummer": "3",
// "Land": "Deutschland",
// "Berg": "Geroldsee",
// "Beschreibung": "Zwischen Mittenwald und Garmisch-Partenkirchen ruht ein unscheinbar gelegener, kleiner See: der Geroldsee. Wenn man nicht wüsste, dass hier die Natur (und einige Bauern) ein besonders wohlgeformtes Idyll erschaffen haben, man würde wohl nur zügig vorbeifahren.",
// "Tourname": "",
// "Schwierigkeit": "leicht",
// "Dauer": "2,4",
// "KM": "10",
// "Aufstieg": 160,
// "Abstieg": 160
// },
// {
// "Nummer": "4",
// "Land": "Deutschland",
// "Berg": "Königssee",
// "Beschreibung": "Das Berchtesgadener Land ist eine der schönsten Landschaften Deutschlands. Königssee und Obersee erinnern mit den angrenzenden Bergen an norwegische Fjorde. Die beiden Seen liegen inmitten des Nationalpark Berchtesgaden und bieten zahlreiche spannende Fotomotive für Alpenfans. Die Kirche St.Bartholomä vor der beeindruckenden Watzmann Ostwand und das Bootshäuschen am Obersee sind die bekanntesten darunter.",
// "Tourname": "",
// "Schwierigkeit": "leicht",
// "Dauer": "1,4",
// "KM": "5,5",
// "Aufstieg": 105,
// "Abstieg": 105
// },
// {
// "Nummer": "5",
// "Land": "Deutschland",
// "Berg": "Schloss Neuschwanstein",
// "Beschreibung": "Das Schloss Neuschwanstein ist ohne Zweifel einer der größten Besuchermagneten der Bayerischen Alpen und sogar Deutschlands. Diese Tour nimmt alle bekannten Fotospots mit, würzt dies mit ruhigen Ecken und wenig bekannten Ausblicken und ist dabei abwechslungsreich und wird nicht langweilig. Und bei rund 20km Distanz mit 1000 Höhenmetern ist sie alles andere als eine schnelle Sehenswürdigkeit, die man auf einer Europareise erleben kann! ",
// "Tourname": "",
// "Schwierigkeit": "schwer",
// "Dauer": "8",
// "KM": "20",
// "Aufstieg": 970,
// "Abstieg": 970
// },
{
"Nummer":"6",
"Land": "Österreich",
"Berg": "Seebensee",
"Beschreibung": "Um das Panorama eines bekannten Berges zu genießen, muss man sich bekanntermaßen in den umliegenden Bergen aufhalten. So auch bei dieser Wanderung mit Zugspitzpanorama. ",
"Tourname": "Seebensee",
"Schwierigkeit": "mittel/schwer",
"Dauer": "5",
"KM": "13",
"Aufstieg": 880,
"Abstieg": 880
},
{
"Nummer": "7",
"Land": "Österreich",
"Berg": "Olpererhütte",
"Beschreibung": "Der bekannteste Fotospot Tirols dürfte wohl eine kleine Hängebrücke im Zillertal sein. Unweit der Olperer Hütte zieht die kleine Brücke täglich hunderte Bergwanderer in seinen Bann.",
"Tourname": "Olpererhütte",
"Schwierigkeit": "mittel",
"Dauer": "5,1",
"KM": "14",
"Aufstieg": 875,
"Abstieg": 875
},
{
"Nummer": "8",
"Land": "Österreich",
"Berg": "Zell am See",
"Beschreibung": "Im Salzburger Land liegt eine der beliebtesten Sehenswürdigkeiten der Alpen: Zell am See mit dem dazugehörigen Zeller See. ",
"Tourname": "Zell am See",
"Schwierigkeit": "mittel/schwer",
"Dauer": "8",
"KM": "20",
"Aufstieg": 1470,
"Abstieg": 1470
},
{
"Nummer": "9",
"Land": "Österreich",
"Berg": "Dachstein",
"Beschreibung": "Mit 850 Höhenmetern zählt die Dachstein-Südwand zu den imposantesten Wänden der Ostalpen. Der „Dachstein-Professor“ Friedrich Simony ließ nach seiner Besteigung des Dachsteins von Süden durchzogen von „recht abscheulichem Klettern“ mit Hilfe einiger Gönner den \"Dachstein Randkluftsteig\" errichten, den ersten Klettersteig der Alpen.",
"Tourname": "Johann Klettersteig ",
"Schwierigkeit": "schwer",
"Dauer": "8",
"KM": "n.a.",
"Aufstieg": "1200",
"Abstieg": "200"
},
{
"Nummer": "10",
"Land": "Österreich",
"Berg": "Hallstatt",
"Beschreibung": "Kaum ein Ort in den Ostalpen steht so sehr für die Alpenidylle wie das kleine Dorf Hallstatt. Die Chinesen waren gleich so verzückt, dass sie die Gemeinde (samt See!) als Attraktion nachbauten.",
"Tourname": "Hallstatt",
"Schwierigkeit": "leicht/mittel",
"Dauer": "2",
"KM": "5,6",
"Aufstieg": 330,
"Abstieg": 330
},
{
"Nummer": "11",
"Land": "Österreich",
"Berg": "Achensee",
"Beschreibung": "Zwischen Rofan und Karwendel erstreckt sich malerisch der Achensee. Der länglich gezogene See erscheint eingezwängt in den steilen Berghängen wie ein klassischer norwegischer Fjord. ",
"Tourname": "Bärenkopf",
"Schwierigkeit": "mittel/schwer",
"Dauer": "6,2",
"KM": "16",
"Aufstieg": 1240,
"Abstieg": 1240
},
{
"Nummer": "12",
"Land": "Österreich",
"Berg": "Innsbruck Karwendel",
"Beschreibung": "Sie ist die österreichweit einzige Stadt inmitten einer alpinen Bergwelt: Innsbruck. Die umgebenden Berge sind allesamt zu bewandern, teils erleichtern Gondelbahnen den Auf- und Abstieg direkt aus dem Stadtzentrum.",
"Tourname": "Karwendelblick hoch über Innsbruck",
"Schwierigkeit": "mittel",
"Dauer": "4",
"KM": "10",
"Aufstieg": 740, | "Berg": "Innsbruck Citytour",
"Beschreibung": "Innsbruck ist keine überwältigend große Metropole. Ihre Einzigartigkeit besteht dafür in ihrer alpinen Lage. Blicke aus der Innenstadt gen Himmel bleiben an den prominenten, die Stadt umrahmenden Bergketten hängen.",
"Tourname": "Die Hauptstadt der Alpen urban entdecken",
"Schwierigkeit": "leicht",
"Dauer": "2",
"KM": "6",
"Aufstieg": 0,
"Abstieg": 0
}
// {
// "Nummer": "14",
// "Land": "Schweiz",
// "Berg": "Zermatt – Matterhorn",
// "Beschreibung": "Eine der großartigsten Kulissen der Alpen und das beste Spotlight auf das Wahrzeichen der Schweiz werden bei dieser Tour mit relativ wenig Anstrengung verbunden. Der unverkennbare Blick auf den Toblerone-Gipfel kombiniert mit seiner Spiegelung in einem der Seen ist Zweifelsohne das wohl bekannteste Panorama der Schweiz. ",
// "Tourname": "",
// "Schwierigkeit": "mittel",
// "Dauer": "4",
// "KM": "9,1",
// "Aufstieg": 680,
// "Abstieg": 680
// },
// {
// "Nummer": "15",
// "Land": "Schweiz",
// "Berg": "Saxer Lücke - Fählensee",
// "Beschreibung": "Mit einer nahezu surrealen Zahl an imposanten Aussichten punktet diese Tour im Alpstein-Massiv. Die Wanderung entlang des Stauberenfirst führt den Fotowanderer zu einem der bekanntesten Fotospots der Schweiz: Die Saxer Lücke. Nahezu senkrecht erheben sich hier die Felsplatten aus dem Rheintal fast 2000 Höhenmeter und gipfeln in markant geschwungenen Felsplatten.",
// "Tourname": "",
// "Schwierigkeit": "schwer",
// "Dauer": "6,5",
// "KM": "17,5",
// "Aufstieg": 660,
// "Abstieg": 1530
// },
// {
// "Nummer": "16",
// "Land": "Schweiz",
// "Berg": "Bachalpsee",
// "Beschreibung": "Das Dreigestirn Eiger-Mönch-Jungfrau ist weit über die Landesgrenzen hinaus bekannt und zieht viele Besucher in diese Region. Die massiven Felsformationen stehen im malerischen Kontrast mit ewigem Eis, hohen Felsklippen, wasserreichen Bergbächen und einem saftig-grünen Tal.",
// "Tourname": "",
// "Schwierigkeit": "mittel/schwer",
// "Dauer": "6",
// "KM": "16",
// "Aufstieg": 780,
// "Abstieg": 1400
// },
// {
// "Nummer":"17",
// "Land": "Schweiz",
// "Berg": "Alpstein Seealpsee + Äscher ",
// "Beschreibung": "Das Gasthaus Äscher in der Nordostschweiz ist das prominenteste Gasthaus der Alpen. Binnen weniger Jahre hat es sich – aufgrund seiner atemberaubenden, einzigartigen Lage, eingerahmt von einem massiven Felsvorsprung – von einem unscheinbaren Geheimtipp zu weltweiter Beliebtheit verwandelt. Seit den 2010er Jahren gilt es als einer der meistbesuchten Orte der Schweiz. ",
// "Tourname": "",
// "Schwierigkeit": "mittel",
// "Dauer": "4,6",
// "KM": "11",
// "Aufstieg": 630,
// "Abstieg": 1350
// },
// {
// "Nummer": "18",
// "Land": "Schweiz",
// "Berg": "Augstmatthorn",
// "Beschreibung": "Steil aber zugleich sanft schmiegen sich Bergwiesen an die steilen Hänge. Wie gemalt führen sie in vielen Schwüngen Richtung Horizont. 1500 Tiefenmeter bis hinunter an den Brienzersee und nach Interlaken verstärken den luftigen Eindruck. Diese eindrucksvolle Wanderung führt entlang eines außergewöhnlichen Steilwiesengrats: Der Brienzergrat (auch als Hardergrat bezeichnet).",
// "Tourname": "",
// "Schwierigkeit": "schwer",
// "Dauer": "7",
// "KM": "16",
// "Aufstieg": 1680,
// "Abstieg": 980
// },
// {
// "Nummer": "19",
// "Land": "Schweiz",
// "Berg": "Rigi",
// "Beschreibung": "Die Königin Rigi ist mehr als ein Berg. So jedenfalls beschreibt der am Fuße der Rigi aufgewachsene Publizist Adi Kälin beschreibt den Ausflugsberg in seinem gleichnamigen Bilder- und Geschichtenbuch. ",
// "Tourname": "",
// "Schwierigkeit": "leicht/mittel",
// "Dauer": "3,2",
// "KM": "10,5",
// "Aufstieg": 120,
// "Abstieg": 800
// },
// {
// "Nummer": "20",
// "Land": "Italien",
// "Berg": "Vilnöss - Kirche St. Ranui",
// "Beschreibung": "Wohl jeder Alpenfan hat schon einmal ein Bild eines Kirchleins vor einer massiven Dolomit-Kulisse gesehen. Die Bilderbuch-Version eines solchen befindet sich in Sankt Magdalena im Villnösstal. Mit einem einfachen, aber genussvollen Halbtagesspaziergang verknüpft man die schönsten Foto- und Aussichtspunkte. Sehenswert ist zudem ein Besuch im örtlichen Museum.",
// "Tourname": "",
// "Schwierigkeit": "leicht",
// "Dauer": "2",
// "KM": "5",
// "Aufstieg": 150,
// "Abstieg": 200
// },
// {
// "Nummer": "21",
// "Land": "Italien",
// "Berg": "Pragser Wildsee – Lago di Braies",
// "Beschreibung": "Bei Fotografen wohl mehr als bekannt, ist der Pragser Wildsee (ital. Lago die Braies) seit mehr als einem Jahrhundert ein beliebter Ausflugsort in den Alpen für Jung und Alt. Hier trifft ein ruhiger Bergsee auf touristische Erschließung. Die beeindruckende Krönung des Panoramas ist jedoch die unglaublich mächtige Nordwand des Seekofels, die den Blick beschränkt.",
// "Tourname": "",
// "Schwierigkeit": "leicht",
// "Dauer": "2",
// "KM": "6,5",
// "Aufstieg": 150,
// "Abstieg": 150
// },
// {
// "Nummer": "22",
// "Land": "Italien",
// "Berg": "Drei Zinnen",
// "Beschreibung": "Die Drei Zinnen sind das berühmte Wahrzeichen der Dolomiten. Die massiven Nordwände der drei einzigartig geformten Felsbänke sind das wohl begehrteste Fotomotiv im schönsten Gebirge der Welt. ",
// "Tourname": "",
// "Schwierigkeit": "leicht/mittel",
// "Dauer": "4",
// "KM": "9,3",
// "Aufstieg": 407,
// "Abstieg": 407
// },
// {
// "Nummer": "23",
// "Land": "Italien",
// "Berg": "Cinque Torri",
// "Beschreibung": "Paul Grohmann, Alpinist und Mitberünder des österreichischen Alpenvereins, war bereits im 19. Jahrhundert von den einmaligen Aussichten rund um die Cinque Torri begeistert: „Ein Meer von Bergen liegt vor uns und es wäre ganz vergeblich, diese aufzuzählen oder schildern zu wollen. Nur die Kamera könnte den Eindruck teilweise wiedergeben.“",
// "Tourname": "",
// "Schwierigkeit": "mittel",
// "Dauer": "4",
// "KM": "10,5",
// "Aufstieg": 600,
// "Abstieg": 600
// },
// {
// "Nummer": "24",
// "Land": "Italien",
// "Berg": "Seiser Alm ",
// "Beschreibung": "Die Seiser Alm ist die größte Hochalm der Alpen und gleichsam wohl einer der malerischsten Orte der Dolomiten.",
// "Tourname": "",
// "Schwierigkeit": "mittel",
// "Dauer": "4",
// "KM": "13",
// "Aufstieg": 300,
// "Abstieg": 300
// },
// {
// "Nummer": "25",
// "Land": "Italien",
// "Berg": "Seceda",
// "Beschreibung": "Die Puez-Geislergruppe begeistert mit einer außergewöhnlichen landschaftlichen Vielfalt. Am Hotspot Seceda versammeln sich die Landschaftsfotografen zu allen Tageszeiten. Die eindrucksvollen, steil abfallenden Puez-Geislerspitzen kontrastieren zu den perfekt gepflegten Almwiesen des Skigebiets hoch über dem Grödnertal. ",
// "Tourname": "",
// "Schwierigkeit": "leicht/mittel",
// "Dauer": "2",
// "KM": "6,5",
// "Aufstieg": 132,
// "Abstieg": 480
// },
// {
// "Nummer": "26",
// "Land": "Italien",
// "Berg": "Courmayeur – Val Ferret",
// "Beschreibung": "Von Hütte zu Hütte die Alpen Erwandern ist Traum vieler Wanderenthusiasten. Unter den Myriaden an Möglichkeiten ist die \"Tour du Mont-Blanc\" der wohl bekannteste Hütten-Trek der Alpen. Der Weitwanderweg schlängelt sich um das Massiv des Mont Blancs und durchquert dabei italienisches, schweizerisches und französisches Staatsgebiet. ",
// "Tourname": "",
// "Schwierigkeit": "mittel/schwer",
// "Dauer": "5,2",
// "KM": "13,8",
// "Aufstieg": 960,
// "Abstieg": 590
// },
// {
// "Nummer": "27",
// "Land": "Frankreich",
// "Berg": "Aiguille du Midi ",
// "Beschreibung": "Die beeindruckendste Aussichtsplattform der Alpen ist wohl ohne Zweifel die Aiguille du Midi. Wie ein Adlerhorst klebt sie unnachahmlich auf den tausend Meter hohen Felszacken zwischen Abgrund und Gletscher.",
// "Tourname": "",
// "Schwierigkeit": "leicht/mittel",
// "Dauer": "2,5",
// "KM": "6,5",
// "Aufstieg": 130,
// "Abstieg": 520
// },
// {
// "Nummer": "28",
// "Land": "Frankreich",
// "Berg": "Mont Blanc-Lac Blanc",
// "Beschreibung": "„Südbalkon“ wird das alpine Plateau nordwestlich von Mont Blanc und Chamonix genannt. Zwischen Lac Blanc – dem weißen See – und den kleineren Lacs de Chéserys befindet sich die unbestritten schönste Aussicht auf das gesamte Mont Blanc Massiv.",
// "Tourname": "Chamonix-Lac Blanc",
// "Schwierigkeit": "mittel",
// "Dauer": "4",
// "KM": "9",
// "Aufstieg": 930,
// "Abstieg": 480
// },
// {
// "Nummer": "29",
// "Land": "Frankreich",
// "Berg": "Ecrins/La Mejie",
// "Beschreibung": "Die Meije ist bei deutschsprachigen Alpenfreunden zumeist nur im Kontext Alpinbergsteigen und Freeriden bekannt. Unter diesen Sportlern gilt der markante Berg im Pelvoux in den Dauphiné-Alpen als Wahrzeichen der französischen Westalpen. Dennoch ist die Region im deutschsprachigen Raum relativ unbeachtet: Mit ihren 3984m fehlen ihr schließlich auch 16 Meter für den famosen 4000er-Stempel. ",
// "Tourname": "",
// "Schwierigkeit": "mittel/schwer",
// "Dauer": "5,5",
// "KM": "15,5",
// "Aufstieg": 860,
// "Abstieg": 860
// },
// {
// "Nummer": "30",
// "Land": "Slowenien",
// "Berg": "Bleder See",
// "Beschreibung": "Ein romantisches Kirchlein inmitten einer kleinen Insel auf einem See inmitten von Bergen. Der Bleder See ist der wohl meistfotografierte Ort in Slowenien und sicherlich auch in den Slowenischen Alpen.",
// "Tourname": "Traumblicke in Slowenien ",
// "Schwierigkeit": "leicht",
// "Dauer": "1,5",
// "KM": "4",
// "Aufstieg": 270,
// "Abstieg": 270
// }
] | "Abstieg": 740
},
{
"Nummer": "13",
"Land": "Österreich", | random_line_split |
process.go | package process
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"time"
"github.com/pinpt/ripsrc/ripsrc/parentsgraph"
"github.com/pinpt/ripsrc/ripsrc/gitblame2"
"github.com/pinpt/ripsrc/ripsrc/pkg/logger"
"github.com/pinpt/ripsrc/ripsrc/history3/process/repo"
"github.com/pinpt/ripsrc/ripsrc/gitexec"
"github.com/pinpt/ripsrc/ripsrc/history3/incblame"
"github.com/pinpt/ripsrc/ripsrc/history3/process/parser"
)
type Process struct {
opts Opts
gitCommand string
graph *parentsgraph.Graph
childrenProcessed map[string]int
maxLenOfStoredTree int
mergePartsCommit string
// map[parent_diffed]parser.Commit
mergeParts map[string]parser.Commit
timing *Timing
repo repo.Repo
unloader *repo.Unloader
checkpointsDir string
lastProcessedCommitHash string
}
type Opts struct {
Logger logger.Logger
RepoDir string
// CheckpointsDir is the directory to store incremental data cache for this repo
// If empty, directory is created inside repoDir
CheckpointsDir string
// NoStrictResume forces incremental processing to avoid checking that it continues from the same commit in previously finished on. Since incrementals save a large number of previous commits, it works even starting on another commit.
NoStrictResume bool
// CommitFromIncl process starting from this commit (including this commit).
CommitFromIncl string
// CommitFromMakeNonIncl by default we start from passed commit and include it. Set CommitFromMakeNonIncl to true to avoid returning it, and skipping reading/writing checkpoint.
CommitFromMakeNonIncl bool
// DisableCache is unused.
DisableCache bool
// AllBranches set to true to process all branches. If false, processes commits starting from HEAD only.
AllBranches bool
// WantedBranchRefs filter branches. When CommitFromIncl and AllBranches is set this is required.
WantedBranchRefs []string
// ParentsGraph is optional graph of commits. Pass to reuse, if not passed will be created.
ParentsGraph *parentsgraph.Graph
}
type Result struct {
Commit string
Files map[string]*incblame.Blame
}
func New(opts Opts) *Process {
s := &Process{}
if opts.Logger == nil {
opts.Logger = logger.NewDefaultLogger(os.Stdout)
}
s.opts = opts
s.gitCommand = "git"
s.timing = &Timing{}
if opts.CheckpointsDir != "" {
s.checkpointsDir = filepath.Join(opts.CheckpointsDir, "pp-git-cache")
} else {
s.checkpointsDir = filepath.Join(opts.RepoDir, "pp-git-cache")
}
return s
}
func (s *Process) Timing() Timing {
return *s.timing
}
func (s *Process) initCheckpoints() error {
if s.opts.CommitFromIncl == "" {
s.repo = repo.New()
} else {
expectedCommit := ""
if s.opts.NoStrictResume {
// validation disabled
} else {
expectedCommit = s.opts.CommitFromIncl
}
reader := repo.NewCheckpointReader(s.opts.Logger)
r, err := reader.Read(s.checkpointsDir, expectedCommit)
if err != nil {
return fmt.Errorf("Could not read checkpoint: %v", err)
}
s.repo = r
}
s.unloader = repo.NewUnloader(s.repo)
return nil
}
func (s *Process) Run(resChan chan Result) error {
defer func() {
close(resChan)
}()
if s.opts.ParentsGraph != nil {
s.graph = s.opts.ParentsGraph
} else {
s.graph = parentsgraph.New(parentsgraph.Opts{
RepoDir: s.opts.RepoDir,
AllBranches: s.opts.AllBranches,
Logger: s.opts.Logger,
})
err := s.graph.Read()
if err != nil {
return err
}
}
s.childrenProcessed = map[string]int{}
r, err := s.gitLogPatches()
if err != nil {
return err
}
defer r.Close()
commits := make(chan parser.Commit)
p := parser.New(r)
done := make(chan bool)
go func() {
defer func() {
done <- true
}()
err := p.Run(commits)
if err != nil {
panic(err)
}
}()
drainAndExit := func() {
for range commits {
}
<-done
}
i := 0
for commit := range commits {
if i == 0 {
err := s.initCheckpoints()
if err != nil {
drainAndExit()
return err
}
}
i++
commit.Parents = s.graph.Parents[commit.Hash]
err := s.processCommit(resChan, commit)
if err != nil {
drainAndExit()
return err
}
}
if len(s.mergeParts) > 0 {
s.processGotMergeParts(resChan)
}
if i == 0 {
// there were no items in log, happens when last processed commit was in a branch that is no longer recent and is skipped in incremental
// no need to write checkpoints
<-done
return nil
}
writer := repo.NewCheckpointWriter(s.opts.Logger)
err = writer.Write(s.repo, s.checkpointsDir, s.lastProcessedCommitHash)
if err != nil {
<-done
return err
}
//fmt.Println("max len of stored tree", s.maxLenOfStoredTree)
//fmt.Println("repo len", len(s.repo))
<-done
return nil
}
func (s *Process) trimGraphAfterCommitProcessed(commit string) {
parents := s.graph.Parents[commit]
for _, p := range parents {
s.childrenProcessed[p]++ // mark commit as processed
siblings := s.graph.Children[p]
if s.childrenProcessed[p] == len(siblings) {
// done with parent, can delete it
s.unloader.Unload(p)
}
}
//commitsInMemory := s.repo.CommitsInMemory()
commitsInMemory := len(s.repo)
if commitsInMemory > s.maxLenOfStoredTree {
s.maxLenOfStoredTree = commitsInMemory
}
}
func (s *Process) processCommit(resChan chan Result, commit parser.Commit) error {
if len(s.mergeParts) > 0 {
// continuing with merge
if s.mergePartsCommit == commit.Hash {
s.mergeParts[commit.MergeDiffFrom] = commit
// still same
return nil
} else {
// finished
s.processGotMergeParts(resChan)
// new commit
// continue below
}
}
if len(commit.Parents) > 1 { // this is a merge
s.mergePartsCommit = commit.Hash
s.mergeParts = map[string]parser.Commit{}
s.mergeParts[commit.MergeDiffFrom] = commit
return nil
}
res, err := s.processRegularCommit(commit)
if err != nil {
return err
}
s.trimGraphAfterCommitProcessed(commit.Hash)
resChan <- res
return nil
}
func (s *Process) processGotMergeParts(resChan chan Result) |
type Timing struct {
RegularCommitsCount int
RegularCommitsTime time.Duration
MergesCount int
MergesTime time.Duration
SlowestCommits []CommitWithDuration
}
type CommitWithDuration struct {
Commit string
Duration time.Duration
}
const maxSlowestCommits = 10
func (s *Timing) UpdateSlowestCommitsWith(commit string, d time.Duration) {
s.SlowestCommits = append(s.SlowestCommits, CommitWithDuration{Commit: commit, Duration: d})
sort.Slice(s.SlowestCommits, func(i, j int) bool {
a := s.SlowestCommits[i]
b := s.SlowestCommits[j]
return a.Duration > b.Duration
})
if len(s.SlowestCommits) > maxSlowestCommits {
s.SlowestCommits = s.SlowestCommits[0:maxSlowestCommits]
}
}
func (s *Timing) SlowestCommitsDur() (res time.Duration) {
for _, c := range s.SlowestCommits {
res += c.Duration
}
return
}
/*
func (s *Timing) Stats() map[string]interface{} {
return map[string]interface{}{
"TotalRegularCommit": s.TotalRegularCommit,
"TotalMerges": s.TotalMerges,
"SlowestCommits": s.SlowestCommits,
"SlowestCommitsDur": s.SlowestCommitsDur(),
}
}*/
func (s *Timing) OutputStats(wr io.Writer) {
fmt.Fprintln(wr, "git processor timing")
fmt.Fprintln(wr, "regular commits", s.RegularCommitsCount)
fmt.Fprintln(wr, "time in regular commits", s.RegularCommitsTime)
fmt.Fprintln(wr, "merges", s.MergesCount)
fmt.Fprintln(wr, "time in merges commits", s.MergesTime)
fmt.Fprintf(wr, "time in %v slowest commits %v\n", len(s.SlowestCommits), s.SlowestCommitsDur())
fmt.Fprintln(wr, "slowest commits")
for _, c := range s.SlowestCommits {
fmt.Fprintf(wr, "%v %v\n", c.Commit, c.Duration)
}
}
func (s *Process) processRegularCommit(commit parser.Commit) (res Result, rerr error) {
s.lastProcessedCommitHash = commit.Hash
start := time.Now()
defer func() {
dur := time.Since(start)
s.timing.UpdateSlowestCommitsWith(commit.Hash, dur)
s.timing.RegularCommitsTime += dur
s.timing.RegularCommitsCount++
}()
if len(commit.Parents) > 1 {
panic("not a regular commit")
}
// note that commit exists (important for empty commits)
s.repo.AddCommit(commit.Hash)
//fmt.Println("processing regular commit", commit.Hash)
res.Commit = commit.Hash
res.Files = map[string]*incblame.Blame{}
for _, ch := range commit.Changes {
//fmt.Printf("%+v\n", string(ch.Diff))
diff := incblame.Parse(ch.Diff)
if diff.IsBinary {
// do not keep actual lines, but show in result
bl := incblame.BlameBinaryFile(commit.Hash)
if diff.Path == "" {
p := diff.PathPrev
res.Files[p] = bl
// removal
} else {
p := diff.Path
res.Files[p] = bl
s.repo[commit.Hash][p] = bl
}
continue
}
//fmt.Printf("diff %+v\n", diff)
if diff.Path == "" {
// file removed, no longer need to keep blame reference, but showcase the file in res.Files using PathPrev
res.Files[diff.PathPrev] = &incblame.Blame{Commit: commit.Hash}
continue
}
// TODO: test renames here as well
if diff.Path == "" {
panic(fmt.Errorf("commit diff does not specify Path: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// this is a rename
if diff.PathPrev != "" && diff.PathPrev != diff.Path {
if len(commit.Parents) != 1 {
panic(fmt.Errorf("rename with more than 1 parent (merge) not supported: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// rename with no patch
if len(diff.Hunks) == 0 {
parent := commit.Parents[0]
pb, err := s.repo.GetFileMust(parent, diff.PathPrev)
if err != nil {
rerr = fmt.Errorf("could not get parent file for rename: %v err: %v", commit.Hash, err)
return
}
if pb.IsBinary {
s.repo[commit.Hash][diff.Path] = pb
res.Files[diff.Path] = pb
continue
}
}
} else {
// this is an empty file creation
//if len(diff.Hunks) == 0 {
// panic(fmt.Errorf("no changes in commit: %v diff: %v", commit.Hash, string(ch.Diff)))
//}
}
var parentBlame *incblame.Blame
if diff.PathPrev == "" {
// file added in this commit, no parent blame for this file
} else {
switch len(commit.Parents) {
case 0: // initial commit, no parent
case 1: // regular commit
parentHash := commit.Parents[0]
pb := s.repo.GetFileOptional(parentHash, diff.PathPrev)
// file may not be in parent if this is create
if pb != nil {
parentBlame = pb
}
case 2: // merge
panic("merge passed to regular commit processing")
}
}
var blame incblame.Blame
if parentBlame == nil {
blame = incblame.Apply(incblame.Blame{}, diff, commit.Hash, diff.PathOrPrev())
} else {
if parentBlame.IsBinary {
bl, err := s.slowGitBlame(commit.Hash, diff.Path)
if err != nil {
return res, err
}
blame = bl
} else {
blame = incblame.Apply(*parentBlame, diff, commit.Hash, diff.PathOrPrev())
}
}
s.repo[commit.Hash][diff.Path] = &blame
res.Files[diff.Path] = &blame
}
if len(commit.Parents) == 0 {
// no need to copy files from prev
return
}
// copy unchanged from prev
p := commit.Parents[0]
files := s.repo.GetCommitMust(p)
for fp := range files {
// was in the diff changes, nothing to do
if _, ok := res.Files[fp]; ok {
continue
}
blame, err := s.repo.GetFileMust(p, fp)
if err != nil {
rerr = fmt.Errorf("could not get parent file for unchanged: %v err: %v", commit.Hash, err)
return
}
// copy reference
s.repo[commit.Hash][fp] = blame
}
return
}
const deletedPrefix = "@@@del@@@"
func (s *Process) processMergeCommit(commitHash string, parts map[string]parser.Commit) (res Result, rerr error) {
s.lastProcessedCommitHash = commitHash
start := time.Now()
defer func() {
dur := time.Since(start)
s.timing.UpdateSlowestCommitsWith(commitHash, dur)
s.timing.MergesTime += dur
s.timing.MergesCount++
}()
// note that commit exists (important for empty commits)
s.repo.AddCommit(commitHash)
//fmt.Println("processing merge commit", commitHash)
parentHashes := s.graph.Parents[commitHash]
parentCount := len(parentHashes)
res.Commit = commitHash
res.Files = map[string]*incblame.Blame{}
// parse and organize all diffs for access
diffs := map[string][]*incblame.Diff{}
hashToParOrd := map[string]int{}
for i, h := range parentHashes {
hashToParOrd[h] = i
}
for parHash, part := range parts {
for _, ch := range part.Changes {
diff := incblame.Parse(ch.Diff)
key := ""
if diff.Path != "" {
key = diff.Path
} else {
key = deletedPrefix + diff.PathPrev
}
par, ok := diffs[key]
if !ok {
par = make([]*incblame.Diff, parentCount, parentCount)
diffs[key] = par
}
parInd := hashToParOrd[parHash]
par[parInd] = &diff
}
}
// get a list of all files
files := map[string]bool{}
for k := range diffs {
files[k] = true
}
// process all files
EACHFILE:
for k := range files {
diffs := diffs[k]
isDelete := true
for _, diff := range diffs {
if diff != nil && diff.Path != "" {
isDelete = false
}
}
//fmt.Println("diffs")
//for i, d := range diffs {
// fmt.Println(i, d)
//}
if isDelete {
// only showing deletes and files changed in merge comparent to at least one parent
pathPrev := k[len(deletedPrefix):]
res.Files[pathPrev] = &incblame.Blame{Commit: commitHash}
continue
}
// below k == new file path
binaryDiffs := 0
for _, diff := range diffs {
if diff == nil {
continue
}
if diff.IsBinary {
binaryDiffs++
}
}
binParentsWithDiffs := 0
for i, diff := range diffs {
if diff == nil {
continue
}
if diff.PathPrev == "" {
// create
continue
}
parent := parentHashes[i]
pb, err := s.repo.GetFileMust(parent, diff.PathPrev)
if err != nil {
rerr = fmt.Errorf("could not get file for merge bin parent. merge: %v %v", commitHash, err)
return
}
if pb.IsBinary {
binParentsWithDiffs++
}
}
// do not try to resolve the diffs for binary files in merge commits
if binaryDiffs != 0 || binParentsWithDiffs != 0 {
bl := incblame.BlameBinaryFile(commitHash)
s.repo[commitHash][k] = bl
res.Files[k] = bl
continue
}
/*
// file is a binary
if binaryDiffs == validDiffs {
bl := incblame.BlameBinaryFile(commitHash)
s.repoSave(commitHash, k, bl)
res.Files[k] = bl
continue
}
// file is not a binary but one of the parents was a binary, need to use a regular git blame
if binaryParents != 0 {
bl, err := s.slowGitBlame(commitHash, k)
if err != nil {
return res, err
}
s.repoSave(commitHash, k, &bl)
res.Files[k] = &bl
continue
}*/
for i, diff := range diffs {
if diff == nil {
// same as parent
parent := parentHashes[i]
pb := s.repo.GetFileOptional(parent, k)
if pb != nil {
// exacly the same as parent, no changes
s.repo[commitHash][k] = pb
continue EACHFILE
}
}
}
parents := []incblame.Blame{}
for i, diff := range diffs {
if diff == nil {
// no change use prev
parentHash := parentHashes[i]
parentBlame := s.repo.GetFileOptional(parentHash, k)
if parentBlame == nil {
panic(fmt.Errorf("merge: no change for file recorded, but parent does not contain file:%v merge commit:%v parent:%v", k, commitHash, parentHash))
}
parents = append(parents, *parentBlame)
continue
}
pathPrev := diff.PathPrev
if pathPrev == "" {
// this is create, no parent blame
parents = append(parents, incblame.Blame{})
continue
}
parentHash := parentHashes[i]
parentBlame, err := s.repo.GetFileMust(parentHash, pathPrev)
if err != nil {
rerr = fmt.Errorf("could not get file for unchanged case1 merge file. merge: %v %v", commitHash, err)
return
}
parents = append(parents, *parentBlame)
}
//fmt.Println("path", k)
diffs2 := []incblame.Diff{}
for _, ob := range diffs {
if ob == nil {
ob = &incblame.Diff{}
}
diffs2 = append(diffs2, *ob)
}
blame := incblame.ApplyMerge(parents, diffs2, commitHash, k)
s.repo[commitHash][k] = &blame
// only showing deletes and files changed in merge comparent to at least one parent
res.Files[k] = &blame
}
// for merge commits we need to use the most updated copy
// get a list of all files in all parents
files = map[string]bool{}
for _, p := range parentHashes {
filesInCommit := s.repo.GetCommitMust(p)
for f := range filesInCommit {
files[f] = true
}
}
root := ""
for f := range files {
alreadyAddedAbove := false
{
bl := s.repo.GetFileOptional(commitHash, f)
if bl != nil {
alreadyAddedAbove = true
}
}
if alreadyAddedAbove {
continue
}
var candidates []*incblame.Blame
for _, p := range parentHashes {
bl := s.repo.GetFileOptional(p, f)
if bl != nil {
candidates = append(candidates, bl)
}
}
// only one branch has the file
if len(candidates) == 1 {
// copy reference
s.repo[commitHash][f] = candidates[0]
continue
}
if len(candidates) == 0 {
panic("no file candidates")
}
// TODO: if more than one candidate we pick at random right now
// Need to check if this is correct? If no change at merge to any that means they are all the same?
// Or we need to check the last common parent and see? This was added in the previous design so possible is not needed anymore.
/*
if root == "" {
// TODO: this is not covered by unit tests
ts := time.Now()
// find common parent commit for all
root = s.graph.Parents.LastCommonParent(parentHashes)
dur := time.Since(ts)
if dur > time.Second {
fmt.Printf("took %v to find last common parent for %v res: %v", dur, parentHashes, root)
}
}*/
var res2 *incblame.Blame
for _, c := range candidates {
// unchanged
//if c.Commit == root {
// continue
//}
res2 = c
}
if res2 == nil {
var err error
// all are unchanged
res2, err = s.repo.GetFileMust(root, f)
if err != nil {
rerr = fmt.Errorf("could not get file for unchanged case2 merge file. merge: %v %v", commitHash, err)
return
}
}
s.repo[commitHash][f] = res2
}
return
}
func (s *Process) slowGitBlame(commitHash string, filePath string) (res incblame.Blame, _ error) {
bl, err := gitblame2.Run(s.opts.RepoDir, commitHash, filePath)
//fmt.Println("running regular blame for file switching from bin mode to regular")
if err != nil {
return res, err
}
res.Commit = commitHash
for _, l := range bl.Lines {
l2 := &incblame.Line{}
l2.Commit = l.CommitHash
l2.Line = []byte(l.Content)
res.Lines = append(res.Lines, l2)
}
return
}
func (s *Process) RunGetAll() (_ []Result, err error) {
res := make(chan Result)
done := make(chan bool)
go func() {
err = s.Run(res)
done <- true
}()
var res2 []Result
for r := range res {
res2 = append(res2, r)
}
<-done
return res2, err
}
func (s *Process) gitLogPatches() (io.ReadCloser, error) {
// empty file at temp location to set an empty attributesFile
f, err := ioutil.TempFile("", "ripsrc")
if err != nil {
return nil, err
}
err = f.Close()
if err != nil {
return nil, err
}
args := []string{
"-c", "core.attributesFile=" + f.Name(),
"-c", "diff.renameLimit=10000",
"log",
"-p",
"-m",
"--date-order",
"--reverse",
"--no-abbrev-commit",
"--pretty=short",
}
if s.opts.CommitFromIncl != "" {
if s.opts.AllBranches {
for _, c := range s.opts.WantedBranchRefs {
args = append(args, c)
}
}
pf := ""
if s.opts.CommitFromMakeNonIncl {
pf = "..HEAD"
} else {
pf = "^..HEAD"
}
args = append(args, s.opts.CommitFromIncl+pf)
} else {
if s.opts.AllBranches {
args = append(args, "--all")
}
}
ctx := context.Background()
//if s.opts.DisableCache {
return gitexec.ExecPiped(ctx, s.gitCommand, s.opts.RepoDir, args)
//}
//return gitexec.ExecWithCache(ctx, s.gitCommand, s.opts.RepoDir, args)
}
| {
res, err := s.processMergeCommit(s.mergePartsCommit, s.mergeParts)
if err != nil {
panic(err)
}
s.trimGraphAfterCommitProcessed(s.mergePartsCommit)
s.mergeParts = nil
resChan <- res
} | identifier_body |
process.go | package process
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"time"
"github.com/pinpt/ripsrc/ripsrc/parentsgraph"
"github.com/pinpt/ripsrc/ripsrc/gitblame2"
"github.com/pinpt/ripsrc/ripsrc/pkg/logger"
"github.com/pinpt/ripsrc/ripsrc/history3/process/repo"
"github.com/pinpt/ripsrc/ripsrc/gitexec"
"github.com/pinpt/ripsrc/ripsrc/history3/incblame"
"github.com/pinpt/ripsrc/ripsrc/history3/process/parser"
)
type Process struct {
opts Opts
gitCommand string
graph *parentsgraph.Graph
childrenProcessed map[string]int
maxLenOfStoredTree int
mergePartsCommit string
// map[parent_diffed]parser.Commit
mergeParts map[string]parser.Commit
timing *Timing
repo repo.Repo
unloader *repo.Unloader
checkpointsDir string
lastProcessedCommitHash string
}
type Opts struct {
Logger logger.Logger
RepoDir string
// CheckpointsDir is the directory to store incremental data cache for this repo
// If empty, directory is created inside repoDir
CheckpointsDir string
// NoStrictResume forces incremental processing to avoid checking that it continues from the same commit in previously finished on. Since incrementals save a large number of previous commits, it works even starting on another commit.
NoStrictResume bool
// CommitFromIncl process starting from this commit (including this commit).
CommitFromIncl string
// CommitFromMakeNonIncl by default we start from passed commit and include it. Set CommitFromMakeNonIncl to true to avoid returning it, and skipping reading/writing checkpoint.
CommitFromMakeNonIncl bool
// DisableCache is unused.
DisableCache bool
// AllBranches set to true to process all branches. If false, processes commits starting from HEAD only.
AllBranches bool
// WantedBranchRefs filter branches. When CommitFromIncl and AllBranches is set this is required.
WantedBranchRefs []string
// ParentsGraph is optional graph of commits. Pass to reuse, if not passed will be created.
ParentsGraph *parentsgraph.Graph
}
type Result struct {
Commit string
Files map[string]*incblame.Blame
}
func New(opts Opts) *Process {
s := &Process{}
if opts.Logger == nil {
opts.Logger = logger.NewDefaultLogger(os.Stdout)
}
s.opts = opts
s.gitCommand = "git"
s.timing = &Timing{}
if opts.CheckpointsDir != "" {
s.checkpointsDir = filepath.Join(opts.CheckpointsDir, "pp-git-cache")
} else {
s.checkpointsDir = filepath.Join(opts.RepoDir, "pp-git-cache")
}
return s
}
func (s *Process) Timing() Timing {
return *s.timing
}
func (s *Process) initCheckpoints() error {
if s.opts.CommitFromIncl == "" {
s.repo = repo.New()
} else {
expectedCommit := ""
if s.opts.NoStrictResume {
// validation disabled
} else {
expectedCommit = s.opts.CommitFromIncl
}
reader := repo.NewCheckpointReader(s.opts.Logger)
r, err := reader.Read(s.checkpointsDir, expectedCommit)
if err != nil {
return fmt.Errorf("Could not read checkpoint: %v", err)
}
s.repo = r
}
s.unloader = repo.NewUnloader(s.repo)
return nil
}
func (s *Process) Run(resChan chan Result) error {
defer func() {
close(resChan)
}()
if s.opts.ParentsGraph != nil {
s.graph = s.opts.ParentsGraph
} else {
s.graph = parentsgraph.New(parentsgraph.Opts{
RepoDir: s.opts.RepoDir,
AllBranches: s.opts.AllBranches,
Logger: s.opts.Logger,
})
err := s.graph.Read()
if err != nil {
return err
}
}
s.childrenProcessed = map[string]int{}
r, err := s.gitLogPatches()
if err != nil {
return err
}
defer r.Close()
commits := make(chan parser.Commit)
p := parser.New(r)
done := make(chan bool)
go func() {
defer func() {
done <- true
}()
err := p.Run(commits)
if err != nil {
panic(err)
}
}()
drainAndExit := func() {
for range commits {
}
<-done
}
i := 0
for commit := range commits {
if i == 0 {
err := s.initCheckpoints()
if err != nil {
drainAndExit()
return err
}
}
i++
commit.Parents = s.graph.Parents[commit.Hash]
err := s.processCommit(resChan, commit)
if err != nil |
}
if len(s.mergeParts) > 0 {
s.processGotMergeParts(resChan)
}
if i == 0 {
// there were no items in log, happens when last processed commit was in a branch that is no longer recent and is skipped in incremental
// no need to write checkpoints
<-done
return nil
}
writer := repo.NewCheckpointWriter(s.opts.Logger)
err = writer.Write(s.repo, s.checkpointsDir, s.lastProcessedCommitHash)
if err != nil {
<-done
return err
}
//fmt.Println("max len of stored tree", s.maxLenOfStoredTree)
//fmt.Println("repo len", len(s.repo))
<-done
return nil
}
func (s *Process) trimGraphAfterCommitProcessed(commit string) {
parents := s.graph.Parents[commit]
for _, p := range parents {
s.childrenProcessed[p]++ // mark commit as processed
siblings := s.graph.Children[p]
if s.childrenProcessed[p] == len(siblings) {
// done with parent, can delete it
s.unloader.Unload(p)
}
}
//commitsInMemory := s.repo.CommitsInMemory()
commitsInMemory := len(s.repo)
if commitsInMemory > s.maxLenOfStoredTree {
s.maxLenOfStoredTree = commitsInMemory
}
}
func (s *Process) processCommit(resChan chan Result, commit parser.Commit) error {
if len(s.mergeParts) > 0 {
// continuing with merge
if s.mergePartsCommit == commit.Hash {
s.mergeParts[commit.MergeDiffFrom] = commit
// still same
return nil
} else {
// finished
s.processGotMergeParts(resChan)
// new commit
// continue below
}
}
if len(commit.Parents) > 1 { // this is a merge
s.mergePartsCommit = commit.Hash
s.mergeParts = map[string]parser.Commit{}
s.mergeParts[commit.MergeDiffFrom] = commit
return nil
}
res, err := s.processRegularCommit(commit)
if err != nil {
return err
}
s.trimGraphAfterCommitProcessed(commit.Hash)
resChan <- res
return nil
}
func (s *Process) processGotMergeParts(resChan chan Result) {
res, err := s.processMergeCommit(s.mergePartsCommit, s.mergeParts)
if err != nil {
panic(err)
}
s.trimGraphAfterCommitProcessed(s.mergePartsCommit)
s.mergeParts = nil
resChan <- res
}
type Timing struct {
RegularCommitsCount int
RegularCommitsTime time.Duration
MergesCount int
MergesTime time.Duration
SlowestCommits []CommitWithDuration
}
type CommitWithDuration struct {
Commit string
Duration time.Duration
}
const maxSlowestCommits = 10
func (s *Timing) UpdateSlowestCommitsWith(commit string, d time.Duration) {
s.SlowestCommits = append(s.SlowestCommits, CommitWithDuration{Commit: commit, Duration: d})
sort.Slice(s.SlowestCommits, func(i, j int) bool {
a := s.SlowestCommits[i]
b := s.SlowestCommits[j]
return a.Duration > b.Duration
})
if len(s.SlowestCommits) > maxSlowestCommits {
s.SlowestCommits = s.SlowestCommits[0:maxSlowestCommits]
}
}
func (s *Timing) SlowestCommitsDur() (res time.Duration) {
for _, c := range s.SlowestCommits {
res += c.Duration
}
return
}
/*
func (s *Timing) Stats() map[string]interface{} {
return map[string]interface{}{
"TotalRegularCommit": s.TotalRegularCommit,
"TotalMerges": s.TotalMerges,
"SlowestCommits": s.SlowestCommits,
"SlowestCommitsDur": s.SlowestCommitsDur(),
}
}*/
func (s *Timing) OutputStats(wr io.Writer) {
fmt.Fprintln(wr, "git processor timing")
fmt.Fprintln(wr, "regular commits", s.RegularCommitsCount)
fmt.Fprintln(wr, "time in regular commits", s.RegularCommitsTime)
fmt.Fprintln(wr, "merges", s.MergesCount)
fmt.Fprintln(wr, "time in merges commits", s.MergesTime)
fmt.Fprintf(wr, "time in %v slowest commits %v\n", len(s.SlowestCommits), s.SlowestCommitsDur())
fmt.Fprintln(wr, "slowest commits")
for _, c := range s.SlowestCommits {
fmt.Fprintf(wr, "%v %v\n", c.Commit, c.Duration)
}
}
func (s *Process) processRegularCommit(commit parser.Commit) (res Result, rerr error) {
s.lastProcessedCommitHash = commit.Hash
start := time.Now()
defer func() {
dur := time.Since(start)
s.timing.UpdateSlowestCommitsWith(commit.Hash, dur)
s.timing.RegularCommitsTime += dur
s.timing.RegularCommitsCount++
}()
if len(commit.Parents) > 1 {
panic("not a regular commit")
}
// note that commit exists (important for empty commits)
s.repo.AddCommit(commit.Hash)
//fmt.Println("processing regular commit", commit.Hash)
res.Commit = commit.Hash
res.Files = map[string]*incblame.Blame{}
for _, ch := range commit.Changes {
//fmt.Printf("%+v\n", string(ch.Diff))
diff := incblame.Parse(ch.Diff)
if diff.IsBinary {
// do not keep actual lines, but show in result
bl := incblame.BlameBinaryFile(commit.Hash)
if diff.Path == "" {
p := diff.PathPrev
res.Files[p] = bl
// removal
} else {
p := diff.Path
res.Files[p] = bl
s.repo[commit.Hash][p] = bl
}
continue
}
//fmt.Printf("diff %+v\n", diff)
if diff.Path == "" {
// file removed, no longer need to keep blame reference, but showcase the file in res.Files using PathPrev
res.Files[diff.PathPrev] = &incblame.Blame{Commit: commit.Hash}
continue
}
// TODO: test renames here as well
if diff.Path == "" {
panic(fmt.Errorf("commit diff does not specify Path: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// this is a rename
if diff.PathPrev != "" && diff.PathPrev != diff.Path {
if len(commit.Parents) != 1 {
panic(fmt.Errorf("rename with more than 1 parent (merge) not supported: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// rename with no patch
if len(diff.Hunks) == 0 {
parent := commit.Parents[0]
pb, err := s.repo.GetFileMust(parent, diff.PathPrev)
if err != nil {
rerr = fmt.Errorf("could not get parent file for rename: %v err: %v", commit.Hash, err)
return
}
if pb.IsBinary {
s.repo[commit.Hash][diff.Path] = pb
res.Files[diff.Path] = pb
continue
}
}
} else {
// this is an empty file creation
//if len(diff.Hunks) == 0 {
// panic(fmt.Errorf("no changes in commit: %v diff: %v", commit.Hash, string(ch.Diff)))
//}
}
var parentBlame *incblame.Blame
if diff.PathPrev == "" {
// file added in this commit, no parent blame for this file
} else {
switch len(commit.Parents) {
case 0: // initial commit, no parent
case 1: // regular commit
parentHash := commit.Parents[0]
pb := s.repo.GetFileOptional(parentHash, diff.PathPrev)
// file may not be in parent if this is create
if pb != nil {
parentBlame = pb
}
case 2: // merge
panic("merge passed to regular commit processing")
}
}
var blame incblame.Blame
if parentBlame == nil {
blame = incblame.Apply(incblame.Blame{}, diff, commit.Hash, diff.PathOrPrev())
} else {
if parentBlame.IsBinary {
bl, err := s.slowGitBlame(commit.Hash, diff.Path)
if err != nil {
return res, err
}
blame = bl
} else {
blame = incblame.Apply(*parentBlame, diff, commit.Hash, diff.PathOrPrev())
}
}
s.repo[commit.Hash][diff.Path] = &blame
res.Files[diff.Path] = &blame
}
if len(commit.Parents) == 0 {
// no need to copy files from prev
return
}
// copy unchanged from prev
p := commit.Parents[0]
files := s.repo.GetCommitMust(p)
for fp := range files {
// was in the diff changes, nothing to do
if _, ok := res.Files[fp]; ok {
continue
}
blame, err := s.repo.GetFileMust(p, fp)
if err != nil {
rerr = fmt.Errorf("could not get parent file for unchanged: %v err: %v", commit.Hash, err)
return
}
// copy reference
s.repo[commit.Hash][fp] = blame
}
return
}
const deletedPrefix = "@@@del@@@"
func (s *Process) processMergeCommit(commitHash string, parts map[string]parser.Commit) (res Result, rerr error) {
s.lastProcessedCommitHash = commitHash
start := time.Now()
defer func() {
dur := time.Since(start)
s.timing.UpdateSlowestCommitsWith(commitHash, dur)
s.timing.MergesTime += dur
s.timing.MergesCount++
}()
// note that commit exists (important for empty commits)
s.repo.AddCommit(commitHash)
//fmt.Println("processing merge commit", commitHash)
parentHashes := s.graph.Parents[commitHash]
parentCount := len(parentHashes)
res.Commit = commitHash
res.Files = map[string]*incblame.Blame{}
// parse and organize all diffs for access
diffs := map[string][]*incblame.Diff{}
hashToParOrd := map[string]int{}
for i, h := range parentHashes {
hashToParOrd[h] = i
}
for parHash, part := range parts {
for _, ch := range part.Changes {
diff := incblame.Parse(ch.Diff)
key := ""
if diff.Path != "" {
key = diff.Path
} else {
key = deletedPrefix + diff.PathPrev
}
par, ok := diffs[key]
if !ok {
par = make([]*incblame.Diff, parentCount, parentCount)
diffs[key] = par
}
parInd := hashToParOrd[parHash]
par[parInd] = &diff
}
}
// get a list of all files
files := map[string]bool{}
for k := range diffs {
files[k] = true
}
// process all files
EACHFILE:
for k := range files {
diffs := diffs[k]
isDelete := true
for _, diff := range diffs {
if diff != nil && diff.Path != "" {
isDelete = false
}
}
//fmt.Println("diffs")
//for i, d := range diffs {
// fmt.Println(i, d)
//}
if isDelete {
// only showing deletes and files changed in merge comparent to at least one parent
pathPrev := k[len(deletedPrefix):]
res.Files[pathPrev] = &incblame.Blame{Commit: commitHash}
continue
}
// below k == new file path
binaryDiffs := 0
for _, diff := range diffs {
if diff == nil {
continue
}
if diff.IsBinary {
binaryDiffs++
}
}
binParentsWithDiffs := 0
for i, diff := range diffs {
if diff == nil {
continue
}
if diff.PathPrev == "" {
// create
continue
}
parent := parentHashes[i]
pb, err := s.repo.GetFileMust(parent, diff.PathPrev)
if err != nil {
rerr = fmt.Errorf("could not get file for merge bin parent. merge: %v %v", commitHash, err)
return
}
if pb.IsBinary {
binParentsWithDiffs++
}
}
// do not try to resolve the diffs for binary files in merge commits
if binaryDiffs != 0 || binParentsWithDiffs != 0 {
bl := incblame.BlameBinaryFile(commitHash)
s.repo[commitHash][k] = bl
res.Files[k] = bl
continue
}
/*
// file is a binary
if binaryDiffs == validDiffs {
bl := incblame.BlameBinaryFile(commitHash)
s.repoSave(commitHash, k, bl)
res.Files[k] = bl
continue
}
// file is not a binary but one of the parents was a binary, need to use a regular git blame
if binaryParents != 0 {
bl, err := s.slowGitBlame(commitHash, k)
if err != nil {
return res, err
}
s.repoSave(commitHash, k, &bl)
res.Files[k] = &bl
continue
}*/
for i, diff := range diffs {
if diff == nil {
// same as parent
parent := parentHashes[i]
pb := s.repo.GetFileOptional(parent, k)
if pb != nil {
// exacly the same as parent, no changes
s.repo[commitHash][k] = pb
continue EACHFILE
}
}
}
parents := []incblame.Blame{}
for i, diff := range diffs {
if diff == nil {
// no change use prev
parentHash := parentHashes[i]
parentBlame := s.repo.GetFileOptional(parentHash, k)
if parentBlame == nil {
panic(fmt.Errorf("merge: no change for file recorded, but parent does not contain file:%v merge commit:%v parent:%v", k, commitHash, parentHash))
}
parents = append(parents, *parentBlame)
continue
}
pathPrev := diff.PathPrev
if pathPrev == "" {
// this is create, no parent blame
parents = append(parents, incblame.Blame{})
continue
}
parentHash := parentHashes[i]
parentBlame, err := s.repo.GetFileMust(parentHash, pathPrev)
if err != nil {
rerr = fmt.Errorf("could not get file for unchanged case1 merge file. merge: %v %v", commitHash, err)
return
}
parents = append(parents, *parentBlame)
}
//fmt.Println("path", k)
diffs2 := []incblame.Diff{}
for _, ob := range diffs {
if ob == nil {
ob = &incblame.Diff{}
}
diffs2 = append(diffs2, *ob)
}
blame := incblame.ApplyMerge(parents, diffs2, commitHash, k)
s.repo[commitHash][k] = &blame
// only showing deletes and files changed in merge comparent to at least one parent
res.Files[k] = &blame
}
// for merge commits we need to use the most updated copy
// get a list of all files in all parents
files = map[string]bool{}
for _, p := range parentHashes {
filesInCommit := s.repo.GetCommitMust(p)
for f := range filesInCommit {
files[f] = true
}
}
root := ""
for f := range files {
alreadyAddedAbove := false
{
bl := s.repo.GetFileOptional(commitHash, f)
if bl != nil {
alreadyAddedAbove = true
}
}
if alreadyAddedAbove {
continue
}
var candidates []*incblame.Blame
for _, p := range parentHashes {
bl := s.repo.GetFileOptional(p, f)
if bl != nil {
candidates = append(candidates, bl)
}
}
// only one branch has the file
if len(candidates) == 1 {
// copy reference
s.repo[commitHash][f] = candidates[0]
continue
}
if len(candidates) == 0 {
panic("no file candidates")
}
// TODO: if more than one candidate we pick at random right now
// Need to check if this is correct? If no change at merge to any that means they are all the same?
// Or we need to check the last common parent and see? This was added in the previous design so possible is not needed anymore.
/*
if root == "" {
// TODO: this is not covered by unit tests
ts := time.Now()
// find common parent commit for all
root = s.graph.Parents.LastCommonParent(parentHashes)
dur := time.Since(ts)
if dur > time.Second {
fmt.Printf("took %v to find last common parent for %v res: %v", dur, parentHashes, root)
}
}*/
var res2 *incblame.Blame
for _, c := range candidates {
// unchanged
//if c.Commit == root {
// continue
//}
res2 = c
}
if res2 == nil {
var err error
// all are unchanged
res2, err = s.repo.GetFileMust(root, f)
if err != nil {
rerr = fmt.Errorf("could not get file for unchanged case2 merge file. merge: %v %v", commitHash, err)
return
}
}
s.repo[commitHash][f] = res2
}
return
}
func (s *Process) slowGitBlame(commitHash string, filePath string) (res incblame.Blame, _ error) {
bl, err := gitblame2.Run(s.opts.RepoDir, commitHash, filePath)
//fmt.Println("running regular blame for file switching from bin mode to regular")
if err != nil {
return res, err
}
res.Commit = commitHash
for _, l := range bl.Lines {
l2 := &incblame.Line{}
l2.Commit = l.CommitHash
l2.Line = []byte(l.Content)
res.Lines = append(res.Lines, l2)
}
return
}
func (s *Process) RunGetAll() (_ []Result, err error) {
res := make(chan Result)
done := make(chan bool)
go func() {
err = s.Run(res)
done <- true
}()
var res2 []Result
for r := range res {
res2 = append(res2, r)
}
<-done
return res2, err
}
func (s *Process) gitLogPatches() (io.ReadCloser, error) {
// empty file at temp location to set an empty attributesFile
f, err := ioutil.TempFile("", "ripsrc")
if err != nil {
return nil, err
}
err = f.Close()
if err != nil {
return nil, err
}
args := []string{
"-c", "core.attributesFile=" + f.Name(),
"-c", "diff.renameLimit=10000",
"log",
"-p",
"-m",
"--date-order",
"--reverse",
"--no-abbrev-commit",
"--pretty=short",
}
if s.opts.CommitFromIncl != "" {
if s.opts.AllBranches {
for _, c := range s.opts.WantedBranchRefs {
args = append(args, c)
}
}
pf := ""
if s.opts.CommitFromMakeNonIncl {
pf = "..HEAD"
} else {
pf = "^..HEAD"
}
args = append(args, s.opts.CommitFromIncl+pf)
} else {
if s.opts.AllBranches {
args = append(args, "--all")
}
}
ctx := context.Background()
//if s.opts.DisableCache {
return gitexec.ExecPiped(ctx, s.gitCommand, s.opts.RepoDir, args)
//}
//return gitexec.ExecWithCache(ctx, s.gitCommand, s.opts.RepoDir, args)
}
| {
drainAndExit()
return err
} | conditional_block |
process.go | package process
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"time"
"github.com/pinpt/ripsrc/ripsrc/parentsgraph"
"github.com/pinpt/ripsrc/ripsrc/gitblame2"
"github.com/pinpt/ripsrc/ripsrc/pkg/logger"
"github.com/pinpt/ripsrc/ripsrc/history3/process/repo"
"github.com/pinpt/ripsrc/ripsrc/gitexec"
"github.com/pinpt/ripsrc/ripsrc/history3/incblame"
"github.com/pinpt/ripsrc/ripsrc/history3/process/parser"
)
// Process computes incremental blame information for a git repository,
// streaming one Result per commit (see Run).
type Process struct {
opts Opts
// gitCommand is the git executable name; set to "git" in New
gitCommand string
// graph holds parent/child relationships between commits
graph *parentsgraph.Graph
// childrenProcessed counts, per commit hash, how many of its children have
// been processed; when all children are done the commit's state is unloaded
// (see trimGraphAfterCommitProcessed)
childrenProcessed map[string]int
// maxLenOfStoredTree is the high-water mark of commits held in repo at once
maxLenOfStoredTree int
// mergePartsCommit is the hash of the merge commit currently accumulating
// its per-parent diff parts
mergePartsCommit string
// map[parent_diffed]parser.Commit
mergeParts map[string]parser.Commit
timing *Timing
// repo holds per-commit, per-file blame state
repo repo.Repo
unloader *repo.Unloader
// checkpointsDir is the resolved cache directory (derived in New)
checkpointsDir string
// lastProcessedCommitHash is recorded so the checkpoint writer knows where
// processing stopped
lastProcessedCommitHash string
}
// Opts configures a Process.
type Opts struct {
// Logger for diagnostics; when nil, New installs a default stdout logger.
Logger logger.Logger
// RepoDir is the path of the git repository to process.
RepoDir string
// CheckpointsDir is the directory to store incremental data cache for this repo
// If empty, directory is created inside repoDir
CheckpointsDir string
// NoStrictResume forces incremental processing to avoid checking that it continues from the same commit in previously finished on. Since incrementals save a large number of previous commits, it works even starting on another commit.
NoStrictResume bool
// CommitFromIncl process starting from this commit (including this commit).
CommitFromIncl string
// CommitFromMakeNonIncl by default we start from passed commit and include it. Set CommitFromMakeNonIncl to true to avoid returning it, and skipping reading/writing checkpoint.
CommitFromMakeNonIncl bool
// DisableCache is unused.
DisableCache bool
// AllBranches set to true to process all branches. If false, processes commits starting from HEAD only.
AllBranches bool
// WantedBranchRefs filter branches. When CommitFromIncl and AllBranches is set this is required.
WantedBranchRefs []string
// ParentsGraph is optional graph of commits. Pass to reuse, if not passed will be created.
ParentsGraph *parentsgraph.Graph
}
// Result is the blame output for one commit.
type Result struct {
Commit string
// Files maps the paths touched by this commit to their blame state.
// Deleted files map to a Blame carrying only the commit hash.
Files map[string]*incblame.Blame
}
// New creates a Process for the given options. A default stdout logger is
// installed when none is provided, and the checkpoints cache directory is
// resolved: under opts.CheckpointsDir when set, otherwise inside the repo.
func New(opts Opts) *Process {
	if opts.Logger == nil {
		opts.Logger = logger.NewDefaultLogger(os.Stdout)
	}
	p := &Process{}
	p.opts = opts
	p.gitCommand = "git"
	p.timing = &Timing{}
	base := opts.CheckpointsDir
	if base == "" {
		base = opts.RepoDir
	}
	p.checkpointsDir = filepath.Join(base, "pp-git-cache")
	return p
}
// Timing returns a copy of the accumulated timing statistics.
func (s *Process) Timing() Timing {
	snapshot := *s.timing
	return snapshot
}
// initCheckpoints prepares the per-commit blame store. For a full run (no
// starting commit) it begins with a fresh repo; for an incremental run it
// loads the previously written checkpoint, validating the resume commit
// unless NoStrictResume is set. The unloader is created in both cases.
func (s *Process) initCheckpoints() error {
	if s.opts.CommitFromIncl == "" {
		// full run: start with empty state
		s.repo = repo.New()
	} else {
		// incremental run: restore state from checkpoint
		expectedCommit := ""
		if !s.opts.NoStrictResume {
			// require the checkpoint to continue from this exact commit
			expectedCommit = s.opts.CommitFromIncl
		}
		r, err := repo.NewCheckpointReader(s.opts.Logger).Read(s.checkpointsDir, expectedCommit)
		if err != nil {
			return fmt.Errorf("Could not read checkpoint: %v", err)
		}
		s.repo = r
	}
	s.unloader = repo.NewUnloader(s.repo)
	return nil
}
// Run executes the pipeline: build (or reuse) the parents graph, stream
// `git log -p` through the parser, process each commit in order, and finally
// write a checkpoint for incremental resume. Results are sent on resChan,
// which is always closed before returning.
func (s *Process) Run(resChan chan Result) error {
defer func() {
close(resChan)
}()
if s.opts.ParentsGraph != nil {
s.graph = s.opts.ParentsGraph
} else {
s.graph = parentsgraph.New(parentsgraph.Opts{
RepoDir: s.opts.RepoDir,
AllBranches: s.opts.AllBranches,
Logger: s.opts.Logger,
})
err := s.graph.Read()
if err != nil {
return err
}
}
s.childrenProcessed = map[string]int{}
r, err := s.gitLogPatches()
if err != nil {
return err
}
defer r.Close()
commits := make(chan parser.Commit)
p := parser.New(r)
done := make(chan bool)
go func() {
defer func() {
done <- true
}()
// NOTE(review): a parse failure panics inside this goroutine, aborting
// the program instead of surfacing an error to the caller.
err := p.Run(commits)
if err != nil {
panic(err)
}
}()
// drainAndExit unblocks the parser goroutine on early return by consuming
// all remaining commits, then waits for it to finish.
drainAndExit := func() {
for range commits {
}
<-done
}
i := 0
for commit := range commits {
if i == 0 {
// checkpoints are initialized lazily, once the first commit arrives
err := s.initCheckpoints()
if err != nil {
drainAndExit()
return err
}
}
i++
commit.Parents = s.graph.Parents[commit.Hash]
err := s.processCommit(resChan, commit)
if err != nil {
drainAndExit()
return err
}
}
// flush a merge whose per-parent parts were still accumulating at log end
if len(s.mergeParts) > 0 {
s.processGotMergeParts(resChan)
}
if i == 0 {
// there were no items in log, happens when last processed commit was in a branch that is no longer recent and is skipped in incremental
// no need to write checkpoints
<-done
return nil
}
writer := repo.NewCheckpointWriter(s.opts.Logger)
err = writer.Write(s.repo, s.checkpointsDir, s.lastProcessedCommitHash)
if err != nil {
<-done
return err
}
//fmt.Println("max len of stored tree", s.maxLenOfStoredTree)
//fmt.Println("repo len", len(s.repo))
<-done
return nil
}
// trimGraphAfterCommitProcessed records that one more child of each parent of
// commit has been processed; once every child of a parent is done, that
// parent's blame state is unloaded to bound memory use. It also tracks the
// high-water mark of commits held in memory.
func (s *Process) trimGraphAfterCommitProcessed(commit string) {
	for _, parentHash := range s.graph.Parents[commit] {
		s.childrenProcessed[parentHash]++
		allChildren := s.graph.Children[parentHash]
		if s.childrenProcessed[parentHash] == len(allChildren) {
			// every child of this parent is processed; its state can go
			s.unloader.Unload(parentHash)
		}
	}
	if inMemory := len(s.repo); inMemory > s.maxLenOfStoredTree {
		s.maxLenOfStoredTree = inMemory
	}
}
// processCommit routes one parsed commit. Regular commits are processed
// immediately; merge commits arrive as multiple parser.Commit entries (one per
// parent diffed), so their parts are accumulated in s.mergeParts until a
// commit with a different hash arrives, at which point the buffered merge is
// flushed first.
func (s *Process) processCommit(resChan chan Result, commit parser.Commit) error {
if len(s.mergeParts) > 0 {
// continuing with merge
if s.mergePartsCommit == commit.Hash {
s.mergeParts[commit.MergeDiffFrom] = commit
// still same
return nil
} else {
// finished
s.processGotMergeParts(resChan)
// new commit
// continue below
}
}
if len(commit.Parents) > 1 { // this is a merge
// start buffering parts keyed by the parent being diffed against
s.mergePartsCommit = commit.Hash
s.mergeParts = map[string]parser.Commit{}
s.mergeParts[commit.MergeDiffFrom] = commit
return nil
}
res, err := s.processRegularCommit(commit)
if err != nil {
return err
}
s.trimGraphAfterCommitProcessed(commit.Hash)
resChan <- res
return nil
}
// processGotMergeParts flushes the buffered merge commit: resolves its blame
// from the accumulated per-parent parts, trims the graph, clears the buffer
// and emits the result.
// NOTE(review): errors from processMergeCommit panic here rather than being
// propagated — confirm this is intentional.
func (s *Process) processGotMergeParts(resChan chan Result) {
res, err := s.processMergeCommit(s.mergePartsCommit, s.mergeParts)
if err != nil {
panic(err)
}
s.trimGraphAfterCommitProcessed(s.mergePartsCommit)
s.mergeParts = nil
resChan <- res
}
// Timing accumulates processing statistics, split between regular commits and
// merges, plus a leaderboard of the slowest commits.
type Timing struct {
RegularCommitsCount int
RegularCommitsTime time.Duration
MergesCount int
MergesTime time.Duration
// SlowestCommits is kept sorted by descending duration, capped at
// maxSlowestCommits entries (see UpdateSlowestCommitsWith).
SlowestCommits []CommitWithDuration
}
// CommitWithDuration pairs a commit hash with its processing duration.
type CommitWithDuration struct {
Commit string
Duration time.Duration
}
// maxSlowestCommits caps the SlowestCommits leaderboard size.
const maxSlowestCommits = 10
// UpdateSlowestCommitsWith inserts a commit's processing duration into the
// leaderboard, keeping it sorted by descending duration and capped at
// maxSlowestCommits entries.
func (s *Timing) UpdateSlowestCommitsWith(commit string, d time.Duration) {
	entry := CommitWithDuration{Commit: commit, Duration: d}
	s.SlowestCommits = append(s.SlowestCommits, entry)
	sort.Slice(s.SlowestCommits, func(x, y int) bool {
		return s.SlowestCommits[x].Duration > s.SlowestCommits[y].Duration
	})
	if len(s.SlowestCommits) > maxSlowestCommits {
		s.SlowestCommits = s.SlowestCommits[:maxSlowestCommits]
	}
}
// SlowestCommitsDur returns the summed duration of the tracked slowest commits.
func (s *Timing) SlowestCommitsDur() (res time.Duration) {
	total := time.Duration(0)
	for i := range s.SlowestCommits {
		total += s.SlowestCommits[i].Duration
	}
	return total
}
/*
func (s *Timing) Stats() map[string]interface{} {
return map[string]interface{}{
"TotalRegularCommit": s.TotalRegularCommit,
"TotalMerges": s.TotalMerges,
"SlowestCommits": s.SlowestCommits,
"SlowestCommitsDur": s.SlowestCommitsDur(),
}
}*/
// OutputStats writes a human-readable timing summary to wr.
func (s *Timing) OutputStats(wr io.Writer) {
	line := func(args ...interface{}) {
		fmt.Fprintln(wr, args...)
	}
	line("git processor timing")
	line("regular commits", s.RegularCommitsCount)
	line("time in regular commits", s.RegularCommitsTime)
	line("merges", s.MergesCount)
	line("time in merges commits", s.MergesTime)
	fmt.Fprintf(wr, "time in %v slowest commits %v\n", len(s.SlowestCommits), s.SlowestCommitsDur())
	line("slowest commits")
	for _, entry := range s.SlowestCommits {
		fmt.Fprintf(wr, "%v %v\n", entry.Commit, entry.Duration)
	}
}
// processRegularCommit computes the blame state for a non-merge commit:
// each change's diff is applied on top of the parent's blame for that file,
// binary files are tracked without line content, and files untouched by the
// commit are carried over from the parent by reference. Timing stats are
// recorded via the deferred function.
func (s *Process) processRegularCommit(commit parser.Commit) (res Result, rerr error) {
s.lastProcessedCommitHash = commit.Hash
start := time.Now()
defer func() {
dur := time.Since(start)
s.timing.UpdateSlowestCommitsWith(commit.Hash, dur)
s.timing.RegularCommitsTime += dur
s.timing.RegularCommitsCount++
}()
if len(commit.Parents) > 1 {
panic("not a regular commit")
}
// note that commit exists (important for empty commits)
s.repo.AddCommit(commit.Hash)
//fmt.Println("processing regular commit", commit.Hash)
res.Commit = commit.Hash
res.Files = map[string]*incblame.Blame{}
for _, ch := range commit.Changes {
//fmt.Printf("%+v\n", string(ch.Diff))
diff := incblame.Parse(ch.Diff)
if diff.IsBinary {
// do not keep actual lines, but show in result
bl := incblame.BlameBinaryFile(commit.Hash)
if diff.Path == "" {
p := diff.PathPrev
res.Files[p] = bl
// removal
} else {
p := diff.Path
res.Files[p] = bl
s.repo[commit.Hash][p] = bl
}
continue
}
//fmt.Printf("diff %+v\n", diff)
if diff.Path == "" {
// file removed, no longer need to keep blame reference, but showcase the file in res.Files using PathPrev
res.Files[diff.PathPrev] = &incblame.Blame{Commit: commit.Hash}
continue
}
// TODO: test renames here as well
// NOTE(review): this check is unreachable — diff.Path == "" is handled
// (with continue) just above.
if diff.Path == "" {
panic(fmt.Errorf("commit diff does not specify Path: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// this is a rename
if diff.PathPrev != "" && diff.PathPrev != diff.Path {
if len(commit.Parents) != 1 {
panic(fmt.Errorf("rename with more than 1 parent (merge) not supported: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// rename with no patch
if len(diff.Hunks) == 0 {
parent := commit.Parents[0]
pb, err := s.repo.GetFileMust(parent, diff.PathPrev)
if err != nil {
rerr = fmt.Errorf("could not get parent file for rename: %v err: %v", commit.Hash, err)
return
}
if pb.IsBinary {
// binary rename: carry the parent's blame reference over
s.repo[commit.Hash][diff.Path] = pb
res.Files[diff.Path] = pb
continue
}
}
} else {
// this is an empty file creation
//if len(diff.Hunks) == 0 {
// panic(fmt.Errorf("no changes in commit: %v diff: %v", commit.Hash, string(ch.Diff)))
//}
}
// locate the parent blame the diff applies on top of (if any)
var parentBlame *incblame.Blame
if diff.PathPrev == "" {
// file added in this commit, no parent blame for this file
} else {
switch len(commit.Parents) {
case 0: // initial commit, no parent
case 1: // regular commit
parentHash := commit.Parents[0]
pb := s.repo.GetFileOptional(parentHash, diff.PathPrev)
// file may not be in parent if this is create
if pb != nil {
parentBlame = pb
}
case 2: // merge
panic("merge passed to regular commit processing")
}
}
var blame incblame.Blame
if parentBlame == nil {
blame = incblame.Apply(incblame.Blame{}, diff, commit.Hash, diff.PathOrPrev())
} else {
if parentBlame.IsBinary {
// parent was binary, diff can't be applied incrementally: fall back
// to a full git blame
bl, err := s.slowGitBlame(commit.Hash, diff.Path)
if err != nil {
return res, err
}
blame = bl
} else {
blame = incblame.Apply(*parentBlame, diff, commit.Hash, diff.PathOrPrev())
}
}
s.repo[commit.Hash][diff.Path] = &blame
res.Files[diff.Path] = &blame
}
if len(commit.Parents) == 0 {
// no need to copy files from prev
return
}
// copy unchanged from prev
p := commit.Parents[0]
files := s.repo.GetCommitMust(p)
for fp := range files {
// was in the diff changes, nothing to do
if _, ok := res.Files[fp]; ok {
continue
}
blame, err := s.repo.GetFileMust(p, fp)
if err != nil {
rerr = fmt.Errorf("could not get parent file for unchanged: %v err: %v", commit.Hash, err)
return
}
// copy reference
s.repo[commit.Hash][fp] = blame
}
return
}
// deletedPrefix marks map keys for files deleted in a merge diff, so a
// deletion of path P and a file named P cannot collide in the diffs map
// (see processMergeCommit).
const deletedPrefix = "@@@del@@@"
// processMergeCommit resolves the blame state of a merge commit from the
// per-parent diff parts accumulated in processCommit. Diffs are grouped per
// file across parents (deletions keyed with deletedPrefix), each file is
// resolved against all parent blames, and files untouched by the merge are
// carried over from the parents. Timing stats are recorded via the deferred
// function.
func (s *Process) processMergeCommit(commitHash string, parts map[string]parser.Commit) (res Result, rerr error) {
s.lastProcessedCommitHash = commitHash
start := time.Now()
defer func() {
dur := time.Since(start)
s.timing.UpdateSlowestCommitsWith(commitHash, dur)
s.timing.MergesTime += dur
s.timing.MergesCount++
}()
// note that commit exists (important for empty commits)
s.repo.AddCommit(commitHash)
//fmt.Println("processing merge commit", commitHash)
parentHashes := s.graph.Parents[commitHash]
parentCount := len(parentHashes)
res.Commit = commitHash
res.Files = map[string]*incblame.Blame{}
// parse and organize all diffs for access
// diffs maps file key -> slice indexed by parent ordinal; nil entry means
// the file is unchanged relative to that parent
diffs := map[string][]*incblame.Diff{}
hashToParOrd := map[string]int{}
for i, h := range parentHashes {
hashToParOrd[h] = i
}
for parHash, part := range parts {
for _, ch := range part.Changes {
diff := incblame.Parse(ch.Diff)
key := ""
if diff.Path != "" {
key = diff.Path
} else {
key = deletedPrefix + diff.PathPrev
}
par, ok := diffs[key]
if !ok {
par = make([]*incblame.Diff, parentCount, parentCount)
diffs[key] = par
}
parInd := hashToParOrd[parHash]
par[parInd] = &diff
}
}
// get a list of all files
files := map[string]bool{}
for k := range diffs {
files[k] = true
}
// process all files
EACHFILE:
for k := range files {
diffs := diffs[k]
// a file is a delete only if no parent diff gives it a new path
isDelete := true
for _, diff := range diffs {
if diff != nil && diff.Path != "" {
isDelete = false
}
}
//fmt.Println("diffs")
//for i, d := range diffs {
// fmt.Println(i, d)
//}
if isDelete {
// only showing deletes and files changed in merge comparent to at least one parent
pathPrev := k[len(deletedPrefix):]
res.Files[pathPrev] = &incblame.Blame{Commit: commitHash}
continue
}
// below k == new file path
binaryDiffs := 0
for _, diff := range diffs {
if diff == nil {
continue
}
if diff.IsBinary {
binaryDiffs++
}
}
// count parents where the pre-image of a non-create diff is binary
binParentsWithDiffs := 0
for i, diff := range diffs {
if diff == nil {
continue
}
if diff.PathPrev == "" {
// create
continue
}
parent := parentHashes[i]
pb, err := s.repo.GetFileMust(parent, diff.PathPrev)
if err != nil {
rerr = fmt.Errorf("could not get file for merge bin parent. merge: %v %v", commitHash, err)
return
}
if pb.IsBinary {
binParentsWithDiffs++
}
}
// do not try to resolve the diffs for binary files in merge commits
if binaryDiffs != 0 || binParentsWithDiffs != 0 {
bl := incblame.BlameBinaryFile(commitHash)
s.repo[commitHash][k] = bl
res.Files[k] = bl
continue
}
/*
// file is a binary
if binaryDiffs == validDiffs {
bl := incblame.BlameBinaryFile(commitHash)
s.repoSave(commitHash, k, bl)
res.Files[k] = bl
continue
}
// file is not a binary but one of the parents was a binary, need to use a regular git blame
if binaryParents != 0 {
bl, err := s.slowGitBlame(commitHash, k)
if err != nil {
return res, err
}
s.repoSave(commitHash, k, &bl)
res.Files[k] = &bl
continue
}*/
// if the file equals one parent exactly, reuse that parent's blame and
// skip to the next file
for i, diff := range diffs {
if diff == nil {
// same as parent
parent := parentHashes[i]
pb := s.repo.GetFileOptional(parent, k)
if pb != nil {
// exacly the same as parent, no changes
s.repo[commitHash][k] = pb
continue EACHFILE
}
}
}
// collect one parent blame per parent ordinal to feed ApplyMerge
parents := []incblame.Blame{}
for i, diff := range diffs {
if diff == nil {
// no change use prev
parentHash := parentHashes[i]
parentBlame := s.repo.GetFileOptional(parentHash, k)
if parentBlame == nil {
panic(fmt.Errorf("merge: no change for file recorded, but parent does not contain file:%v merge commit:%v parent:%v", k, commitHash, parentHash))
}
parents = append(parents, *parentBlame)
continue
}
pathPrev := diff.PathPrev
if pathPrev == "" {
// this is create, no parent blame
parents = append(parents, incblame.Blame{})
continue
}
parentHash := parentHashes[i]
parentBlame, err := s.repo.GetFileMust(parentHash, pathPrev)
if err != nil {
rerr = fmt.Errorf("could not get file for unchanged case1 merge file. merge: %v %v", commitHash, err)
return
}
parents = append(parents, *parentBlame)
}
//fmt.Println("path", k)
diffs2 := []incblame.Diff{}
for _, ob := range diffs {
if ob == nil {
ob = &incblame.Diff{}
}
diffs2 = append(diffs2, *ob)
}
blame := incblame.ApplyMerge(parents, diffs2, commitHash, k)
s.repo[commitHash][k] = &blame
// only showing deletes and files changed in merge comparent to at least one parent
res.Files[k] = &blame
}
// for merge commits we need to use the most updated copy
// get a list of all files in all parents
files = map[string]bool{}
for _, p := range parentHashes {
filesInCommit := s.repo.GetCommitMust(p)
for f := range filesInCommit {
files[f] = true
}
}
root := ""
for f := range files {
alreadyAddedAbove := false
{
bl := s.repo.GetFileOptional(commitHash, f)
if bl != nil {
alreadyAddedAbove = true
}
}
if alreadyAddedAbove {
continue
}
var candidates []*incblame.Blame
for _, p := range parentHashes {
bl := s.repo.GetFileOptional(p, f)
if bl != nil {
candidates = append(candidates, bl)
}
}
// only one branch has the file
if len(candidates) == 1 {
// copy reference
s.repo[commitHash][f] = candidates[0]
continue
}
if len(candidates) == 0 {
panic("no file candidates")
}
// TODO: if more than one candidate we pick at random right now
// Need to check if this is correct? If no change at merge to any that means they are all the same?
// Or we need to check the last common parent and see? This was added in the previous design so possible is not needed anymore.
/*
if root == "" {
// TODO: this is not covered by unit tests
ts := time.Now()
// find common parent commit for all
root = s.graph.Parents.LastCommonParent(parentHashes)
dur := time.Since(ts)
if dur > time.Second {
fmt.Printf("took %v to find last common parent for %v res: %v", dur, parentHashes, root)
}
}*/
// NOTE(review): with the root lookup commented out above, this loop always
// picks the last candidate and the res2 == nil branch below cannot trigger
// (root stays "") — confirm intended.
var res2 *incblame.Blame
for _, c := range candidates {
// unchanged
//if c.Commit == root {
// continue
//}
res2 = c
}
if res2 == nil {
var err error
// all are unchanged
res2, err = s.repo.GetFileMust(root, f)
if err != nil {
rerr = fmt.Errorf("could not get file for unchanged case2 merge file. merge: %v %v", commitHash, err)
return
}
}
s.repo[commitHash][f] = res2
}
return
}
// slowGitBlame runs a full (non-incremental) git blame for a single file at
// the given commit and converts the result into an incblame.Blame. Used as a
// fallback when a file transitions from binary to text and the incremental
// diff cannot be applied.
func (s *Process) slowGitBlame(commitHash string, filePath string) (res incblame.Blame, _ error) {
	bl, err := gitblame2.Run(s.opts.RepoDir, commitHash, filePath)
	if err != nil {
		return res, err
	}
	res.Commit = commitHash
	for _, sourceLine := range bl.Lines {
		converted := &incblame.Line{
			Commit: sourceLine.CommitHash,
			Line:   []byte(sourceLine.Content),
		}
		res.Lines = append(res.Lines, converted)
	}
	return res, nil
}
func (s *Process) RunGetAll() (_ []Result, err error) {
res := make(chan Result)
done := make(chan bool)
go func() {
err = s.Run(res)
done <- true
}()
var res2 []Result
for r := range res {
res2 = append(res2, r)
}
<-done
return res2, err
} | // empty file at temp location to set an empty attributesFile
f, err := ioutil.TempFile("", "ripsrc")
if err != nil {
return nil, err
}
err = f.Close()
if err != nil {
return nil, err
}
args := []string{
"-c", "core.attributesFile=" + f.Name(),
"-c", "diff.renameLimit=10000",
"log",
"-p",
"-m",
"--date-order",
"--reverse",
"--no-abbrev-commit",
"--pretty=short",
}
if s.opts.CommitFromIncl != "" {
if s.opts.AllBranches {
for _, c := range s.opts.WantedBranchRefs {
args = append(args, c)
}
}
pf := ""
if s.opts.CommitFromMakeNonIncl {
pf = "..HEAD"
} else {
pf = "^..HEAD"
}
args = append(args, s.opts.CommitFromIncl+pf)
} else {
if s.opts.AllBranches {
args = append(args, "--all")
}
}
ctx := context.Background()
//if s.opts.DisableCache {
return gitexec.ExecPiped(ctx, s.gitCommand, s.opts.RepoDir, args)
//}
//return gitexec.ExecWithCache(ctx, s.gitCommand, s.opts.RepoDir, args)
} |
func (s *Process) gitLogPatches() (io.ReadCloser, error) { | random_line_split |
process.go | package process
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"time"
"github.com/pinpt/ripsrc/ripsrc/parentsgraph"
"github.com/pinpt/ripsrc/ripsrc/gitblame2"
"github.com/pinpt/ripsrc/ripsrc/pkg/logger"
"github.com/pinpt/ripsrc/ripsrc/history3/process/repo"
"github.com/pinpt/ripsrc/ripsrc/gitexec"
"github.com/pinpt/ripsrc/ripsrc/history3/incblame"
"github.com/pinpt/ripsrc/ripsrc/history3/process/parser"
)
type Process struct {
opts Opts
gitCommand string
graph *parentsgraph.Graph
childrenProcessed map[string]int
maxLenOfStoredTree int
mergePartsCommit string
// map[parent_diffed]parser.Commit
mergeParts map[string]parser.Commit
timing *Timing
repo repo.Repo
unloader *repo.Unloader
checkpointsDir string
lastProcessedCommitHash string
}
type Opts struct {
Logger logger.Logger
RepoDir string
// CheckpointsDir is the directory to store incremental data cache for this repo
// If empty, directory is created inside repoDir
CheckpointsDir string
// NoStrictResume forces incremental processing to avoid checking that it continues from the same commit in previously finished on. Since incrementals save a large number of previous commits, it works even starting on another commit.
NoStrictResume bool
// CommitFromIncl process starting from this commit (including this commit).
CommitFromIncl string
// CommitFromMakeNonIncl by default we start from passed commit and include it. Set CommitFromMakeNonIncl to true to avoid returning it, and skipping reading/writing checkpoint.
CommitFromMakeNonIncl bool
// DisableCache is unused.
DisableCache bool
// AllBranches set to true to process all branches. If false, processes commits starting from HEAD only.
AllBranches bool
// WantedBranchRefs filter branches. When CommitFromIncl and AllBranches is set this is required.
WantedBranchRefs []string
// ParentsGraph is optional graph of commits. Pass to reuse, if not passed will be created.
ParentsGraph *parentsgraph.Graph
}
type Result struct {
Commit string
Files map[string]*incblame.Blame
}
func New(opts Opts) *Process {
s := &Process{}
if opts.Logger == nil {
opts.Logger = logger.NewDefaultLogger(os.Stdout)
}
s.opts = opts
s.gitCommand = "git"
s.timing = &Timing{}
if opts.CheckpointsDir != "" {
s.checkpointsDir = filepath.Join(opts.CheckpointsDir, "pp-git-cache")
} else {
s.checkpointsDir = filepath.Join(opts.RepoDir, "pp-git-cache")
}
return s
}
func (s *Process) Timing() Timing {
return *s.timing
}
func (s *Process) initCheckpoints() error {
if s.opts.CommitFromIncl == "" {
s.repo = repo.New()
} else {
expectedCommit := ""
if s.opts.NoStrictResume {
// validation disabled
} else {
expectedCommit = s.opts.CommitFromIncl
}
reader := repo.NewCheckpointReader(s.opts.Logger)
r, err := reader.Read(s.checkpointsDir, expectedCommit)
if err != nil {
return fmt.Errorf("Could not read checkpoint: %v", err)
}
s.repo = r
}
s.unloader = repo.NewUnloader(s.repo)
return nil
}
func (s *Process) Run(resChan chan Result) error {
defer func() {
close(resChan)
}()
if s.opts.ParentsGraph != nil {
s.graph = s.opts.ParentsGraph
} else {
s.graph = parentsgraph.New(parentsgraph.Opts{
RepoDir: s.opts.RepoDir,
AllBranches: s.opts.AllBranches,
Logger: s.opts.Logger,
})
err := s.graph.Read()
if err != nil {
return err
}
}
s.childrenProcessed = map[string]int{}
r, err := s.gitLogPatches()
if err != nil {
return err
}
defer r.Close()
commits := make(chan parser.Commit)
p := parser.New(r)
done := make(chan bool)
go func() {
defer func() {
done <- true
}()
err := p.Run(commits)
if err != nil {
panic(err)
}
}()
drainAndExit := func() {
for range commits {
}
<-done
}
i := 0
for commit := range commits {
if i == 0 {
err := s.initCheckpoints()
if err != nil {
drainAndExit()
return err
}
}
i++
commit.Parents = s.graph.Parents[commit.Hash]
err := s.processCommit(resChan, commit)
if err != nil {
drainAndExit()
return err
}
}
if len(s.mergeParts) > 0 {
s.processGotMergeParts(resChan)
}
if i == 0 {
// there were no items in log, happens when last processed commit was in a branch that is no longer recent and is skipped in incremental
// no need to write checkpoints
<-done
return nil
}
writer := repo.NewCheckpointWriter(s.opts.Logger)
err = writer.Write(s.repo, s.checkpointsDir, s.lastProcessedCommitHash)
if err != nil {
<-done
return err
}
//fmt.Println("max len of stored tree", s.maxLenOfStoredTree)
//fmt.Println("repo len", len(s.repo))
<-done
return nil
}
func (s *Process) trimGraphAfterCommitProcessed(commit string) {
parents := s.graph.Parents[commit]
for _, p := range parents {
s.childrenProcessed[p]++ // mark commit as processed
siblings := s.graph.Children[p]
if s.childrenProcessed[p] == len(siblings) {
// done with parent, can delete it
s.unloader.Unload(p)
}
}
//commitsInMemory := s.repo.CommitsInMemory()
commitsInMemory := len(s.repo)
if commitsInMemory > s.maxLenOfStoredTree {
s.maxLenOfStoredTree = commitsInMemory
}
}
func (s *Process) processCommit(resChan chan Result, commit parser.Commit) error {
if len(s.mergeParts) > 0 {
// continuing with merge
if s.mergePartsCommit == commit.Hash {
s.mergeParts[commit.MergeDiffFrom] = commit
// still same
return nil
} else {
// finished
s.processGotMergeParts(resChan)
// new commit
// continue below
}
}
if len(commit.Parents) > 1 { // this is a merge
s.mergePartsCommit = commit.Hash
s.mergeParts = map[string]parser.Commit{}
s.mergeParts[commit.MergeDiffFrom] = commit
return nil
}
res, err := s.processRegularCommit(commit)
if err != nil {
return err
}
s.trimGraphAfterCommitProcessed(commit.Hash)
resChan <- res
return nil
}
func (s *Process) processGotMergeParts(resChan chan Result) {
res, err := s.processMergeCommit(s.mergePartsCommit, s.mergeParts)
if err != nil {
panic(err)
}
s.trimGraphAfterCommitProcessed(s.mergePartsCommit)
s.mergeParts = nil
resChan <- res
}
type Timing struct {
RegularCommitsCount int
RegularCommitsTime time.Duration
MergesCount int
MergesTime time.Duration
SlowestCommits []CommitWithDuration
}
type CommitWithDuration struct {
Commit string
Duration time.Duration
}
const maxSlowestCommits = 10
func (s *Timing) UpdateSlowestCommitsWith(commit string, d time.Duration) {
s.SlowestCommits = append(s.SlowestCommits, CommitWithDuration{Commit: commit, Duration: d})
sort.Slice(s.SlowestCommits, func(i, j int) bool {
a := s.SlowestCommits[i]
b := s.SlowestCommits[j]
return a.Duration > b.Duration
})
if len(s.SlowestCommits) > maxSlowestCommits {
s.SlowestCommits = s.SlowestCommits[0:maxSlowestCommits]
}
}
func (s *Timing) SlowestCommitsDur() (res time.Duration) {
for _, c := range s.SlowestCommits {
res += c.Duration
}
return
}
/*
func (s *Timing) Stats() map[string]interface{} {
return map[string]interface{}{
"TotalRegularCommit": s.TotalRegularCommit,
"TotalMerges": s.TotalMerges,
"SlowestCommits": s.SlowestCommits,
"SlowestCommitsDur": s.SlowestCommitsDur(),
}
}*/
func (s *Timing) OutputStats(wr io.Writer) {
fmt.Fprintln(wr, "git processor timing")
fmt.Fprintln(wr, "regular commits", s.RegularCommitsCount)
fmt.Fprintln(wr, "time in regular commits", s.RegularCommitsTime)
fmt.Fprintln(wr, "merges", s.MergesCount)
fmt.Fprintln(wr, "time in merges commits", s.MergesTime)
fmt.Fprintf(wr, "time in %v slowest commits %v\n", len(s.SlowestCommits), s.SlowestCommitsDur())
fmt.Fprintln(wr, "slowest commits")
for _, c := range s.SlowestCommits {
fmt.Fprintf(wr, "%v %v\n", c.Commit, c.Duration)
}
}
func (s *Process) processRegularCommit(commit parser.Commit) (res Result, rerr error) {
s.lastProcessedCommitHash = commit.Hash
start := time.Now()
defer func() {
dur := time.Since(start)
s.timing.UpdateSlowestCommitsWith(commit.Hash, dur)
s.timing.RegularCommitsTime += dur
s.timing.RegularCommitsCount++
}()
if len(commit.Parents) > 1 {
panic("not a regular commit")
}
// note that commit exists (important for empty commits)
s.repo.AddCommit(commit.Hash)
//fmt.Println("processing regular commit", commit.Hash)
res.Commit = commit.Hash
res.Files = map[string]*incblame.Blame{}
for _, ch := range commit.Changes {
//fmt.Printf("%+v\n", string(ch.Diff))
diff := incblame.Parse(ch.Diff)
if diff.IsBinary {
// do not keep actual lines, but show in result
bl := incblame.BlameBinaryFile(commit.Hash)
if diff.Path == "" {
p := diff.PathPrev
res.Files[p] = bl
// removal
} else {
p := diff.Path
res.Files[p] = bl
s.repo[commit.Hash][p] = bl
}
continue
}
//fmt.Printf("diff %+v\n", diff)
if diff.Path == "" {
// file removed, no longer need to keep blame reference, but showcase the file in res.Files using PathPrev
res.Files[diff.PathPrev] = &incblame.Blame{Commit: commit.Hash}
continue
}
// TODO: test renames here as well
if diff.Path == "" {
panic(fmt.Errorf("commit diff does not specify Path: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// this is a rename
if diff.PathPrev != "" && diff.PathPrev != diff.Path {
if len(commit.Parents) != 1 {
panic(fmt.Errorf("rename with more than 1 parent (merge) not supported: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// rename with no patch
if len(diff.Hunks) == 0 {
parent := commit.Parents[0]
pb, err := s.repo.GetFileMust(parent, diff.PathPrev)
if err != nil {
rerr = fmt.Errorf("could not get parent file for rename: %v err: %v", commit.Hash, err)
return
}
if pb.IsBinary {
s.repo[commit.Hash][diff.Path] = pb
res.Files[diff.Path] = pb
continue
}
}
} else {
// this is an empty file creation
//if len(diff.Hunks) == 0 {
// panic(fmt.Errorf("no changes in commit: %v diff: %v", commit.Hash, string(ch.Diff)))
//}
}
var parentBlame *incblame.Blame
if diff.PathPrev == "" {
// file added in this commit, no parent blame for this file
} else {
switch len(commit.Parents) {
case 0: // initial commit, no parent
case 1: // regular commit
parentHash := commit.Parents[0]
pb := s.repo.GetFileOptional(parentHash, diff.PathPrev)
// file may not be in parent if this is create
if pb != nil {
parentBlame = pb
}
case 2: // merge
panic("merge passed to regular commit processing")
}
}
var blame incblame.Blame
if parentBlame == nil {
blame = incblame.Apply(incblame.Blame{}, diff, commit.Hash, diff.PathOrPrev())
} else {
if parentBlame.IsBinary {
bl, err := s.slowGitBlame(commit.Hash, diff.Path)
if err != nil {
return res, err
}
blame = bl
} else {
blame = incblame.Apply(*parentBlame, diff, commit.Hash, diff.PathOrPrev())
}
}
s.repo[commit.Hash][diff.Path] = &blame
res.Files[diff.Path] = &blame
}
if len(commit.Parents) == 0 {
// no need to copy files from prev
return
}
// copy unchanged from prev
p := commit.Parents[0]
files := s.repo.GetCommitMust(p)
for fp := range files {
// was in the diff changes, nothing to do
if _, ok := res.Files[fp]; ok {
continue
}
blame, err := s.repo.GetFileMust(p, fp)
if err != nil {
rerr = fmt.Errorf("could not get parent file for unchanged: %v err: %v", commit.Hash, err)
return
}
// copy reference
s.repo[commit.Hash][fp] = blame
}
return
}
const deletedPrefix = "@@@del@@@"
func (s *Process) processMergeCommit(commitHash string, parts map[string]parser.Commit) (res Result, rerr error) {
s.lastProcessedCommitHash = commitHash
start := time.Now()
defer func() {
dur := time.Since(start)
s.timing.UpdateSlowestCommitsWith(commitHash, dur)
s.timing.MergesTime += dur
s.timing.MergesCount++
}()
// note that commit exists (important for empty commits)
s.repo.AddCommit(commitHash)
//fmt.Println("processing merge commit", commitHash)
parentHashes := s.graph.Parents[commitHash]
parentCount := len(parentHashes)
res.Commit = commitHash
res.Files = map[string]*incblame.Blame{}
// parse and organize all diffs for access
diffs := map[string][]*incblame.Diff{}
hashToParOrd := map[string]int{}
for i, h := range parentHashes {
hashToParOrd[h] = i
}
for parHash, part := range parts {
for _, ch := range part.Changes {
diff := incblame.Parse(ch.Diff)
key := ""
if diff.Path != "" {
key = diff.Path
} else {
key = deletedPrefix + diff.PathPrev
}
par, ok := diffs[key]
if !ok {
par = make([]*incblame.Diff, parentCount, parentCount)
diffs[key] = par
}
parInd := hashToParOrd[parHash]
par[parInd] = &diff
}
}
// get a list of all files
files := map[string]bool{}
for k := range diffs {
files[k] = true
}
// process all files
EACHFILE:
for k := range files {
diffs := diffs[k]
isDelete := true
for _, diff := range diffs {
if diff != nil && diff.Path != "" {
isDelete = false
}
}
//fmt.Println("diffs")
//for i, d := range diffs {
// fmt.Println(i, d)
//}
if isDelete {
// only showing deletes and files changed in merge comparent to at least one parent
pathPrev := k[len(deletedPrefix):]
res.Files[pathPrev] = &incblame.Blame{Commit: commitHash}
continue
}
// below k == new file path
binaryDiffs := 0
for _, diff := range diffs {
if diff == nil {
continue
}
if diff.IsBinary {
binaryDiffs++
}
}
binParentsWithDiffs := 0
for i, diff := range diffs {
if diff == nil {
continue
}
if diff.PathPrev == "" {
// create
continue
}
parent := parentHashes[i]
pb, err := s.repo.GetFileMust(parent, diff.PathPrev)
if err != nil {
rerr = fmt.Errorf("could not get file for merge bin parent. merge: %v %v", commitHash, err)
return
}
if pb.IsBinary {
binParentsWithDiffs++
}
}
// do not try to resolve the diffs for binary files in merge commits
if binaryDiffs != 0 || binParentsWithDiffs != 0 {
bl := incblame.BlameBinaryFile(commitHash)
s.repo[commitHash][k] = bl
res.Files[k] = bl
continue
}
/*
// file is a binary
if binaryDiffs == validDiffs {
bl := incblame.BlameBinaryFile(commitHash)
s.repoSave(commitHash, k, bl)
res.Files[k] = bl
continue
}
// file is not a binary but one of the parents was a binary, need to use a regular git blame
if binaryParents != 0 {
bl, err := s.slowGitBlame(commitHash, k)
if err != nil {
return res, err
}
s.repoSave(commitHash, k, &bl)
res.Files[k] = &bl
continue
}*/
for i, diff := range diffs {
if diff == nil {
// same as parent
parent := parentHashes[i]
pb := s.repo.GetFileOptional(parent, k)
if pb != nil {
// exacly the same as parent, no changes
s.repo[commitHash][k] = pb
continue EACHFILE
}
}
}
parents := []incblame.Blame{}
for i, diff := range diffs {
if diff == nil {
// no change use prev
parentHash := parentHashes[i]
parentBlame := s.repo.GetFileOptional(parentHash, k)
if parentBlame == nil {
panic(fmt.Errorf("merge: no change for file recorded, but parent does not contain file:%v merge commit:%v parent:%v", k, commitHash, parentHash))
}
parents = append(parents, *parentBlame)
continue
}
pathPrev := diff.PathPrev
if pathPrev == "" {
// this is create, no parent blame
parents = append(parents, incblame.Blame{})
continue
}
parentHash := parentHashes[i]
parentBlame, err := s.repo.GetFileMust(parentHash, pathPrev)
if err != nil {
rerr = fmt.Errorf("could not get file for unchanged case1 merge file. merge: %v %v", commitHash, err)
return
}
parents = append(parents, *parentBlame)
}
//fmt.Println("path", k)
diffs2 := []incblame.Diff{}
for _, ob := range diffs {
if ob == nil {
ob = &incblame.Diff{}
}
diffs2 = append(diffs2, *ob)
}
blame := incblame.ApplyMerge(parents, diffs2, commitHash, k)
s.repo[commitHash][k] = &blame
// only showing deletes and files changed in merge comparent to at least one parent
res.Files[k] = &blame
}
// for merge commits we need to use the most updated copy
// get a list of all files in all parents
files = map[string]bool{}
for _, p := range parentHashes {
filesInCommit := s.repo.GetCommitMust(p)
for f := range filesInCommit {
files[f] = true
}
}
root := ""
for f := range files {
alreadyAddedAbove := false
{
bl := s.repo.GetFileOptional(commitHash, f)
if bl != nil {
alreadyAddedAbove = true
}
}
if alreadyAddedAbove {
continue
}
var candidates []*incblame.Blame
for _, p := range parentHashes {
bl := s.repo.GetFileOptional(p, f)
if bl != nil {
candidates = append(candidates, bl)
}
}
// only one branch has the file
if len(candidates) == 1 {
// copy reference
s.repo[commitHash][f] = candidates[0]
continue
}
if len(candidates) == 0 {
panic("no file candidates")
}
// TODO: if more than one candidate we pick at random right now
// Need to check if this is correct? If no change at merge to any that means they are all the same?
// Or we need to check the last common parent and see? This was added in the previous design so possible is not needed anymore.
/*
if root == "" {
// TODO: this is not covered by unit tests
ts := time.Now()
// find common parent commit for all
root = s.graph.Parents.LastCommonParent(parentHashes)
dur := time.Since(ts)
if dur > time.Second {
fmt.Printf("took %v to find last common parent for %v res: %v", dur, parentHashes, root)
}
}*/
var res2 *incblame.Blame
for _, c := range candidates {
// unchanged
//if c.Commit == root {
// continue
//}
res2 = c
}
if res2 == nil {
var err error
// all are unchanged
res2, err = s.repo.GetFileMust(root, f)
if err != nil {
rerr = fmt.Errorf("could not get file for unchanged case2 merge file. merge: %v %v", commitHash, err)
return
}
}
s.repo[commitHash][f] = res2
}
return
}
func (s *Process) slowGitBlame(commitHash string, filePath string) (res incblame.Blame, _ error) {
bl, err := gitblame2.Run(s.opts.RepoDir, commitHash, filePath)
//fmt.Println("running regular blame for file switching from bin mode to regular")
if err != nil {
return res, err
}
res.Commit = commitHash
for _, l := range bl.Lines {
l2 := &incblame.Line{}
l2.Commit = l.CommitHash
l2.Line = []byte(l.Content)
res.Lines = append(res.Lines, l2)
}
return
}
func (s *Process) RunGetAll() (_ []Result, err error) {
res := make(chan Result)
done := make(chan bool)
go func() {
err = s.Run(res)
done <- true
}()
var res2 []Result
for r := range res {
res2 = append(res2, r)
}
<-done
return res2, err
}
func (s *Process) gitLogPatches() (io.ReadCloser, error) {
// empty file at temp location to set an empty attributesFile
f, err := ioutil.TempFile("", "ripsrc")
if err != nil {
return nil, err
}
err = f.Close()
if err != nil {
return nil, err
}
args := []string{
"-c", "core.attributesFile=" + f.Name(),
"-c", "diff.renameLimit=10000",
"log",
"-p",
"-m",
"--date-order",
"--reverse",
"--no-abbrev-commit",
"--pretty=short",
}
if s.opts.CommitFromIncl != "" {
if s.opts.AllBranches {
for _, c := range s.opts.WantedBranchRefs {
args = append(args, c)
}
}
pf := ""
if s.opts.CommitFromMakeNonIncl {
pf = "..HEAD"
} else {
pf = "^..HEAD"
}
args = append(args, s.opts.CommitFromIncl+pf)
} else {
if s.opts.AllBranches {
args = append(args, "--all")
}
}
ctx := context.Background()
//if s.opts.DisableCache {
return gitexec.ExecPiped(ctx, s.gitCommand, s.opts.RepoDir, args)
//}
//return gitexec.ExecWithCache(ctx, s.gitCommand, s.opts.RepoDir, args)
}
| Timing | identifier_name |
lstm.py | # coding=utf-8
import random
import string
import zipfile
import numpy as np
import tensorflow as tf
from not_mnist.img_pickle import save_obj, load_pickle
from not_mnist.load_data import maybe_download
def read_data(filename):
f = zipfile.ZipFile(filename)
for name in f.namelist():
return tf.compat.as_str(f.read(name))
f.close()
data_set = load_pickle('text8_text.pickle')
if data_set is None:
# load data
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', 31344016, url=url)
# read data
text = read_data(filename)
print('Data size %d' % len(text))
save_obj('text8_text.pickle', text)
else:
text = data_set
# Create a small validation set.
valid_size = 1000
valid_text = text[:valid_size]
train_text = text[valid_size:]
train_size = len(train_text)
print(train_size, train_text[:64])
print(valid_size, valid_text[:64])
# Utility functions to map characters to vocabulary IDs and back.
vocabulary_size = len(string.ascii_lowercase) + 1 # [a-z] + ' '
# ascii code for character
first_letter = ord(string.ascii_lowercase[0])
def char2id(char):
if char in string.ascii_lowercase:
return ord(char) - first_letter + 1
elif char == ' ':
return 0
else:
print('Unexpected character: %s' % char)
return 0
def id2char(dictid):
if dictid > 0:
return chr(dictid + first_letter - 1)
else:
return ' '
print(char2id('a'), char2id('z'), char2id(' '), char2id('ï'))
print(id2char(1), id2char(26), id2char(0))
# Function to generate a training batch for the LSTM model.
batch_size = 64
num_unrollings = 10
class BatchGenerator(object):
def __init__(self, text, batch_size, num_unrollings):
self._text = text
self._text_size = len(text)
self._batch_size = batch_size
self._num_unrollings = num_unrollings
segment = self._text_size // batch_size
self._cursor = [offset * segment for offset in range(batch_size)]
self._last_batch = self._next_batch()
def _next_batch(self):
"""Generate a single batch from the current cursor position in the data."""
batch = np.zeros(shape=(self._batch_size, vocabulary_size), dtype=np.float)
for b in range(self._batch_size):
# same id, same index of second dimension
batch[b, char2id(self._text[self._cursor[b]])] = 1.0
self._cursor[b] = (self._cursor[b] + 1) % self._text_size
return batch
def next(self):
"""Generate the next array of batches from the data. The array consists of
the last batch of the previous array, followed by num_unrollings new ones.
"""
batches = [self._last_batch]
for step in range(self._num_unrollings):
batches.append(self._next_batch())
self._last_batch = batches[-1]
return batches
def characters(probabilities):
"""Turn a 1-hot encoding or a probability distribution over the possible
characters back into its (most likely) character representation."""
# argmax for the most likely character
return [id2char(c) for c in np.argmax(probabilities, 1)]
def batches2string(batches):
"""Convert a sequence of batches back into their (most likely) string
representation."""
s = [''] * batches[0].shape[0]
for b in batches:
s = [''.join(x) for x in zip(s, characters(b))]
return s
train_batches = BatchGenerator(train_text, batch_size, num_unrollings)
valid_batches = BatchGenerator(valid_text, 1, 1)
print(batches2string(train_batches.next()))
print(batches2string(train_batches.next()))
print(batches2string(valid_batches.next()))
print(batches2string(valid_batches.next()))
def logprob(predictions, labels):
# prevent negative probability
"""Log-probability of the true labels in a predicted batch."""
predictions[predictions < 1e-10] = 1e-10
return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]
def sample_distribution(distribution):
"""Sample one element from a distribution assumed to be an array of normalized
probabilities.
"""
# 取一部分数据用于评估,所取数据比例随机
r = random.uniform(0, 1)
s = 0
for i in range(len(distribution)):
s += distribution[i]
if s >= r:
return i
return len(distribution) - 1
def sample(prediction):
"""Turn a (column) prediction into 1-hot encoded samples."""
p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)
p[0, sample_distribution(prediction[0])] = 1.0
return p
def random_distribution():
"""Generate a random column of probabilities."""
b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])
return b / np.sum(b, 1)[:, None]
# Simple LSTM Model.
num_nodes = 64
graph = tf.Graph()
with graph.as_default():
# Parameters:
# Input gate: input, previous output, and bias.
ix = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ib = tf.Variable(tf.zeros([1, num_nodes]))
# Forget gate: input, previous output, and bias.
fx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
fb = tf.Variable(tf.zeros([1, num_nodes]))
# Memory cell: input, state and bias.
cx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
cb = tf.Variable(tf.zeros([1, num_nodes]))
# Output gate: input, previous output, and bias.
ox = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ob = tf.Variable(tf.zeros([1, num_nodes]))
# Variables saving state across unrollings.
saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
# Classifier weights and biases.
w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))
b = tf.Variable(tf.zeros([vocabulary_size]))
# Definition of the cell computation.
def lstm_cell(i, o, state):
"""Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
Note that in this formulation, we omit the various connections between the
previous state and the gates."""
input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb
state = forget_gate * state + input_gate * tf.tanh(update)
output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
return output_gate * tf.tanh(state), state
# Input data.
train_data = list()
for _ in range(num_unrollings + 1):
train_data.append(
tf.place | ollings]
train_labels = train_data[1:] # labels are inputs shifted by one time step.
# Unrolled LSTM loop.
outputs = list()
output = saved_output
state = saved_state
for i in train_inputs:
output, state = lstm_cell(i, output, state)
outputs.append(output)
# State saving across unrollings.
with tf.control_dependencies([saved_output.assign(output),
saved_state.assign(state)]):
# Classifier.
logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits, tf.concat(0, train_labels)))
# Optimizer.
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
10.0, global_step, 5000, 0.1, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, 1.25)
optimizer = optimizer.apply_gradients(
zip(gradients, v), global_step=global_step)
# Predictions.
train_prediction = tf.nn.softmax(logits)
# Sampling and validation eval: batch 1, no unrolling.
sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])
saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))
saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))
reset_sample_state = tf.group(
saved_sample_output.assign(tf.zeros([1, num_nodes])),
saved_sample_state.assign(tf.zeros([1, num_nodes])))
sample_output, sample_state = lstm_cell(
sample_input, saved_sample_output, saved_sample_state)
with tf.control_dependencies([saved_sample_output.assign(sample_output),
saved_sample_state.assign(sample_state)]):
sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, w, b))
num_steps = 7001
summary_frequency = 100
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
mean_loss = 0
for step in range(num_steps):
batches = train_batches.next()
feed_dict = dict()
for i in range(num_unrollings + 1):
feed_dict[train_data[i]] = batches[i]
_, l, predictions, lr = session.run(
[optimizer, loss, train_prediction, learning_rate], feed_dict=feed_dict)
mean_loss += l
if step % summary_frequency == 0:
if step > 0:
mean_loss /= summary_frequency
# The mean loss is an estimate of the loss over the last few batches.
print(
'Average loss at step %d: %f learning rate: %f' % (step, mean_loss, lr))
mean_loss = 0
labels = np.concatenate(list(batches)[1:])
print('Minibatch perplexity: %.2f' % float(
np.exp(logprob(predictions, labels))))
if step % (summary_frequency * 10) == 0:
# Generate some samples.
print('=' * 80)
for _ in range(5):
feed = sample(random_distribution())
sentence = characters(feed)[0]
reset_sample_state.run()
for _ in range(79):
prediction = sample_prediction.eval({sample_input: feed})
feed = sample(prediction)
sentence += characters(feed)[0]
print(sentence)
print('=' * 80)
# Measure validation set perplexity.
reset_sample_state.run()
valid_logprob = 0
for _ in range(valid_size):
b = valid_batches.next()
predictions = sample_prediction.eval({sample_input: b[0]})
valid_logprob = valid_logprob + logprob(predictions, b[1])
print('Validation set perplexity: %.2f' % float(np.exp(
valid_logprob / valid_size)))
| holder(tf.float32, shape=[batch_size, vocabulary_size]))
train_inputs = train_data[:num_unr | conditional_block |
lstm.py | # coding=utf-8
import random
import string
import zipfile
import numpy as np
import tensorflow as tf
from not_mnist.img_pickle import save_obj, load_pickle
from not_mnist.load_data import maybe_download
def read_data(filename):
f = zipfile.ZipFile(filename)
for name in f.namelist():
return tf.compat.as_str(f.read(name))
f.close()
data_set = load_pickle('text8_text.pickle')
if data_set is None:
# load data
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', 31344016, url=url)
# read data
text = read_data(filename)
print('Data size %d' % len(text))
save_obj('text8_text.pickle', text)
else:
text = data_set
# Create a small validation set.
valid_size = 1000
valid_text = text[:valid_size]
train_text = text[valid_size:]
train_size = len(train_text)
print(train_size, train_text[:64])
print(valid_size, valid_text[:64])
# Utility functions to map characters to vocabulary IDs and back.
vocabulary_size = len(string.ascii_lowercase) + 1 # [a-z] + ' '
# ascii code for character
first_letter = ord(string.ascii_lowercase[0])
def char2id(char):
if char in string.ascii_lowercase:
return ord(char) - first_letter + 1
elif char == ' ':
return 0
else:
print('Unexpected character: %s' % char)
return 0
def id2char(dictid):
if dictid > 0:
return chr(dictid + first_letter - 1)
else:
return ' '
print(char2id('a'), char2id('z'), char2id(' '), char2id('ï'))
print(id2char(1), id2char(26), id2char(0))
# Function to generate a training batch for the LSTM model.
batch_size = 64
num_unrollings = 10
class BatchGenerator(object):
def __init__(self, text, batch_size, num_unrollings):
self._text = text
self._text_size = len(text)
self._batch_size = batch_size
self._num_unrollings = num_unrollings
segment = self._text_size // batch_size
self._cursor = [offset * segment for offset in range(batch_size)]
self._last_batch = self._next_batch()
def _next_batch(self):
" |
def next(self):
"""Generate the next array of batches from the data. The array consists of
the last batch of the previous array, followed by num_unrollings new ones.
"""
batches = [self._last_batch]
for step in range(self._num_unrollings):
batches.append(self._next_batch())
self._last_batch = batches[-1]
return batches
def characters(probabilities):
"""Turn a 1-hot encoding or a probability distribution over the possible
characters back into its (most likely) character representation."""
# argmax for the most likely character
return [id2char(c) for c in np.argmax(probabilities, 1)]
def batches2string(batches):
"""Convert a sequence of batches back into their (most likely) string
representation."""
s = [''] * batches[0].shape[0]
for b in batches:
s = [''.join(x) for x in zip(s, characters(b))]
return s
train_batches = BatchGenerator(train_text, batch_size, num_unrollings)
valid_batches = BatchGenerator(valid_text, 1, 1)
print(batches2string(train_batches.next()))
print(batches2string(train_batches.next()))
print(batches2string(valid_batches.next()))
print(batches2string(valid_batches.next()))
def logprob(predictions, labels):
# prevent negative probability
"""Log-probability of the true labels in a predicted batch."""
predictions[predictions < 1e-10] = 1e-10
return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]
def sample_distribution(distribution):
"""Sample one element from a distribution assumed to be an array of normalized
probabilities.
"""
# 取一部分数据用于评估,所取数据比例随机
r = random.uniform(0, 1)
s = 0
for i in range(len(distribution)):
s += distribution[i]
if s >= r:
return i
return len(distribution) - 1
def sample(prediction):
"""Turn a (column) prediction into 1-hot encoded samples."""
p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)
p[0, sample_distribution(prediction[0])] = 1.0
return p
def random_distribution():
"""Generate a random column of probabilities."""
b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])
return b / np.sum(b, 1)[:, None]
# Simple LSTM Model.
num_nodes = 64
graph = tf.Graph()
with graph.as_default():
# Parameters:
# Input gate: input, previous output, and bias.
ix = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ib = tf.Variable(tf.zeros([1, num_nodes]))
# Forget gate: input, previous output, and bias.
fx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
fb = tf.Variable(tf.zeros([1, num_nodes]))
# Memory cell: input, state and bias.
cx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
cb = tf.Variable(tf.zeros([1, num_nodes]))
# Output gate: input, previous output, and bias.
ox = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ob = tf.Variable(tf.zeros([1, num_nodes]))
# Variables saving state across unrollings.
saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
# Classifier weights and biases.
w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))
b = tf.Variable(tf.zeros([vocabulary_size]))
# Definition of the cell computation.
def lstm_cell(i, o, state):
"""Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
Note that in this formulation, we omit the various connections between the
previous state and the gates."""
input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb
state = forget_gate * state + input_gate * tf.tanh(update)
output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
return output_gate * tf.tanh(state), state
# Input data.
train_data = list()
for _ in range(num_unrollings + 1):
train_data.append(
tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size]))
train_inputs = train_data[:num_unrollings]
train_labels = train_data[1:] # labels are inputs shifted by one time step.
# Unrolled LSTM loop.
outputs = list()
output = saved_output
state = saved_state
for i in train_inputs:
output, state = lstm_cell(i, output, state)
outputs.append(output)
# State saving across unrollings.
with tf.control_dependencies([saved_output.assign(output),
saved_state.assign(state)]):
# Classifier.
logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits, tf.concat(0, train_labels)))
# Optimizer.
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
10.0, global_step, 5000, 0.1, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, 1.25)
optimizer = optimizer.apply_gradients(
zip(gradients, v), global_step=global_step)
# Predictions.
train_prediction = tf.nn.softmax(logits)
# Sampling and validation eval: batch 1, no unrolling.
sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])
saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))
saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))
reset_sample_state = tf.group(
saved_sample_output.assign(tf.zeros([1, num_nodes])),
saved_sample_state.assign(tf.zeros([1, num_nodes])))
sample_output, sample_state = lstm_cell(
sample_input, saved_sample_output, saved_sample_state)
with tf.control_dependencies([saved_sample_output.assign(sample_output),
saved_sample_state.assign(sample_state)]):
sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, w, b))
num_steps = 7001
summary_frequency = 100
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
mean_loss = 0
for step in range(num_steps):
batches = train_batches.next()
feed_dict = dict()
for i in range(num_unrollings + 1):
feed_dict[train_data[i]] = batches[i]
_, l, predictions, lr = session.run(
[optimizer, loss, train_prediction, learning_rate], feed_dict=feed_dict)
mean_loss += l
if step % summary_frequency == 0:
if step > 0:
mean_loss /= summary_frequency
# The mean loss is an estimate of the loss over the last few batches.
print(
'Average loss at step %d: %f learning rate: %f' % (step, mean_loss, lr))
mean_loss = 0
labels = np.concatenate(list(batches)[1:])
print('Minibatch perplexity: %.2f' % float(
np.exp(logprob(predictions, labels))))
if step % (summary_frequency * 10) == 0:
# Generate some samples.
print('=' * 80)
for _ in range(5):
feed = sample(random_distribution())
sentence = characters(feed)[0]
reset_sample_state.run()
for _ in range(79):
prediction = sample_prediction.eval({sample_input: feed})
feed = sample(prediction)
sentence += characters(feed)[0]
print(sentence)
print('=' * 80)
# Measure validation set perplexity.
reset_sample_state.run()
valid_logprob = 0
for _ in range(valid_size):
b = valid_batches.next()
predictions = sample_prediction.eval({sample_input: b[0]})
valid_logprob = valid_logprob + logprob(predictions, b[1])
print('Validation set perplexity: %.2f' % float(np.exp(
valid_logprob / valid_size)))
| ""Generate a single batch from the current cursor position in the data."""
batch = np.zeros(shape=(self._batch_size, vocabulary_size), dtype=np.float)
for b in range(self._batch_size):
# same id, same index of second dimension
batch[b, char2id(self._text[self._cursor[b]])] = 1.0
self._cursor[b] = (self._cursor[b] + 1) % self._text_size
return batch
| identifier_body |
lstm.py | # coding=utf-8
import random
import string
import zipfile
import numpy as np
import tensorflow as tf
from not_mnist.img_pickle import save_obj, load_pickle
from not_mnist.load_data import maybe_download
def read_data(filename):
f = zipfile.ZipFile(filename)
for name in f.namelist():
return tf.compat.as_str(f.read(name))
f.close()
data_set = load_pickle('text8_text.pickle')
if data_set is None:
# load data
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', 31344016, url=url)
# read data
text = read_data(filename)
print('Data size %d' % len(text))
save_obj('text8_text.pickle', text)
else:
text = data_set
# Create a small validation set.
valid_size = 1000
valid_text = text[:valid_size]
train_text = text[valid_size:]
train_size = len(train_text)
print(train_size, train_text[:64])
print(valid_size, valid_text[:64])
# Utility functions to map characters to vocabulary IDs and back.
vocabulary_size = len(string.ascii_lowercase) + 1 # [a-z] + ' '
# ascii code for character
first_letter = ord(string.ascii_lowercase[0])
def char2id(char):
if char in string.ascii_lowercase:
return ord(char) - first_letter + 1
elif char == ' ':
return 0
else:
print('Unexpected character: %s' % char)
return 0
def id2char(dictid):
if dictid > 0:
return chr(dictid + first_letter - 1)
else:
return ' '
print(char2id('a'), char2id('z'), char2id(' '), char2id('ï'))
print(id2char(1), id2char(26), id2char(0))
# Function to generate a training batch for the LSTM model.
batch_size = 64
num_unrollings = 10
class BatchGenerator(object):
def __init__(self, text, batch_size, num_unrollings):
self._text = text
self._text_size = len(text)
self._batch_size = batch_size
self._num_unrollings = num_unrollings
segment = self._text_size // batch_size
self._cursor = [offset * segment for offset in range(batch_size)]
self._last_batch = self._next_batch()
def _next_batch(self):
"""Generate a single batch from the current cursor position in the data."""
batch = np.zeros(shape=(self._batch_size, vocabulary_size), dtype=np.float)
for b in range(self._batch_size):
# same id, same index of second dimension
batch[b, char2id(self._text[self._cursor[b]])] = 1.0
self._cursor[b] = (self._cursor[b] + 1) % self._text_size
return batch
def next(self):
"""Generate the next array of batches from the data. The array consists of
the last batch of the previous array, followed by num_unrollings new ones.
"""
batches = [self._last_batch]
for step in range(self._num_unrollings):
batches.append(self._next_batch())
self._last_batch = batches[-1]
return batches
def characters(probabilities):
"""Turn a 1-hot encoding or a probability distribution over the possible
characters back into its (most likely) character representation."""
# argmax for the most likely character
return [id2char(c) for c in np.argmax(probabilities, 1)]
def b | batches):
"""Convert a sequence of batches back into their (most likely) string
representation."""
s = [''] * batches[0].shape[0]
for b in batches:
s = [''.join(x) for x in zip(s, characters(b))]
return s
train_batches = BatchGenerator(train_text, batch_size, num_unrollings)
valid_batches = BatchGenerator(valid_text, 1, 1)
print(batches2string(train_batches.next()))
print(batches2string(train_batches.next()))
print(batches2string(valid_batches.next()))
print(batches2string(valid_batches.next()))
def logprob(predictions, labels):
# prevent negative probability
"""Log-probability of the true labels in a predicted batch."""
predictions[predictions < 1e-10] = 1e-10
return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]
def sample_distribution(distribution):
"""Sample one element from a distribution assumed to be an array of normalized
probabilities.
"""
# 取一部分数据用于评估,所取数据比例随机
r = random.uniform(0, 1)
s = 0
for i in range(len(distribution)):
s += distribution[i]
if s >= r:
return i
return len(distribution) - 1
def sample(prediction):
"""Turn a (column) prediction into 1-hot encoded samples."""
p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)
p[0, sample_distribution(prediction[0])] = 1.0
return p
def random_distribution():
"""Generate a random column of probabilities."""
b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])
return b / np.sum(b, 1)[:, None]
# Simple LSTM Model.
num_nodes = 64
graph = tf.Graph()
with graph.as_default():
# Parameters:
# Input gate: input, previous output, and bias.
ix = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ib = tf.Variable(tf.zeros([1, num_nodes]))
# Forget gate: input, previous output, and bias.
fx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
fb = tf.Variable(tf.zeros([1, num_nodes]))
# Memory cell: input, state and bias.
cx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
cb = tf.Variable(tf.zeros([1, num_nodes]))
# Output gate: input, previous output, and bias.
ox = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ob = tf.Variable(tf.zeros([1, num_nodes]))
# Variables saving state across unrollings.
saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
# Classifier weights and biases.
w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))
b = tf.Variable(tf.zeros([vocabulary_size]))
# Definition of the cell computation.
def lstm_cell(i, o, state):
"""Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
Note that in this formulation, we omit the various connections between the
previous state and the gates."""
input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb
state = forget_gate * state + input_gate * tf.tanh(update)
output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
return output_gate * tf.tanh(state), state
# Input data.
train_data = list()
for _ in range(num_unrollings + 1):
train_data.append(
tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size]))
train_inputs = train_data[:num_unrollings]
train_labels = train_data[1:] # labels are inputs shifted by one time step.
# Unrolled LSTM loop.
outputs = list()
output = saved_output
state = saved_state
for i in train_inputs:
output, state = lstm_cell(i, output, state)
outputs.append(output)
# State saving across unrollings.
with tf.control_dependencies([saved_output.assign(output),
saved_state.assign(state)]):
# Classifier.
logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits, tf.concat(0, train_labels)))
# Optimizer.
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
10.0, global_step, 5000, 0.1, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, 1.25)
optimizer = optimizer.apply_gradients(
zip(gradients, v), global_step=global_step)
# Predictions.
train_prediction = tf.nn.softmax(logits)
# Sampling and validation eval: batch 1, no unrolling.
sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])
saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))
saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))
reset_sample_state = tf.group(
saved_sample_output.assign(tf.zeros([1, num_nodes])),
saved_sample_state.assign(tf.zeros([1, num_nodes])))
sample_output, sample_state = lstm_cell(
sample_input, saved_sample_output, saved_sample_state)
with tf.control_dependencies([saved_sample_output.assign(sample_output),
saved_sample_state.assign(sample_state)]):
sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, w, b))
num_steps = 7001
summary_frequency = 100
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
mean_loss = 0
for step in range(num_steps):
batches = train_batches.next()
feed_dict = dict()
for i in range(num_unrollings + 1):
feed_dict[train_data[i]] = batches[i]
_, l, predictions, lr = session.run(
[optimizer, loss, train_prediction, learning_rate], feed_dict=feed_dict)
mean_loss += l
if step % summary_frequency == 0:
if step > 0:
mean_loss /= summary_frequency
# The mean loss is an estimate of the loss over the last few batches.
print(
'Average loss at step %d: %f learning rate: %f' % (step, mean_loss, lr))
mean_loss = 0
labels = np.concatenate(list(batches)[1:])
print('Minibatch perplexity: %.2f' % float(
np.exp(logprob(predictions, labels))))
if step % (summary_frequency * 10) == 0:
# Generate some samples.
print('=' * 80)
for _ in range(5):
feed = sample(random_distribution())
sentence = characters(feed)[0]
reset_sample_state.run()
for _ in range(79):
prediction = sample_prediction.eval({sample_input: feed})
feed = sample(prediction)
sentence += characters(feed)[0]
print(sentence)
print('=' * 80)
# Measure validation set perplexity.
reset_sample_state.run()
valid_logprob = 0
for _ in range(valid_size):
b = valid_batches.next()
predictions = sample_prediction.eval({sample_input: b[0]})
valid_logprob = valid_logprob + logprob(predictions, b[1])
print('Validation set perplexity: %.2f' % float(np.exp(
valid_logprob / valid_size)))
| atches2string( | identifier_name |
lstm.py | # coding=utf-8
import random
import string
import zipfile
import numpy as np
import tensorflow as tf
from not_mnist.img_pickle import save_obj, load_pickle
from not_mnist.load_data import maybe_download
def read_data(filename):
f = zipfile.ZipFile(filename)
for name in f.namelist():
return tf.compat.as_str(f.read(name))
f.close()
data_set = load_pickle('text8_text.pickle')
if data_set is None:
# load data
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', 31344016, url=url)
# read data
text = read_data(filename)
print('Data size %d' % len(text))
save_obj('text8_text.pickle', text)
else:
text = data_set
# Create a small validation set.
valid_size = 1000
valid_text = text[:valid_size]
train_text = text[valid_size:]
train_size = len(train_text)
print(train_size, train_text[:64])
print(valid_size, valid_text[:64])
# Utility functions to map characters to vocabulary IDs and back.
vocabulary_size = len(string.ascii_lowercase) + 1 # [a-z] + ' '
# ascii code for character
first_letter = ord(string.ascii_lowercase[0])
def char2id(char):
if char in string.ascii_lowercase:
return ord(char) - first_letter + 1
elif char == ' ':
return 0
else:
print('Unexpected character: %s' % char)
return 0
def id2char(dictid):
if dictid > 0:
return chr(dictid + first_letter - 1)
else:
return ' '
print(char2id('a'), char2id('z'), char2id(' '), char2id('ï'))
print(id2char(1), id2char(26), id2char(0))
# Function to generate a training batch for the LSTM model.
batch_size = 64
num_unrollings = 10
class BatchGenerator(object):
def __init__(self, text, batch_size, num_unrollings):
self._text = text
self._text_size = len(text)
self._batch_size = batch_size
self._num_unrollings = num_unrollings
segment = self._text_size // batch_size
self._cursor = [offset * segment for offset in range(batch_size)]
self._last_batch = self._next_batch()
def _next_batch(self):
"""Generate a single batch from the current cursor position in the data."""
batch = np.zeros(shape=(self._batch_size, vocabulary_size), dtype=np.float)
for b in range(self._batch_size):
# same id, same index of second dimension
batch[b, char2id(self._text[self._cursor[b]])] = 1.0
self._cursor[b] = (self._cursor[b] + 1) % self._text_size
return batch
def next(self):
"""Generate the next array of batches from the data. The array consists of
the last batch of the previous array, followed by num_unrollings new ones.
"""
batches = [self._last_batch]
for step in range(self._num_unrollings):
batches.append(self._next_batch())
self._last_batch = batches[-1]
return batches
def characters(probabilities):
"""Turn a 1-hot encoding or a probability distribution over the possible
characters back into its (most likely) character representation."""
# argmax for the most likely character
return [id2char(c) for c in np.argmax(probabilities, 1)]
def batches2string(batches):
"""Convert a sequence of batches back into their (most likely) string
representation."""
s = [''] * batches[0].shape[0]
for b in batches:
s = [''.join(x) for x in zip(s, characters(b))]
return s
train_batches = BatchGenerator(train_text, batch_size, num_unrollings)
valid_batches = BatchGenerator(valid_text, 1, 1)
print(batches2string(train_batches.next()))
print(batches2string(train_batches.next()))
print(batches2string(valid_batches.next()))
print(batches2string(valid_batches.next())) | """Log-probability of the true labels in a predicted batch."""
predictions[predictions < 1e-10] = 1e-10
return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]
def sample_distribution(distribution):
"""Sample one element from a distribution assumed to be an array of normalized
probabilities.
"""
# 取一部分数据用于评估,所取数据比例随机
r = random.uniform(0, 1)
s = 0
for i in range(len(distribution)):
s += distribution[i]
if s >= r:
return i
return len(distribution) - 1
def sample(prediction):
"""Turn a (column) prediction into 1-hot encoded samples."""
p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)
p[0, sample_distribution(prediction[0])] = 1.0
return p
def random_distribution():
"""Generate a random column of probabilities."""
b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])
return b / np.sum(b, 1)[:, None]
# Simple LSTM Model.
num_nodes = 64
graph = tf.Graph()
with graph.as_default():
# Parameters:
# Input gate: input, previous output, and bias.
ix = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ib = tf.Variable(tf.zeros([1, num_nodes]))
# Forget gate: input, previous output, and bias.
fx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
fb = tf.Variable(tf.zeros([1, num_nodes]))
# Memory cell: input, state and bias.
cx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
cb = tf.Variable(tf.zeros([1, num_nodes]))
# Output gate: input, previous output, and bias.
ox = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ob = tf.Variable(tf.zeros([1, num_nodes]))
# Variables saving state across unrollings.
saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
# Classifier weights and biases.
w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))
b = tf.Variable(tf.zeros([vocabulary_size]))
# Definition of the cell computation.
def lstm_cell(i, o, state):
"""Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
Note that in this formulation, we omit the various connections between the
previous state and the gates."""
input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb
state = forget_gate * state + input_gate * tf.tanh(update)
output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
return output_gate * tf.tanh(state), state
# Input data.
train_data = list()
for _ in range(num_unrollings + 1):
train_data.append(
tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size]))
train_inputs = train_data[:num_unrollings]
train_labels = train_data[1:] # labels are inputs shifted by one time step.
# Unrolled LSTM loop.
outputs = list()
output = saved_output
state = saved_state
for i in train_inputs:
output, state = lstm_cell(i, output, state)
outputs.append(output)
# State saving across unrollings.
with tf.control_dependencies([saved_output.assign(output),
saved_state.assign(state)]):
# Classifier.
logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits, tf.concat(0, train_labels)))
# Optimizer.
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
10.0, global_step, 5000, 0.1, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, 1.25)
optimizer = optimizer.apply_gradients(
zip(gradients, v), global_step=global_step)
# Predictions.
train_prediction = tf.nn.softmax(logits)
# Sampling and validation eval: batch 1, no unrolling.
sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])
saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))
saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))
reset_sample_state = tf.group(
saved_sample_output.assign(tf.zeros([1, num_nodes])),
saved_sample_state.assign(tf.zeros([1, num_nodes])))
sample_output, sample_state = lstm_cell(
sample_input, saved_sample_output, saved_sample_state)
with tf.control_dependencies([saved_sample_output.assign(sample_output),
saved_sample_state.assign(sample_state)]):
sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, w, b))
num_steps = 7001
summary_frequency = 100
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
mean_loss = 0
for step in range(num_steps):
batches = train_batches.next()
feed_dict = dict()
for i in range(num_unrollings + 1):
feed_dict[train_data[i]] = batches[i]
_, l, predictions, lr = session.run(
[optimizer, loss, train_prediction, learning_rate], feed_dict=feed_dict)
mean_loss += l
if step % summary_frequency == 0:
if step > 0:
mean_loss /= summary_frequency
# The mean loss is an estimate of the loss over the last few batches.
print(
'Average loss at step %d: %f learning rate: %f' % (step, mean_loss, lr))
mean_loss = 0
labels = np.concatenate(list(batches)[1:])
print('Minibatch perplexity: %.2f' % float(
np.exp(logprob(predictions, labels))))
if step % (summary_frequency * 10) == 0:
# Generate some samples.
print('=' * 80)
for _ in range(5):
feed = sample(random_distribution())
sentence = characters(feed)[0]
reset_sample_state.run()
for _ in range(79):
prediction = sample_prediction.eval({sample_input: feed})
feed = sample(prediction)
sentence += characters(feed)[0]
print(sentence)
print('=' * 80)
# Measure validation set perplexity.
reset_sample_state.run()
valid_logprob = 0
for _ in range(valid_size):
b = valid_batches.next()
predictions = sample_prediction.eval({sample_input: b[0]})
valid_logprob = valid_logprob + logprob(predictions, b[1])
print('Validation set perplexity: %.2f' % float(np.exp(
valid_logprob / valid_size))) |
def logprob(predictions, labels):
# prevent negative probability | random_line_split |
dataset.py |
# This module is used to load pascalvoc datasets (2007 or 2012)
import os
import tensorflow as tf
from configs.config_common import *
from configs.config_train import *
from configs.config_test import *
import sys
import random
import numpy as np
import xml.etree.ElementTree as ET
# Original dataset organisation.
DIRECTORY_ANNOTATIONS = 'Annotations/'
DIRECTORY_IMAGES = 'JPEGImages/'
# TFRecords convertion parameters.
RANDOM_SEED = 4242
SAMPLES_PER_FILES = 200
slim = tf.contrib.slim
class Dataset(object):
def __init__(self):
# Descriptions of the image items
self.items_descriptions = {
'image': 'A color image of varying height and width.',
'shape': 'Shape of the image',
'object/bbox': 'A list of bounding boxes, one per each object.',
'object/label': 'A list of labels, one per each object.',
}
# Features of Pascal VOC TFRecords.
self.features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
}
# Items in Pascal VOC TFRecords.
self.items = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'gt_bboxes': slim.tfexample_decoder.BoundingBox(['ymin','xmin','ymax','xmax'], 'image/object/bbox/'),
'gt_labels': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
'difficult_objects': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
}
# This function reads dataset from tfrecords
# Inputs:
# datase_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Outputs:
# loaded dataset
def read_dataset_from_tfrecords(self, dataset_name, train_or_test, dataset_path):
with tf.name_scope(None, "read_dataset_from_tfrecords") as scope:
if dataset_name == 'pascalvoc_2007' or dataset_name == 'pascalvoc_2012':
dataset = self.load_dataset(dataset_name, train_or_test, dataset_path)
return dataset
# This function is used to load pascalvoc2007 or psaclvoc2012 datasets
# Inputs:
# dataset_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Output:
# loaded dataset
def load_dataset(self, dataset_name, train_or_test, dataset_path):
dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'
if dataset_name == 'pascalvoc_2007':
train_test_sizes = {
'train': FLAGS.pascalvoc_2007_train_size,
'test': FLAGS.pascalvoc_2007_test_size,
}
elif dataset_name == 'pascalvoc_2012':
train_test_sizes = {
'train': FLAGS.pascalvoc_2012_train_size,
}
dataset_file_name = os.path.join(dataset_path, dataset_file_name % train_or_test)
reader = tf.TFRecordReader
decoder = slim.tfexample_decoder.TFExampleDecoder(self.features, self.items)
return slim.dataset.Dataset(
data_sources=dataset_file_name,
reader=reader,
decoder=decoder,
num_samples=train_test_sizes[train_or_test],
items_to_descriptions=self.items_descriptions,
num_classes=FLAGS.num_classes-1,
labels_to_names=None)
# This function gets groundtruth bboxes & labels from dataset
# Inputs:
# dataset
# train_or_test: train/test
# Output:
# image, ground-truth bboxes, ground-truth labels, ground-truth difficult objects
def get_groundtruth_from_dataset(self, dataset, train_or_test):
# Dataset provider
with tf.name_scope(None, "get_groundtruth_from_dataset") as scope:
if train_or_test == 'test':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.test_num_readers,
common_queue_capacity=FLAGS.test_common_queue_capacity,
common_queue_min=FLAGS.test_batch_size,
shuffle=FLAGS.test_shuffle)
elif train_or_test == 'train':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers= FLAGS.train_num_readers,
common_queue_capacity= FLAGS.train_common_queue_capacity,
common_queue_min= 10 * FLAGS.train_batch_size,
shuffle=FLAGS.train_shuffle)
# Get images, groundtruth bboxes & groundtruth labels from database
[image, gt_bboxes, gt_labels] = provider.get(['image','gt_bboxes','gt_labels'])
# Discard difficult objects
gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64)
if FLAGS.test_discard_difficult_objects:
[gt_difficult_objects] = provider.get(['difficult_objects'])
return [image, gt_bboxes, gt_labels, gt_difficult_objects]
##########################################
# Convert PascalVOC to TF recorsd
# Process a image and annotation file.
# Inputs:
# filename: string, path to an image file e.g., '/path/to/example.JPG'.
# coder: instance of ImageCoder to provide TensorFlow image coding utils.
# Outputs:
# image_buffer: string, JPEG encoding of RGB image.
# height: integer, image height in pixels.
# width: integer, image width in pixels.
def _process_image_PascalVOC(self, directory, name):
# Read the image file.
filename = directory + DIRECTORY_IMAGES + name + '.jpg'
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Read the XML annotation file.
filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')
tree = ET.parse(filename)
root = tree.getroot()
# Image shape.
size = root.find('size')
shape = [int(size.find('height').text), int(size.find('width').text), int(size.find('depth').text)]
# Find annotations.
bboxes = []
labels = []
labels_text = []
difficult = []
truncated = []
for obj in root.findall('object'):
label = obj.find('name').text
labels.append(int(VOC_LABELS[label][0]))
labels_text.append(label.encode('ascii'))
if obj.find('difficult'):
difficult.append(int(obj.find('difficult').text))
else:
difficult.append(0)
if obj.find('truncated'):
truncated.append(int(obj.find('truncated').text))
else:
truncated.append(0)
bbox = obj.find('bndbox')
bboxes.append((float(bbox.find('ymin').text) / shape[0],
float(bbox.find('xmin').text) / shape[1],
float(bbox.find('ymax').text) / shape[0],
float(bbox.find('xmax').text) / shape[1]
))
return image_data, shape, bboxes, labels, labels_text, difficult, truncated
# Build an Example proto for an image example.
# Args:
# image_data: string, JPEG encoding of RGB image;
# labels: list of integers, identifier for the ground truth;
# labels_text: list of strings, human-readable labels;
# bboxes: list of bounding boxes; each box is a list of integers;
# shape: 3 integers, image shapes in pixels.
# Returns:
# Example proto
def _convert_to_example_PascalVOC(self, image_data, labels, labels_text, bboxes, shape, difficult, truncated):
xmin = []
ymin = []
xmax = []
ymax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': self.int64_feature(shape[0]),
'image/width': self.int64_feature(shape[1]),
'image/channels': self.int64_feature(shape[2]),
'image/shape': self.int64_feature(shape),
'image/object/bbox/xmin': self.float_feature(xmin),
'image/object/bbox/xmax': self.float_feature(xmax),
'image/object/bbox/ymin': self.float_feature(ymin),
'image/object/bbox/ymax': self.float_feature(ymax),
'image/object/bbox/label': self.int64_feature(labels),
'image/object/bbox/label_text': self.bytes_feature(labels_text),
'image/object/bbox/difficult': self.int64_feature(difficult),
'image/object/bbox/truncated': self.int64_feature(truncated),
'image/format': self.bytes_feature(image_format),
'image/encoded': self.bytes_feature(image_data)}))
return example
# Loads data from image and annotations files and add them to a TFRecord.
# Inputs:
# dataset_dir: Dataset directory;
# name: Image name to add to the TFRecord;
# tfrecord_writer: The TFRecord writer to use for writing.
def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):
image_data, shape, bboxes, labels, labels_text, difficult, truncated = self._process_image_PascalVOC(dataset_dir, name)
example = self._convert_to_example_PascalVOC(image_data, labels, labels_text, bboxes, shape, difficult, truncated)
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename_PascalVOC(output_dir, name, idx):
return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)
# Convert images to tfrecords
# Args:
# dataset_dir: The dataset directory where the dataset is stored.
# output_dir: Output directory.
def run_PascalVOC(self, dataset_dir, output_dir, name='voc_train', shuffling=False):
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
# Dataset filenames, and shuffling.
path = os.path.join(dataset_dir, DIRECTORY_ANNOTATIONS)
filenames = sorted(os.listdir(path))
if shuffling:
random.seed(RANDOM_SEED)
random.shuffle(filenames)
# Process dataset files.
i = 0
fidx = 0
while i < len(filenames):
# Open new TFRecord file.
tf_filename = self._get_output_filename(output_dir, name, fidx)
with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
j = 0
while i < len(filenames) and j < SAMPLES_PER_FILES:
sys.stdout.write('\r>> Converting image %d/%d' % (i+1, len(filenames)))
sys.stdout.flush()
filename = filenames[i]
img_name = filename[:-4]
self._add_to_tfrecord_PascalVOC(dataset_dir, img_name, tfrecord_writer)
i += 1
j += 1
fidx += 1
print('\n ImageDB to TF conversion finished. ')
# Wrapper for inserting int64 features into Example proto.
def int64_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
# Wrapper for inserting float features into Example proto.
def | (self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
# Wrapper for inserting bytes features into Example proto.
def bytes_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
| float_feature | identifier_name |
dataset.py |
# This module is used to load pascalvoc datasets (2007 or 2012)
import os
import tensorflow as tf
from configs.config_common import *
from configs.config_train import *
from configs.config_test import *
import sys
import random
import numpy as np
import xml.etree.ElementTree as ET
# Original dataset organisation.
DIRECTORY_ANNOTATIONS = 'Annotations/'
DIRECTORY_IMAGES = 'JPEGImages/'
# TFRecords convertion parameters.
RANDOM_SEED = 4242
SAMPLES_PER_FILES = 200
slim = tf.contrib.slim
class Dataset(object):
def __init__(self):
# Descriptions of the image items
self.items_descriptions = {
'image': 'A color image of varying height and width.',
'shape': 'Shape of the image',
'object/bbox': 'A list of bounding boxes, one per each object.',
'object/label': 'A list of labels, one per each object.',
}
# Features of Pascal VOC TFRecords.
self.features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
}
# Items in Pascal VOC TFRecords.
self.items = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'gt_bboxes': slim.tfexample_decoder.BoundingBox(['ymin','xmin','ymax','xmax'], 'image/object/bbox/'),
'gt_labels': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
'difficult_objects': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
}
# This function reads dataset from tfrecords
# Inputs:
# datase_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Outputs:
# loaded dataset
def read_dataset_from_tfrecords(self, dataset_name, train_or_test, dataset_path):
with tf.name_scope(None, "read_dataset_from_tfrecords") as scope:
if dataset_name == 'pascalvoc_2007' or dataset_name == 'pascalvoc_2012':
dataset = self.load_dataset(dataset_name, train_or_test, dataset_path)
return dataset
# This function is used to load pascalvoc2007 or psaclvoc2012 datasets
# Inputs:
# dataset_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Output:
# loaded dataset
def load_dataset(self, dataset_name, train_or_test, dataset_path):
dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'
if dataset_name == 'pascalvoc_2007':
train_test_sizes = {
'train': FLAGS.pascalvoc_2007_train_size,
'test': FLAGS.pascalvoc_2007_test_size,
}
elif dataset_name == 'pascalvoc_2012':
train_test_sizes = {
'train': FLAGS.pascalvoc_2012_train_size,
}
dataset_file_name = os.path.join(dataset_path, dataset_file_name % train_or_test)
reader = tf.TFRecordReader
decoder = slim.tfexample_decoder.TFExampleDecoder(self.features, self.items)
return slim.dataset.Dataset(
data_sources=dataset_file_name,
reader=reader,
decoder=decoder,
num_samples=train_test_sizes[train_or_test],
items_to_descriptions=self.items_descriptions,
num_classes=FLAGS.num_classes-1,
labels_to_names=None)
# This function gets groundtruth bboxes & labels from dataset
# Inputs:
# dataset
# train_or_test: train/test
# Output:
# image, ground-truth bboxes, ground-truth labels, ground-truth difficult objects
def get_groundtruth_from_dataset(self, dataset, train_or_test):
# Dataset provider
with tf.name_scope(None, "get_groundtruth_from_dataset") as scope:
if train_or_test == 'test':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.test_num_readers,
common_queue_capacity=FLAGS.test_common_queue_capacity,
common_queue_min=FLAGS.test_batch_size,
shuffle=FLAGS.test_shuffle)
elif train_or_test == 'train':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers= FLAGS.train_num_readers,
common_queue_capacity= FLAGS.train_common_queue_capacity,
common_queue_min= 10 * FLAGS.train_batch_size,
shuffle=FLAGS.train_shuffle)
# Get images, groundtruth bboxes & groundtruth labels from database
[image, gt_bboxes, gt_labels] = provider.get(['image','gt_bboxes','gt_labels'])
# Discard difficult objects
gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64)
if FLAGS.test_discard_difficult_objects:
[gt_difficult_objects] = provider.get(['difficult_objects'])
return [image, gt_bboxes, gt_labels, gt_difficult_objects]
##########################################
# Convert PascalVOC to TF recorsd
# Process a image and annotation file.
# Inputs:
# filename: string, path to an image file e.g., '/path/to/example.JPG'.
# coder: instance of ImageCoder to provide TensorFlow image coding utils.
# Outputs:
# image_buffer: string, JPEG encoding of RGB image.
# height: integer, image height in pixels.
# width: integer, image width in pixels.
def _process_image_PascalVOC(self, directory, name):
# Read the image file.
filename = directory + DIRECTORY_IMAGES + name + '.jpg'
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Read the XML annotation file.
filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')
tree = ET.parse(filename)
root = tree.getroot()
# Image shape.
size = root.find('size')
shape = [int(size.find('height').text), int(size.find('width').text), int(size.find('depth').text)]
# Find annotations.
bboxes = []
labels = []
labels_text = []
difficult = []
truncated = []
for obj in root.findall('object'):
label = obj.find('name').text
labels.append(int(VOC_LABELS[label][0]))
labels_text.append(label.encode('ascii'))
if obj.find('difficult'):
difficult.append(int(obj.find('difficult').text))
else:
difficult.append(0)
if obj.find('truncated'):
truncated.append(int(obj.find('truncated').text))
else:
truncated.append(0)
bbox = obj.find('bndbox')
bboxes.append((float(bbox.find('ymin').text) / shape[0],
float(bbox.find('xmin').text) / shape[1],
float(bbox.find('ymax').text) / shape[0],
float(bbox.find('xmax').text) / shape[1]
))
return image_data, shape, bboxes, labels, labels_text, difficult, truncated
# Build an Example proto for an image example.
# Args:
# image_data: string, JPEG encoding of RGB image;
# labels: list of integers, identifier for the ground truth;
# labels_text: list of strings, human-readable labels;
# bboxes: list of bounding boxes; each box is a list of integers;
# shape: 3 integers, image shapes in pixels.
# Returns:
# Example proto
def _convert_to_example_PascalVOC(self, image_data, labels, labels_text, bboxes, shape, difficult, truncated):
xmin = []
ymin = []
xmax = []
ymax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': self.int64_feature(shape[0]),
'image/width': self.int64_feature(shape[1]),
'image/channels': self.int64_feature(shape[2]),
'image/shape': self.int64_feature(shape),
'image/object/bbox/xmin': self.float_feature(xmin),
'image/object/bbox/xmax': self.float_feature(xmax),
'image/object/bbox/ymin': self.float_feature(ymin),
'image/object/bbox/ymax': self.float_feature(ymax),
'image/object/bbox/label': self.int64_feature(labels),
'image/object/bbox/label_text': self.bytes_feature(labels_text),
'image/object/bbox/difficult': self.int64_feature(difficult),
'image/object/bbox/truncated': self.int64_feature(truncated),
'image/format': self.bytes_feature(image_format),
'image/encoded': self.bytes_feature(image_data)}))
return example
# Loads data from image and annotations files and add them to a TFRecord.
# Inputs:
# dataset_dir: Dataset directory;
# name: Image name to add to the TFRecord;
# tfrecord_writer: The TFRecord writer to use for writing.
def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):
image_data, shape, bboxes, labels, labels_text, difficult, truncated = self._process_image_PascalVOC(dataset_dir, name)
example = self._convert_to_example_PascalVOC(image_data, labels, labels_text, bboxes, shape, difficult, truncated)
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename_PascalVOC(output_dir, name, idx):
|
# Convert images to tfrecords
# Args:
# dataset_dir: The dataset directory where the dataset is stored.
# output_dir: Output directory.
def run_PascalVOC(self, dataset_dir, output_dir, name='voc_train', shuffling=False):
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
# Dataset filenames, and shuffling.
path = os.path.join(dataset_dir, DIRECTORY_ANNOTATIONS)
filenames = sorted(os.listdir(path))
if shuffling:
random.seed(RANDOM_SEED)
random.shuffle(filenames)
# Process dataset files.
i = 0
fidx = 0
while i < len(filenames):
# Open new TFRecord file.
tf_filename = self._get_output_filename(output_dir, name, fidx)
with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
j = 0
while i < len(filenames) and j < SAMPLES_PER_FILES:
sys.stdout.write('\r>> Converting image %d/%d' % (i+1, len(filenames)))
sys.stdout.flush()
filename = filenames[i]
img_name = filename[:-4]
self._add_to_tfrecord_PascalVOC(dataset_dir, img_name, tfrecord_writer)
i += 1
j += 1
fidx += 1
print('\n ImageDB to TF conversion finished. ')
# Wrapper for inserting int64 features into Example proto.
def int64_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
# Wrapper for inserting float features into Example proto.
def float_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
# Wrapper for inserting bytes features into Example proto.
def bytes_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
| return '%s/%s_%03d.tfrecord' % (output_dir, name, idx) | identifier_body |
dataset.py | # This module is used to load pascalvoc datasets (2007 or 2012)
import os
import tensorflow as tf
from configs.config_common import *
from configs.config_train import *
from configs.config_test import *
import sys
import random
import numpy as np
import xml.etree.ElementTree as ET
# Original dataset organisation.
DIRECTORY_ANNOTATIONS = 'Annotations/'
DIRECTORY_IMAGES = 'JPEGImages/'
# TFRecords convertion parameters.
RANDOM_SEED = 4242
SAMPLES_PER_FILES = 200
slim = tf.contrib.slim
class Dataset(object):
def __init__(self):
# Descriptions of the image items
self.items_descriptions = {
'image': 'A color image of varying height and width.',
'shape': 'Shape of the image',
'object/bbox': 'A list of bounding boxes, one per each object.',
'object/label': 'A list of labels, one per each object.',
}
# Features of Pascal VOC TFRecords.
self.features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
}
# Items in Pascal VOC TFRecords.
self.items = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'gt_bboxes': slim.tfexample_decoder.BoundingBox(['ymin','xmin','ymax','xmax'], 'image/object/bbox/'),
'gt_labels': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
'difficult_objects': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
}
# This function reads dataset from tfrecords
# Inputs:
# datase_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Outputs:
# loaded dataset
def read_dataset_from_tfrecords(self, dataset_name, train_or_test, dataset_path): | if dataset_name == 'pascalvoc_2007' or dataset_name == 'pascalvoc_2012':
dataset = self.load_dataset(dataset_name, train_or_test, dataset_path)
return dataset
# This function is used to load pascalvoc2007 or psaclvoc2012 datasets
# Inputs:
# dataset_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Output:
# loaded dataset
def load_dataset(self, dataset_name, train_or_test, dataset_path):
dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'
if dataset_name == 'pascalvoc_2007':
train_test_sizes = {
'train': FLAGS.pascalvoc_2007_train_size,
'test': FLAGS.pascalvoc_2007_test_size,
}
elif dataset_name == 'pascalvoc_2012':
train_test_sizes = {
'train': FLAGS.pascalvoc_2012_train_size,
}
dataset_file_name = os.path.join(dataset_path, dataset_file_name % train_or_test)
reader = tf.TFRecordReader
decoder = slim.tfexample_decoder.TFExampleDecoder(self.features, self.items)
return slim.dataset.Dataset(
data_sources=dataset_file_name,
reader=reader,
decoder=decoder,
num_samples=train_test_sizes[train_or_test],
items_to_descriptions=self.items_descriptions,
num_classes=FLAGS.num_classes-1,
labels_to_names=None)
# This function gets groundtruth bboxes & labels from dataset
# Inputs:
# dataset
# train_or_test: train/test
# Output:
# image, ground-truth bboxes, ground-truth labels, ground-truth difficult objects
def get_groundtruth_from_dataset(self, dataset, train_or_test):
# Dataset provider
with tf.name_scope(None, "get_groundtruth_from_dataset") as scope:
if train_or_test == 'test':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.test_num_readers,
common_queue_capacity=FLAGS.test_common_queue_capacity,
common_queue_min=FLAGS.test_batch_size,
shuffle=FLAGS.test_shuffle)
elif train_or_test == 'train':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers= FLAGS.train_num_readers,
common_queue_capacity= FLAGS.train_common_queue_capacity,
common_queue_min= 10 * FLAGS.train_batch_size,
shuffle=FLAGS.train_shuffle)
# Get images, groundtruth bboxes & groundtruth labels from database
[image, gt_bboxes, gt_labels] = provider.get(['image','gt_bboxes','gt_labels'])
# Discard difficult objects
gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64)
if FLAGS.test_discard_difficult_objects:
[gt_difficult_objects] = provider.get(['difficult_objects'])
return [image, gt_bboxes, gt_labels, gt_difficult_objects]
##########################################
# Convert PascalVOC to TF recorsd
# Process a image and annotation file.
# Inputs:
# filename: string, path to an image file e.g., '/path/to/example.JPG'.
# coder: instance of ImageCoder to provide TensorFlow image coding utils.
# Outputs:
# image_buffer: string, JPEG encoding of RGB image.
# height: integer, image height in pixels.
# width: integer, image width in pixels.
def _process_image_PascalVOC(self, directory, name):
# Read the image file.
filename = directory + DIRECTORY_IMAGES + name + '.jpg'
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Read the XML annotation file.
filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')
tree = ET.parse(filename)
root = tree.getroot()
# Image shape.
size = root.find('size')
shape = [int(size.find('height').text), int(size.find('width').text), int(size.find('depth').text)]
# Find annotations.
bboxes = []
labels = []
labels_text = []
difficult = []
truncated = []
for obj in root.findall('object'):
label = obj.find('name').text
labels.append(int(VOC_LABELS[label][0]))
labels_text.append(label.encode('ascii'))
if obj.find('difficult'):
difficult.append(int(obj.find('difficult').text))
else:
difficult.append(0)
if obj.find('truncated'):
truncated.append(int(obj.find('truncated').text))
else:
truncated.append(0)
bbox = obj.find('bndbox')
bboxes.append((float(bbox.find('ymin').text) / shape[0],
float(bbox.find('xmin').text) / shape[1],
float(bbox.find('ymax').text) / shape[0],
float(bbox.find('xmax').text) / shape[1]
))
return image_data, shape, bboxes, labels, labels_text, difficult, truncated
# Build an Example proto for an image example.
# Args:
# image_data: string, JPEG encoding of RGB image;
# labels: list of integers, identifier for the ground truth;
# labels_text: list of strings, human-readable labels;
# bboxes: list of bounding boxes; each box is a list of integers;
# shape: 3 integers, image shapes in pixels.
# Returns:
# Example proto
def _convert_to_example_PascalVOC(self, image_data, labels, labels_text, bboxes, shape, difficult, truncated):
xmin = []
ymin = []
xmax = []
ymax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': self.int64_feature(shape[0]),
'image/width': self.int64_feature(shape[1]),
'image/channels': self.int64_feature(shape[2]),
'image/shape': self.int64_feature(shape),
'image/object/bbox/xmin': self.float_feature(xmin),
'image/object/bbox/xmax': self.float_feature(xmax),
'image/object/bbox/ymin': self.float_feature(ymin),
'image/object/bbox/ymax': self.float_feature(ymax),
'image/object/bbox/label': self.int64_feature(labels),
'image/object/bbox/label_text': self.bytes_feature(labels_text),
'image/object/bbox/difficult': self.int64_feature(difficult),
'image/object/bbox/truncated': self.int64_feature(truncated),
'image/format': self.bytes_feature(image_format),
'image/encoded': self.bytes_feature(image_data)}))
return example
# Loads data from image and annotations files and add them to a TFRecord.
# Inputs:
# dataset_dir: Dataset directory;
# name: Image name to add to the TFRecord;
# tfrecord_writer: The TFRecord writer to use for writing.
def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):
image_data, shape, bboxes, labels, labels_text, difficult, truncated = self._process_image_PascalVOC(dataset_dir, name)
example = self._convert_to_example_PascalVOC(image_data, labels, labels_text, bboxes, shape, difficult, truncated)
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename_PascalVOC(output_dir, name, idx):
return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)
# Convert images to tfrecords
# Args:
# dataset_dir: The dataset directory where the dataset is stored.
# output_dir: Output directory.
def run_PascalVOC(self, dataset_dir, output_dir, name='voc_train', shuffling=False):
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
# Dataset filenames, and shuffling.
path = os.path.join(dataset_dir, DIRECTORY_ANNOTATIONS)
filenames = sorted(os.listdir(path))
if shuffling:
random.seed(RANDOM_SEED)
random.shuffle(filenames)
# Process dataset files.
i = 0
fidx = 0
while i < len(filenames):
# Open new TFRecord file.
tf_filename = self._get_output_filename(output_dir, name, fidx)
with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
j = 0
while i < len(filenames) and j < SAMPLES_PER_FILES:
sys.stdout.write('\r>> Converting image %d/%d' % (i+1, len(filenames)))
sys.stdout.flush()
filename = filenames[i]
img_name = filename[:-4]
self._add_to_tfrecord_PascalVOC(dataset_dir, img_name, tfrecord_writer)
i += 1
j += 1
fidx += 1
print('\n ImageDB to TF conversion finished. ')
# Wrapper for inserting int64 features into Example proto.
def int64_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
# Wrapper for inserting float features into Example proto.
def float_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
# Wrapper for inserting bytes features into Example proto.
def bytes_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value)) | with tf.name_scope(None, "read_dataset_from_tfrecords") as scope: | random_line_split |
dataset.py |
# This module is used to load pascalvoc datasets (2007 or 2012)
import os
import tensorflow as tf
from configs.config_common import *
from configs.config_train import *
from configs.config_test import *
import sys
import random
import numpy as np
import xml.etree.ElementTree as ET
# Original dataset organisation.
DIRECTORY_ANNOTATIONS = 'Annotations/'
DIRECTORY_IMAGES = 'JPEGImages/'
# TFRecords convertion parameters.
RANDOM_SEED = 4242
SAMPLES_PER_FILES = 200
slim = tf.contrib.slim
class Dataset(object):
def __init__(self):
# Descriptions of the image items
self.items_descriptions = {
'image': 'A color image of varying height and width.',
'shape': 'Shape of the image',
'object/bbox': 'A list of bounding boxes, one per each object.',
'object/label': 'A list of labels, one per each object.',
}
# Features of Pascal VOC TFRecords.
self.features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
}
# Items in Pascal VOC TFRecords.
self.items = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'gt_bboxes': slim.tfexample_decoder.BoundingBox(['ymin','xmin','ymax','xmax'], 'image/object/bbox/'),
'gt_labels': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
'difficult_objects': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
}
# This function reads dataset from tfrecords
# Inputs:
# datase_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Outputs:
# loaded dataset
def read_dataset_from_tfrecords(self, dataset_name, train_or_test, dataset_path):
with tf.name_scope(None, "read_dataset_from_tfrecords") as scope:
if dataset_name == 'pascalvoc_2007' or dataset_name == 'pascalvoc_2012':
dataset = self.load_dataset(dataset_name, train_or_test, dataset_path)
return dataset
# This function is used to load pascalvoc2007 or psaclvoc2012 datasets
# Inputs:
# dataset_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Output:
# loaded dataset
def load_dataset(self, dataset_name, train_or_test, dataset_path):
dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'
if dataset_name == 'pascalvoc_2007':
train_test_sizes = {
'train': FLAGS.pascalvoc_2007_train_size,
'test': FLAGS.pascalvoc_2007_test_size,
}
elif dataset_name == 'pascalvoc_2012':
|
dataset_file_name = os.path.join(dataset_path, dataset_file_name % train_or_test)
reader = tf.TFRecordReader
decoder = slim.tfexample_decoder.TFExampleDecoder(self.features, self.items)
return slim.dataset.Dataset(
data_sources=dataset_file_name,
reader=reader,
decoder=decoder,
num_samples=train_test_sizes[train_or_test],
items_to_descriptions=self.items_descriptions,
num_classes=FLAGS.num_classes-1,
labels_to_names=None)
# This function gets groundtruth bboxes & labels from dataset
# Inputs:
# dataset
# train_or_test: train/test
# Output:
# image, ground-truth bboxes, ground-truth labels, ground-truth difficult objects
def get_groundtruth_from_dataset(self, dataset, train_or_test):
# Dataset provider
with tf.name_scope(None, "get_groundtruth_from_dataset") as scope:
if train_or_test == 'test':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.test_num_readers,
common_queue_capacity=FLAGS.test_common_queue_capacity,
common_queue_min=FLAGS.test_batch_size,
shuffle=FLAGS.test_shuffle)
elif train_or_test == 'train':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers= FLAGS.train_num_readers,
common_queue_capacity= FLAGS.train_common_queue_capacity,
common_queue_min= 10 * FLAGS.train_batch_size,
shuffle=FLAGS.train_shuffle)
# Get images, groundtruth bboxes & groundtruth labels from database
[image, gt_bboxes, gt_labels] = provider.get(['image','gt_bboxes','gt_labels'])
# Discard difficult objects
gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64)
if FLAGS.test_discard_difficult_objects:
[gt_difficult_objects] = provider.get(['difficult_objects'])
return [image, gt_bboxes, gt_labels, gt_difficult_objects]
##########################################
# Convert PascalVOC to TF recorsd
# Process a image and annotation file.
# Inputs:
# filename: string, path to an image file e.g., '/path/to/example.JPG'.
# coder: instance of ImageCoder to provide TensorFlow image coding utils.
# Outputs:
# image_buffer: string, JPEG encoding of RGB image.
# height: integer, image height in pixels.
# width: integer, image width in pixels.
def _process_image_PascalVOC(self, directory, name):
# Read the image file.
filename = directory + DIRECTORY_IMAGES + name + '.jpg'
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Read the XML annotation file.
filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')
tree = ET.parse(filename)
root = tree.getroot()
# Image shape.
size = root.find('size')
shape = [int(size.find('height').text), int(size.find('width').text), int(size.find('depth').text)]
# Find annotations.
bboxes = []
labels = []
labels_text = []
difficult = []
truncated = []
for obj in root.findall('object'):
label = obj.find('name').text
labels.append(int(VOC_LABELS[label][0]))
labels_text.append(label.encode('ascii'))
if obj.find('difficult'):
difficult.append(int(obj.find('difficult').text))
else:
difficult.append(0)
if obj.find('truncated'):
truncated.append(int(obj.find('truncated').text))
else:
truncated.append(0)
bbox = obj.find('bndbox')
bboxes.append((float(bbox.find('ymin').text) / shape[0],
float(bbox.find('xmin').text) / shape[1],
float(bbox.find('ymax').text) / shape[0],
float(bbox.find('xmax').text) / shape[1]
))
return image_data, shape, bboxes, labels, labels_text, difficult, truncated
# Build an Example proto for an image example.
# Args:
# image_data: string, JPEG encoding of RGB image;
# labels: list of integers, identifier for the ground truth;
# labels_text: list of strings, human-readable labels;
# bboxes: list of bounding boxes; each box is a list of integers;
# shape: 3 integers, image shapes in pixels.
# Returns:
# Example proto
def _convert_to_example_PascalVOC(self, image_data, labels, labels_text, bboxes, shape, difficult, truncated):
xmin = []
ymin = []
xmax = []
ymax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': self.int64_feature(shape[0]),
'image/width': self.int64_feature(shape[1]),
'image/channels': self.int64_feature(shape[2]),
'image/shape': self.int64_feature(shape),
'image/object/bbox/xmin': self.float_feature(xmin),
'image/object/bbox/xmax': self.float_feature(xmax),
'image/object/bbox/ymin': self.float_feature(ymin),
'image/object/bbox/ymax': self.float_feature(ymax),
'image/object/bbox/label': self.int64_feature(labels),
'image/object/bbox/label_text': self.bytes_feature(labels_text),
'image/object/bbox/difficult': self.int64_feature(difficult),
'image/object/bbox/truncated': self.int64_feature(truncated),
'image/format': self.bytes_feature(image_format),
'image/encoded': self.bytes_feature(image_data)}))
return example
# Loads data from image and annotations files and add them to a TFRecord.
# Inputs:
# dataset_dir: Dataset directory;
# name: Image name to add to the TFRecord;
# tfrecord_writer: The TFRecord writer to use for writing.
def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):
image_data, shape, bboxes, labels, labels_text, difficult, truncated = self._process_image_PascalVOC(dataset_dir, name)
example = self._convert_to_example_PascalVOC(image_data, labels, labels_text, bboxes, shape, difficult, truncated)
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename_PascalVOC(output_dir, name, idx):
return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)
# Convert images to tfrecords
# Args:
# dataset_dir: The dataset directory where the dataset is stored.
# output_dir: Output directory.
def run_PascalVOC(self, dataset_dir, output_dir, name='voc_train', shuffling=False):
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
# Dataset filenames, and shuffling.
path = os.path.join(dataset_dir, DIRECTORY_ANNOTATIONS)
filenames = sorted(os.listdir(path))
if shuffling:
random.seed(RANDOM_SEED)
random.shuffle(filenames)
# Process dataset files.
i = 0
fidx = 0
while i < len(filenames):
# Open new TFRecord file.
tf_filename = self._get_output_filename(output_dir, name, fidx)
with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
j = 0
while i < len(filenames) and j < SAMPLES_PER_FILES:
sys.stdout.write('\r>> Converting image %d/%d' % (i+1, len(filenames)))
sys.stdout.flush()
filename = filenames[i]
img_name = filename[:-4]
self._add_to_tfrecord_PascalVOC(dataset_dir, img_name, tfrecord_writer)
i += 1
j += 1
fidx += 1
print('\n ImageDB to TF conversion finished. ')
# Wrapper for inserting int64 features into Example proto.
def int64_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
# Wrapper for inserting float features into Example proto.
def float_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
# Wrapper for inserting bytes features into Example proto.
def bytes_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
| train_test_sizes = {
'train': FLAGS.pascalvoc_2012_train_size,
} | conditional_block |
exec.rs | use std::{
ops::DerefMut,
pin::Pin,
sync::{Arc, Mutex},
task::{Context, Poll},
};
use crate::vbus::{
BusSpawnedProcess, VirtualBusError, VirtualBusInvokable, VirtualBusProcess, VirtualBusScope,
};
use futures::Future;
use tokio::sync::mpsc;
use tracing::*;
use wasmer::{FunctionEnvMut, Instance, Memory, Module, Store};
use wasmer_wasi_types::wasi::{Errno, ExitCode};
use super::{BinFactory, BinaryPackage, ModuleCache};
use crate::{
import_object_for_all_wasi_versions, runtime::SpawnType, SpawnedMemory, WasiEnv, WasiError,
WasiFunctionEnv, WasiRuntime,
};
pub fn spawn_exec(
binary: BinaryPackage,
name: &str,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync + 'static>,
compiled_modules: &ModuleCache,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Load the module
#[cfg(feature = "sys")]
let compiler = store.engine().name();
#[cfg(not(feature = "sys"))]
let compiler = "generic";
#[cfg(feature = "sys")]
let module = compiled_modules.get_compiled_module(&store, binary.hash().as_str(), compiler);
#[cfg(not(feature = "sys"))]
let module = compiled_modules.get_compiled_module(binary.hash().as_str(), compiler);
let module = match (module, binary.entry.as_ref()) {
(Some(a), _) => a,
(None, Some(entry)) => {
let module = Module::new(&store, &entry[..]).map_err(|err| {
error!(
"failed to compile module [{}, len={}] - {}",
name,
entry.len(),
err
);
VirtualBusError::CompileError
});
if module.is_err() {
env.cleanup(Some(Errno::Noexec as ExitCode));
}
let module = module?;
compiled_modules.set_compiled_module(binary.hash().as_str(), compiler, &module);
module
}
(None, None) => {
error!("package has no entry [{}]", name,);
env.cleanup(Some(Errno::Noexec as ExitCode));
return Err(VirtualBusError::CompileError);
}
};
// If the file system has not already been union'ed then do so
env.state.fs.conditional_union(&binary);
// Now run the module
let mut ret = spawn_exec_module(module, store, env, runtime);
if let Ok(ret) = ret.as_mut() {
ret.module_memory_footprint = binary.module_memory_footprint;
ret.file_system_memory_footprint = binary.file_system_memory_footprint;
}
ret
}
pub fn spawn_exec_module(
module: Module,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync + 'static>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Create a new task manager
let tasks = runtime.task_manager();
// Create the signaler
let pid = env.pid();
let signaler = Box::new(env.process.clone());
// Now run the binary
let (exit_code_tx, exit_code_rx) = mpsc::unbounded_channel();
{
// Determine if shared memory needs to be created and imported
let shared_memory = module.imports().memories().next().map(|a| *a.ty());
// Determine if we are going to create memory and import it or just rely on self creation of memory
let memory_spawn = match shared_memory {
Some(ty) => {
#[cfg(feature = "sys")]
let style = store.tunables().memory_style(&ty);
SpawnType::CreateWithType(SpawnedMemory {
ty,
#[cfg(feature = "sys")]
style,
})
}
None => SpawnType::Create,
};
// Create a thread that will run this process
let runtime = runtime.clone();
let tasks_outer = tasks.clone();
let task = {
let spawn_type = memory_spawn;
let mut store = store;
move || {
// Create the WasiFunctionEnv
let mut wasi_env = env;
wasi_env.runtime = runtime;
let memory = match wasi_env.tasks().build_memory(spawn_type) {
Ok(m) => m,
Err(err) => {
error!("wasi[{}]::wasm could not build memory error ({})", pid, err);
wasi_env.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
let mut wasi_env = WasiFunctionEnv::new(&mut store, wasi_env);
// Let's instantiate the module with the imports.
let (mut import_object, init) =
import_object_for_all_wasi_versions(&module, &mut store, &wasi_env.env);
if let Some(memory) = memory {
import_object.define(
"env",
"memory",
Memory::new_from_existing(&mut store, memory),
);
}
let instance = match Instance::new(&mut store, &module, &import_object) {
Ok(a) => a,
Err(err) => {
error!("wasi[{}]::wasm instantiate error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
init(&instance, &store).unwrap();
// Initialize the WASI environment
if let Err(err) = wasi_env.initialize(&mut store, instance.clone()) {
error!("wasi[{}]::wasi initialize error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
// If this module exports an _initialize function, run that first.
if let Ok(initialize) = instance.exports.get_function("_initialize") {
if let Err(e) = initialize.call(&mut store, &[]) {
let code = match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code as ExitCode,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as ExitCode
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
Errno::Noexec as ExitCode
}
};
let _ = exit_code_tx.send(code);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
}
// Let's call the `_start` function, which is our `main` function in Rust.
let start = instance.exports.get_function("_start").ok();
// If there is a start function
debug!("wasi[{}]::called main()", pid);
// TODO: rewrite to use crate::run_wasi_func
let ret = if let Some(start) = start {
match start.call(&mut store, &[]) {
Ok(_) => 0,
Err(e) => match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as u32
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
9999u32
}
},
}
} else {
debug!("wasi[{}]::exec-failed: missing _start function", pid);
Errno::Noexec as u32
};
debug!("wasi[{}]::main() has exited with {}", pid, ret);
// Cleanup the environment
wasi_env.data(&store).cleanup(Some(ret));
// Send the result
let _ = exit_code_tx.send(ret);
drop(exit_code_tx);
}
};
// TODO: handle this better - required because of Module not being Send.
#[cfg(feature = "js")]
let task = {
struct UnsafeWrapper {
inner: Box<dyn FnOnce() + 'static>,
}
unsafe impl Send for UnsafeWrapper {}
let inner = UnsafeWrapper {
inner: Box::new(task),
};
move || {
(inner.inner)();
}
};
tasks_outer.task_wasm(Box::new(task)).map_err(|err| {
error!("wasi[{}]::failed to launch module - {}", pid, err);
VirtualBusError::UnknownError
})?
};
let inst = Box::new(SpawnedProcess {
exit_code: Mutex::new(None),
exit_code_rx: Mutex::new(exit_code_rx),
});
Ok(BusSpawnedProcess {
inst,
stdin: None,
stdout: None,
stderr: None,
signaler: Some(signaler),
module_memory_footprint: 0,
file_system_memory_footprint: 0,
})
}
impl BinFactory {
pub fn spawn<'a>(
&'a self,
name: String,
store: Store,
env: WasiEnv,
) -> Pin<Box<dyn Future<Output = Result<BusSpawnedProcess, VirtualBusError>> + 'a>> {
Box::pin(async move {
// Find the binary (or die trying) and make the spawn type
let binary = self
.get_binary(name.as_str(), Some(env.fs_root()))
.await
.ok_or(VirtualBusError::NotFound);
if binary.is_err() {
env.cleanup(Some(Errno::Noent as ExitCode));
}
let binary = binary?;
// Execute
spawn_exec(
binary,
name.as_str(),
store,
env,
&self.runtime,
&self.cache,
)
})
}
pub fn try_built_in(
&self,
name: String,
parent_ctx: Option<&FunctionEnvMut<'_, WasiEnv>>,
store: &mut Option<Store>,
builder: &mut Option<WasiEnv>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// We check for built in commands
if let Some(parent_ctx) = parent_ctx | else if self.commands.exists(name.as_str()) {
tracing::warn!("builtin command without a parent ctx - {}", name);
}
Err(VirtualBusError::NotFound)
}
}
#[derive(Debug)]
pub(crate) struct SpawnedProcess {
pub exit_code: Mutex<Option<ExitCode>>,
pub exit_code_rx: Mutex<mpsc::UnboundedReceiver<ExitCode>>,
}
impl VirtualBusProcess for SpawnedProcess {
fn exit_code(&self) -> Option<ExitCode> {
let mut exit_code = self.exit_code.lock().unwrap();
if let Some(exit_code) = exit_code.as_ref() {
return Some(*exit_code);
}
let mut rx = self.exit_code_rx.lock().unwrap();
match rx.try_recv() {
Ok(code) => {
exit_code.replace(code);
Some(code)
}
Err(mpsc::error::TryRecvError::Disconnected) => {
let code = Errno::Canceled as ExitCode;
exit_code.replace(code);
Some(code)
}
_ => None,
}
}
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
{
let exit_code = self.exit_code.lock().unwrap();
if exit_code.is_some() {
return Poll::Ready(());
}
}
let mut rx = self.exit_code_rx.lock().unwrap();
let mut rx = Pin::new(rx.deref_mut());
match rx.poll_recv(cx) {
Poll::Ready(code) => {
let code = code.unwrap_or(Errno::Canceled as ExitCode);
{
let mut exit_code = self.exit_code.lock().unwrap();
exit_code.replace(code);
}
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
}
}
impl VirtualBusScope for SpawnedProcess {
fn poll_finished(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
VirtualBusProcess::poll_ready(self, cx)
}
}
impl VirtualBusInvokable for SpawnedProcess {}
| {
if self.commands.exists(name.as_str()) {
return self
.commands
.exec(parent_ctx, name.as_str(), store, builder);
}
} | conditional_block |
exec.rs | use std::{
ops::DerefMut,
pin::Pin,
sync::{Arc, Mutex},
task::{Context, Poll},
};
use crate::vbus::{
BusSpawnedProcess, VirtualBusError, VirtualBusInvokable, VirtualBusProcess, VirtualBusScope,
};
use futures::Future;
use tokio::sync::mpsc;
use tracing::*;
use wasmer::{FunctionEnvMut, Instance, Memory, Module, Store};
use wasmer_wasi_types::wasi::{Errno, ExitCode};
use super::{BinFactory, BinaryPackage, ModuleCache};
use crate::{
import_object_for_all_wasi_versions, runtime::SpawnType, SpawnedMemory, WasiEnv, WasiError,
WasiFunctionEnv, WasiRuntime,
};
pub fn spawn_exec(
binary: BinaryPackage,
name: &str,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync + 'static>,
compiled_modules: &ModuleCache,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Load the module
#[cfg(feature = "sys")]
let compiler = store.engine().name();
#[cfg(not(feature = "sys"))]
let compiler = "generic";
#[cfg(feature = "sys")]
let module = compiled_modules.get_compiled_module(&store, binary.hash().as_str(), compiler);
#[cfg(not(feature = "sys"))]
let module = compiled_modules.get_compiled_module(binary.hash().as_str(), compiler);
let module = match (module, binary.entry.as_ref()) {
(Some(a), _) => a,
(None, Some(entry)) => {
let module = Module::new(&store, &entry[..]).map_err(|err| {
error!(
"failed to compile module [{}, len={}] - {}",
name,
entry.len(),
err
);
VirtualBusError::CompileError
});
if module.is_err() {
env.cleanup(Some(Errno::Noexec as ExitCode));
}
let module = module?;
compiled_modules.set_compiled_module(binary.hash().as_str(), compiler, &module);
module
}
(None, None) => {
error!("package has no entry [{}]", name,);
env.cleanup(Some(Errno::Noexec as ExitCode));
return Err(VirtualBusError::CompileError);
}
};
// If the file system has not already been union'ed then do so
env.state.fs.conditional_union(&binary);
// Now run the module
let mut ret = spawn_exec_module(module, store, env, runtime);
if let Ok(ret) = ret.as_mut() {
ret.module_memory_footprint = binary.module_memory_footprint;
ret.file_system_memory_footprint = binary.file_system_memory_footprint;
}
ret
}
pub fn | (
module: Module,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync + 'static>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Create a new task manager
let tasks = runtime.task_manager();
// Create the signaler
let pid = env.pid();
let signaler = Box::new(env.process.clone());
// Now run the binary
let (exit_code_tx, exit_code_rx) = mpsc::unbounded_channel();
{
// Determine if shared memory needs to be created and imported
let shared_memory = module.imports().memories().next().map(|a| *a.ty());
// Determine if we are going to create memory and import it or just rely on self creation of memory
let memory_spawn = match shared_memory {
Some(ty) => {
#[cfg(feature = "sys")]
let style = store.tunables().memory_style(&ty);
SpawnType::CreateWithType(SpawnedMemory {
ty,
#[cfg(feature = "sys")]
style,
})
}
None => SpawnType::Create,
};
// Create a thread that will run this process
let runtime = runtime.clone();
let tasks_outer = tasks.clone();
let task = {
let spawn_type = memory_spawn;
let mut store = store;
move || {
// Create the WasiFunctionEnv
let mut wasi_env = env;
wasi_env.runtime = runtime;
let memory = match wasi_env.tasks().build_memory(spawn_type) {
Ok(m) => m,
Err(err) => {
error!("wasi[{}]::wasm could not build memory error ({})", pid, err);
wasi_env.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
let mut wasi_env = WasiFunctionEnv::new(&mut store, wasi_env);
// Let's instantiate the module with the imports.
let (mut import_object, init) =
import_object_for_all_wasi_versions(&module, &mut store, &wasi_env.env);
if let Some(memory) = memory {
import_object.define(
"env",
"memory",
Memory::new_from_existing(&mut store, memory),
);
}
let instance = match Instance::new(&mut store, &module, &import_object) {
Ok(a) => a,
Err(err) => {
error!("wasi[{}]::wasm instantiate error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
init(&instance, &store).unwrap();
// Initialize the WASI environment
if let Err(err) = wasi_env.initialize(&mut store, instance.clone()) {
error!("wasi[{}]::wasi initialize error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
// If this module exports an _initialize function, run that first.
if let Ok(initialize) = instance.exports.get_function("_initialize") {
if let Err(e) = initialize.call(&mut store, &[]) {
let code = match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code as ExitCode,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as ExitCode
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
Errno::Noexec as ExitCode
}
};
let _ = exit_code_tx.send(code);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
}
// Let's call the `_start` function, which is our `main` function in Rust.
let start = instance.exports.get_function("_start").ok();
// If there is a start function
debug!("wasi[{}]::called main()", pid);
// TODO: rewrite to use crate::run_wasi_func
let ret = if let Some(start) = start {
match start.call(&mut store, &[]) {
Ok(_) => 0,
Err(e) => match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as u32
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
9999u32
}
},
}
} else {
debug!("wasi[{}]::exec-failed: missing _start function", pid);
Errno::Noexec as u32
};
debug!("wasi[{}]::main() has exited with {}", pid, ret);
// Cleanup the environment
wasi_env.data(&store).cleanup(Some(ret));
// Send the result
let _ = exit_code_tx.send(ret);
drop(exit_code_tx);
}
};
// TODO: handle this better - required because of Module not being Send.
#[cfg(feature = "js")]
let task = {
struct UnsafeWrapper {
inner: Box<dyn FnOnce() + 'static>,
}
unsafe impl Send for UnsafeWrapper {}
let inner = UnsafeWrapper {
inner: Box::new(task),
};
move || {
(inner.inner)();
}
};
tasks_outer.task_wasm(Box::new(task)).map_err(|err| {
error!("wasi[{}]::failed to launch module - {}", pid, err);
VirtualBusError::UnknownError
})?
};
let inst = Box::new(SpawnedProcess {
exit_code: Mutex::new(None),
exit_code_rx: Mutex::new(exit_code_rx),
});
Ok(BusSpawnedProcess {
inst,
stdin: None,
stdout: None,
stderr: None,
signaler: Some(signaler),
module_memory_footprint: 0,
file_system_memory_footprint: 0,
})
}
impl BinFactory {
pub fn spawn<'a>(
&'a self,
name: String,
store: Store,
env: WasiEnv,
) -> Pin<Box<dyn Future<Output = Result<BusSpawnedProcess, VirtualBusError>> + 'a>> {
Box::pin(async move {
// Find the binary (or die trying) and make the spawn type
let binary = self
.get_binary(name.as_str(), Some(env.fs_root()))
.await
.ok_or(VirtualBusError::NotFound);
if binary.is_err() {
env.cleanup(Some(Errno::Noent as ExitCode));
}
let binary = binary?;
// Execute
spawn_exec(
binary,
name.as_str(),
store,
env,
&self.runtime,
&self.cache,
)
})
}
pub fn try_built_in(
&self,
name: String,
parent_ctx: Option<&FunctionEnvMut<'_, WasiEnv>>,
store: &mut Option<Store>,
builder: &mut Option<WasiEnv>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// We check for built in commands
if let Some(parent_ctx) = parent_ctx {
if self.commands.exists(name.as_str()) {
return self
.commands
.exec(parent_ctx, name.as_str(), store, builder);
}
} else if self.commands.exists(name.as_str()) {
tracing::warn!("builtin command without a parent ctx - {}", name);
}
Err(VirtualBusError::NotFound)
}
}
#[derive(Debug)]
pub(crate) struct SpawnedProcess {
pub exit_code: Mutex<Option<ExitCode>>,
pub exit_code_rx: Mutex<mpsc::UnboundedReceiver<ExitCode>>,
}
impl VirtualBusProcess for SpawnedProcess {
fn exit_code(&self) -> Option<ExitCode> {
let mut exit_code = self.exit_code.lock().unwrap();
if let Some(exit_code) = exit_code.as_ref() {
return Some(*exit_code);
}
let mut rx = self.exit_code_rx.lock().unwrap();
match rx.try_recv() {
Ok(code) => {
exit_code.replace(code);
Some(code)
}
Err(mpsc::error::TryRecvError::Disconnected) => {
let code = Errno::Canceled as ExitCode;
exit_code.replace(code);
Some(code)
}
_ => None,
}
}
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
{
let exit_code = self.exit_code.lock().unwrap();
if exit_code.is_some() {
return Poll::Ready(());
}
}
let mut rx = self.exit_code_rx.lock().unwrap();
let mut rx = Pin::new(rx.deref_mut());
match rx.poll_recv(cx) {
Poll::Ready(code) => {
let code = code.unwrap_or(Errno::Canceled as ExitCode);
{
let mut exit_code = self.exit_code.lock().unwrap();
exit_code.replace(code);
}
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
}
}
impl VirtualBusScope for SpawnedProcess {
fn poll_finished(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
VirtualBusProcess::poll_ready(self, cx)
}
}
impl VirtualBusInvokable for SpawnedProcess {}
| spawn_exec_module | identifier_name |
exec.rs | use std::{
ops::DerefMut,
pin::Pin,
sync::{Arc, Mutex},
task::{Context, Poll},
};
use crate::vbus::{
BusSpawnedProcess, VirtualBusError, VirtualBusInvokable, VirtualBusProcess, VirtualBusScope,
};
use futures::Future;
use tokio::sync::mpsc;
use tracing::*;
use wasmer::{FunctionEnvMut, Instance, Memory, Module, Store};
use wasmer_wasi_types::wasi::{Errno, ExitCode};
use super::{BinFactory, BinaryPackage, ModuleCache};
use crate::{
import_object_for_all_wasi_versions, runtime::SpawnType, SpawnedMemory, WasiEnv, WasiError,
WasiFunctionEnv, WasiRuntime,
};
pub fn spawn_exec(
binary: BinaryPackage,
name: &str,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync + 'static>,
compiled_modules: &ModuleCache,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Load the module
#[cfg(feature = "sys")]
let compiler = store.engine().name();
#[cfg(not(feature = "sys"))]
let compiler = "generic";
#[cfg(feature = "sys")]
let module = compiled_modules.get_compiled_module(&store, binary.hash().as_str(), compiler);
#[cfg(not(feature = "sys"))]
let module = compiled_modules.get_compiled_module(binary.hash().as_str(), compiler);
let module = match (module, binary.entry.as_ref()) {
(Some(a), _) => a,
(None, Some(entry)) => {
let module = Module::new(&store, &entry[..]).map_err(|err| {
error!(
"failed to compile module [{}, len={}] - {}",
name,
entry.len(),
err
);
VirtualBusError::CompileError
});
if module.is_err() {
env.cleanup(Some(Errno::Noexec as ExitCode));
}
let module = module?;
compiled_modules.set_compiled_module(binary.hash().as_str(), compiler, &module);
module
}
(None, None) => {
error!("package has no entry [{}]", name,);
env.cleanup(Some(Errno::Noexec as ExitCode));
return Err(VirtualBusError::CompileError);
}
};
// If the file system has not already been union'ed then do so
env.state.fs.conditional_union(&binary);
// Now run the module
let mut ret = spawn_exec_module(module, store, env, runtime);
if let Ok(ret) = ret.as_mut() {
ret.module_memory_footprint = binary.module_memory_footprint;
ret.file_system_memory_footprint = binary.file_system_memory_footprint;
}
ret
}
pub fn spawn_exec_module(
module: Module,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync + 'static>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Create a new task manager
let tasks = runtime.task_manager();
// Create the signaler
let pid = env.pid();
let signaler = Box::new(env.process.clone());
// Now run the binary
let (exit_code_tx, exit_code_rx) = mpsc::unbounded_channel();
{
// Determine if shared memory needs to be created and imported
let shared_memory = module.imports().memories().next().map(|a| *a.ty());
// Determine if we are going to create memory and import it or just rely on self creation of memory
let memory_spawn = match shared_memory {
Some(ty) => {
#[cfg(feature = "sys")]
let style = store.tunables().memory_style(&ty);
SpawnType::CreateWithType(SpawnedMemory {
ty,
#[cfg(feature = "sys")]
style,
})
}
None => SpawnType::Create,
};
// Create a thread that will run this process
let runtime = runtime.clone();
let tasks_outer = tasks.clone();
let task = {
let spawn_type = memory_spawn;
let mut store = store;
move || {
// Create the WasiFunctionEnv
let mut wasi_env = env;
wasi_env.runtime = runtime;
let memory = match wasi_env.tasks().build_memory(spawn_type) {
Ok(m) => m,
Err(err) => {
error!("wasi[{}]::wasm could not build memory error ({})", pid, err);
wasi_env.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
let mut wasi_env = WasiFunctionEnv::new(&mut store, wasi_env);
// Let's instantiate the module with the imports.
let (mut import_object, init) =
import_object_for_all_wasi_versions(&module, &mut store, &wasi_env.env);
if let Some(memory) = memory {
import_object.define(
"env",
"memory",
Memory::new_from_existing(&mut store, memory),
);
}
let instance = match Instance::new(&mut store, &module, &import_object) {
Ok(a) => a,
Err(err) => {
error!("wasi[{}]::wasm instantiate error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
init(&instance, &store).unwrap();
// Initialize the WASI environment
if let Err(err) = wasi_env.initialize(&mut store, instance.clone()) {
error!("wasi[{}]::wasi initialize error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
// If this module exports an _initialize function, run that first.
if let Ok(initialize) = instance.exports.get_function("_initialize") {
if let Err(e) = initialize.call(&mut store, &[]) {
let code = match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code as ExitCode,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as ExitCode
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
Errno::Noexec as ExitCode
}
};
let _ = exit_code_tx.send(code);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
}
// Let's call the `_start` function, which is our `main` function in Rust.
let start = instance.exports.get_function("_start").ok();
// If there is a start function
debug!("wasi[{}]::called main()", pid);
// TODO: rewrite to use crate::run_wasi_func
let ret = if let Some(start) = start {
match start.call(&mut store, &[]) {
Ok(_) => 0,
Err(e) => match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as u32
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
9999u32
}
},
}
} else {
debug!("wasi[{}]::exec-failed: missing _start function", pid);
Errno::Noexec as u32
};
debug!("wasi[{}]::main() has exited with {}", pid, ret);
// Cleanup the environment
wasi_env.data(&store).cleanup(Some(ret));
// Send the result
let _ = exit_code_tx.send(ret);
drop(exit_code_tx);
}
};
// TODO: handle this better - required because of Module not being Send.
#[cfg(feature = "js")]
let task = { |
unsafe impl Send for UnsafeWrapper {}
let inner = UnsafeWrapper {
inner: Box::new(task),
};
move || {
(inner.inner)();
}
};
tasks_outer.task_wasm(Box::new(task)).map_err(|err| {
error!("wasi[{}]::failed to launch module - {}", pid, err);
VirtualBusError::UnknownError
})?
};
let inst = Box::new(SpawnedProcess {
exit_code: Mutex::new(None),
exit_code_rx: Mutex::new(exit_code_rx),
});
Ok(BusSpawnedProcess {
inst,
stdin: None,
stdout: None,
stderr: None,
signaler: Some(signaler),
module_memory_footprint: 0,
file_system_memory_footprint: 0,
})
}
impl BinFactory {
pub fn spawn<'a>(
&'a self,
name: String,
store: Store,
env: WasiEnv,
) -> Pin<Box<dyn Future<Output = Result<BusSpawnedProcess, VirtualBusError>> + 'a>> {
Box::pin(async move {
// Find the binary (or die trying) and make the spawn type
let binary = self
.get_binary(name.as_str(), Some(env.fs_root()))
.await
.ok_or(VirtualBusError::NotFound);
if binary.is_err() {
env.cleanup(Some(Errno::Noent as ExitCode));
}
let binary = binary?;
// Execute
spawn_exec(
binary,
name.as_str(),
store,
env,
&self.runtime,
&self.cache,
)
})
}
pub fn try_built_in(
&self,
name: String,
parent_ctx: Option<&FunctionEnvMut<'_, WasiEnv>>,
store: &mut Option<Store>,
builder: &mut Option<WasiEnv>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// We check for built in commands
if let Some(parent_ctx) = parent_ctx {
if self.commands.exists(name.as_str()) {
return self
.commands
.exec(parent_ctx, name.as_str(), store, builder);
}
} else if self.commands.exists(name.as_str()) {
tracing::warn!("builtin command without a parent ctx - {}", name);
}
Err(VirtualBusError::NotFound)
}
}
#[derive(Debug)]
pub(crate) struct SpawnedProcess {
pub exit_code: Mutex<Option<ExitCode>>,
pub exit_code_rx: Mutex<mpsc::UnboundedReceiver<ExitCode>>,
}
impl VirtualBusProcess for SpawnedProcess {
fn exit_code(&self) -> Option<ExitCode> {
let mut exit_code = self.exit_code.lock().unwrap();
if let Some(exit_code) = exit_code.as_ref() {
return Some(*exit_code);
}
let mut rx = self.exit_code_rx.lock().unwrap();
match rx.try_recv() {
Ok(code) => {
exit_code.replace(code);
Some(code)
}
Err(mpsc::error::TryRecvError::Disconnected) => {
let code = Errno::Canceled as ExitCode;
exit_code.replace(code);
Some(code)
}
_ => None,
}
}
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
{
let exit_code = self.exit_code.lock().unwrap();
if exit_code.is_some() {
return Poll::Ready(());
}
}
let mut rx = self.exit_code_rx.lock().unwrap();
let mut rx = Pin::new(rx.deref_mut());
match rx.poll_recv(cx) {
Poll::Ready(code) => {
let code = code.unwrap_or(Errno::Canceled as ExitCode);
{
let mut exit_code = self.exit_code.lock().unwrap();
exit_code.replace(code);
}
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
}
}
impl VirtualBusScope for SpawnedProcess {
fn poll_finished(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
VirtualBusProcess::poll_ready(self, cx)
}
}
impl VirtualBusInvokable for SpawnedProcess {} | struct UnsafeWrapper {
inner: Box<dyn FnOnce() + 'static>,
} | random_line_split |
lib.rs | // Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! A crate that hosts a common definitions that are relevant for the pallet-contracts.
#![cfg_attr(not(feature = "std"), no_std)]
use codec::{Decode, Encode};
use frame_support::traits::{Currency, Time};
#[cfg(feature = "std")] |
#[cfg(feature = "std")]
use std::fmt::Debug;
use sp_std::prelude::*;
pub mod abi;
pub mod contract_metadata;
pub mod gateway_inbound_protocol;
pub mod transfers;
pub use gateway_inbound_protocol::GatewayInboundProtocol;
pub type ChainId = [u8; 4];
#[derive(Clone, Eq, PartialEq, PartialOrd, Ord, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayType {
ProgrammableInternal(u32),
ProgrammableExternal(u32),
TxOnly(u32),
}
impl GatewayType {
pub fn fetch_nonce(self) -> u32 {
match self {
Self::ProgrammableInternal(nonce) => nonce,
Self::ProgrammableExternal(nonce) => nonce,
Self::TxOnly(nonce) => nonce,
}
}
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayVendor {
Substrate,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
/// Structure used at gateway registration as a starting point for multi-finality-verifier
pub struct GenericPrimitivesHeader {
pub parent_hash: Option<sp_core::hash::H256>,
pub number: u64,
pub state_root: Option<sp_core::hash::H256>,
pub extrinsics_root: Option<sp_core::hash::H256>,
pub digest: Option<sp_runtime::generic::Digest<sp_core::hash::H256>>,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct GatewayPointer {
pub id: ChainId,
pub vendor: GatewayVendor,
pub gateway_type: GatewayType,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct GatewayGenesisConfig {
/// SCALE-encoded modules following the format of selected frame_metadata::RuntimeMetadataVXX
pub modules_encoded: Option<Vec<u8>>,
/// SCALE-encoded signed extension - see more at frame_metadata::ExtrinsicMetadata
pub signed_extension: Option<Vec<u8>>,
/// Runtime version
pub runtime_version: sp_version::RuntimeVersion,
/// Extrinsics version
pub extrinsics_version: u8,
/// Genesis hash - block id of the genesis block use to distinct the network and sign messages
/// Length depending on parameter passed in abi::GatewayABIConfig
pub genesis_hash: Vec<u8>,
}
impl Default for GatewayGenesisConfig {
fn default() -> Self {
Self {
extrinsics_version: 0,
runtime_version: Default::default(),
genesis_hash: vec![],
modules_encoded: None,
signed_extension: None,
}
}
}
/// A struct that encodes RPC parameters required for a call to a smart-contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct Compose<Account, Balance> {
pub name: Vec<u8>,
pub code_txt: Vec<u8>,
pub exec_type: Vec<u8>,
pub dest: Account,
pub value: Balance,
pub bytes: Vec<u8>,
pub input_data: Vec<u8>,
}
/// A result type of a get storage call.
pub type FetchContractsResult = Result<Vec<u8>, ContractAccessError>;
pub type RegistryContractId<T> = <T as frame_system::Config>::Hash;
/// A result of execution of a contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ComposableExecResult {
/// The contract returned successfully.
///
/// There is a status code and, optionally, some data returned by the contract.
Success {
/// Flags that the contract passed along on returning to alter its exit behaviour.
/// Described in `pallet_contracts::exec::ReturnFlags`.
flags: u32,
/// Output data returned by the contract.
///
/// Can be empty.
data: Vec<u8>,
/// How much gas was consumed by the call.
gas_consumed: u64,
},
/// The contract execution either trapped or returned an error.
Error,
}
/// The possible errors that can happen querying the storage of a contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ContractAccessError {
/// The given address doesn't point to a contract.
DoesntExist,
/// The specified contract is a tombstone and thus cannot have any storage.
IsTombstone,
}
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExecPhase<Account, Balance> {
pub steps: Vec<ExecStep<Account, Balance>>,
}
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExecStep<Account, Balance> {
pub compose: Compose<Account, Balance>,
}
pub type GenericAddress = sp_runtime::MultiAddress<sp_runtime::AccountId32, ()>;
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct InterExecSchedule<Account, Balance> {
pub phases: Vec<ExecPhase<Account, Balance>>,
}
pub trait EscrowTrait: frame_system::Config + pallet_sudo::Config {
type Currency: Currency<Self::AccountId>;
type Time: Time;
}
type Bytes = Vec<u8>;
/// Outbound Step that specifies expected transmission medium for relayers connecting with that gateway.
/// Request message format that derivative of could be compatible with JSON-RPC API
/// with either signed or unsigned payload or custom transmission medium like XCMP protocol
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct CircuitOutboundMessage {
/// Message name/identifier
pub name: Bytes,
/// Module/pallet name
pub module_name: Bytes,
/// Method name
pub method_name: Bytes,
/// Encoded sender's public key
pub sender: Option<Bytes>,
/// Encoded target's public key
pub target: Option<Bytes>,
/// Array of next arguments: encoded bytes of arguments that that JSON-RPC API expects
pub arguments: Vec<Bytes>,
/// Expected results
pub expected_output: Vec<GatewayExpectedOutput>,
/// Extra payload in case the message is signed or uses custom delivery protocols like XCMP
pub extra_payload: Option<ExtraMessagePayload>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct RpcPayloadUnsigned<'a> {
pub method_name: &'a str,
pub params: Vec<Bytes>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct RpcPayloadSigned<'a> {
pub method_name: &'a str,
pub signed_extrinsic: Bytes,
}
impl CircuitOutboundMessage {
pub fn to_jsonrpc_unsigned(&self) -> Result<RpcPayloadUnsigned, &'static str> {
let method_name: &str = sp_std::str::from_utf8(&self.name[..])
.map_err(|_| "`Can't decode method name to &str")?;
Ok(RpcPayloadUnsigned {
method_name,
params: self.arguments.clone(),
})
}
pub fn to_jsonrpc_signed(&self) -> Result<RpcPayloadSigned, &'static str> {
let method_name: &str = sp_std::str::from_utf8(&self.name[..])
.map_err(|_| "`Can't decode method name to &str")?;
let signed_ext = self
.extra_payload
.as_ref()
.map(|payload| payload.tx_signed.clone())
.ok_or("no signed extrinsic provided")?;
Ok(RpcPayloadSigned {
method_name,
signed_extrinsic: signed_ext,
})
}
}
/// Inclusion proofs of different tries
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ProofTriePointer {
/// Proof is a merkle path in the state trie
State,
/// Proof is a merkle path in the transaction trie (extrisics in Substrate)
Transaction,
/// Proof is a merkle path in the receipts trie (in Substrate logs are entries in state trie, this doesn't apply)
Receipts,
}
/// Inbound Steps that specifie expected data deposited by relayers back to the Circuit after each step
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct CircuitInboundResult {
pub result_format: Bytes,
pub proof_type: ProofTriePointer,
}
/// Inbound Steps that specifie expected data deposited by relayers back to the Circuit after each step
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayExpectedOutput {
/// Effect would be the modified storage key
Storage {
key: Vec<Vec<u8>>,
// key: Vec<sp_core::storage::StorageKey>,
// value: Vec<Option<sp_core::storage::StorageData>>,
value: Vec<Option<Bytes>>,
},
/// Expect events as a result of that call - will be described with signature
/// and check against the corresponding types upon receiving
Events { signatures: Vec<Bytes> },
/// Yet another event or Storage output
Extrinsic {
/// Optionally expect dispatch of extrinsic only at the certain block height
block_height: Option<u64>,
},
/// Yet another event or Storage output. If expecting output u can define its type format.
Output { output: Bytes },
}
/// Outbound Step that specifies expected transmission medium for relayers connecting with that gateway.
/// Extra payload in case the message is signed ro has other custom parameters required by linking protocol.
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExtraMessagePayload {
pub signer: Bytes,
/// Encoded utf-8 string of module name that implements requested entrypoint
pub module_name: Bytes,
/// Encoded utf-8 string of method name that implements requested entrypoint
pub method_name: Bytes,
/// Encoded call bytes
pub call_bytes: Bytes,
/// Encoded tx signature
pub signature: Bytes,
/// Encoded extras to that transctions, like versions and gas price /tips for miners. Check GenericExtra for more info.
pub extra: Bytes,
/// Encoded and signed transaction ready to send
pub tx_signed: Bytes,
/// Custom message bytes, that would have to be decoded by the receiving end.
/// Could be utilized by custom transmission medium (like Substrate's XCMP)
pub custom_payload: Option<Bytes>,
}
/// Retrieves all available gateways for a given ChainId.
/// Currently returns a vector with a single hardcoded result.
/// Eventually this will search all known gateways on pallet-xdns.
pub fn retrieve_gateway_pointers(gateway_id: ChainId) -> Result<Vec<GatewayPointer>, &'static str> {
Ok(vec![GatewayPointer {
id: gateway_id,
gateway_type: GatewayType::ProgrammableExternal(0),
vendor: GatewayVendor::Substrate,
}])
} | use serde::{Deserialize, Serialize};
#[cfg(feature = "no_std")]
use sp_runtime::RuntimeDebug as Debug; | random_line_split |
lib.rs | // Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! A crate that hosts a common definitions that are relevant for the pallet-contracts.
#![cfg_attr(not(feature = "std"), no_std)]
use codec::{Decode, Encode};
use frame_support::traits::{Currency, Time};
#[cfg(feature = "std")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "no_std")]
use sp_runtime::RuntimeDebug as Debug;
#[cfg(feature = "std")]
use std::fmt::Debug;
use sp_std::prelude::*;
pub mod abi;
pub mod contract_metadata;
pub mod gateway_inbound_protocol;
pub mod transfers;
pub use gateway_inbound_protocol::GatewayInboundProtocol;
pub type ChainId = [u8; 4];
#[derive(Clone, Eq, PartialEq, PartialOrd, Ord, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayType {
ProgrammableInternal(u32),
ProgrammableExternal(u32),
TxOnly(u32),
}
impl GatewayType {
pub fn fetch_nonce(self) -> u32 {
match self {
Self::ProgrammableInternal(nonce) => nonce,
Self::ProgrammableExternal(nonce) => nonce,
Self::TxOnly(nonce) => nonce,
}
}
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayVendor {
Substrate,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
/// Structure used at gateway registration as a starting point for multi-finality-verifier
pub struct GenericPrimitivesHeader {
pub parent_hash: Option<sp_core::hash::H256>,
pub number: u64,
pub state_root: Option<sp_core::hash::H256>,
pub extrinsics_root: Option<sp_core::hash::H256>,
pub digest: Option<sp_runtime::generic::Digest<sp_core::hash::H256>>,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct GatewayPointer {
pub id: ChainId,
pub vendor: GatewayVendor,
pub gateway_type: GatewayType,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct GatewayGenesisConfig {
/// SCALE-encoded modules following the format of selected frame_metadata::RuntimeMetadataVXX
pub modules_encoded: Option<Vec<u8>>,
/// SCALE-encoded signed extension - see more at frame_metadata::ExtrinsicMetadata
pub signed_extension: Option<Vec<u8>>,
/// Runtime version
pub runtime_version: sp_version::RuntimeVersion,
/// Extrinsics version
pub extrinsics_version: u8,
/// Genesis hash - block id of the genesis block use to distinct the network and sign messages
/// Length depending on parameter passed in abi::GatewayABIConfig
pub genesis_hash: Vec<u8>,
}
impl Default for GatewayGenesisConfig {
fn default() -> Self {
Self {
extrinsics_version: 0,
runtime_version: Default::default(),
genesis_hash: vec![],
modules_encoded: None,
signed_extension: None,
}
}
}
/// A struct that encodes RPC parameters required for a call to a smart-contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct Compose<Account, Balance> {
pub name: Vec<u8>,
pub code_txt: Vec<u8>,
pub exec_type: Vec<u8>,
pub dest: Account,
pub value: Balance,
pub bytes: Vec<u8>,
pub input_data: Vec<u8>,
}
/// A result type of a get storage call.
pub type FetchContractsResult = Result<Vec<u8>, ContractAccessError>;
pub type RegistryContractId<T> = <T as frame_system::Config>::Hash;
/// A result of execution of a contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum | {
/// The contract returned successfully.
///
/// There is a status code and, optionally, some data returned by the contract.
Success {
/// Flags that the contract passed along on returning to alter its exit behaviour.
/// Described in `pallet_contracts::exec::ReturnFlags`.
flags: u32,
/// Output data returned by the contract.
///
/// Can be empty.
data: Vec<u8>,
/// How much gas was consumed by the call.
gas_consumed: u64,
},
/// The contract execution either trapped or returned an error.
Error,
}
/// The possible errors that can happen querying the storage of a contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ContractAccessError {
/// The given address doesn't point to a contract.
DoesntExist,
/// The specified contract is a tombstone and thus cannot have any storage.
IsTombstone,
}
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExecPhase<Account, Balance> {
pub steps: Vec<ExecStep<Account, Balance>>,
}
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExecStep<Account, Balance> {
pub compose: Compose<Account, Balance>,
}
pub type GenericAddress = sp_runtime::MultiAddress<sp_runtime::AccountId32, ()>;
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct InterExecSchedule<Account, Balance> {
pub phases: Vec<ExecPhase<Account, Balance>>,
}
pub trait EscrowTrait: frame_system::Config + pallet_sudo::Config {
type Currency: Currency<Self::AccountId>;
type Time: Time;
}
type Bytes = Vec<u8>;
/// Outbound Step that specifies expected transmission medium for relayers connecting with that gateway.
/// Request message format that derivative of could be compatible with JSON-RPC API
/// with either signed or unsigned payload or custom transmission medium like XCMP protocol
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct CircuitOutboundMessage {
/// Message name/identifier
pub name: Bytes,
/// Module/pallet name
pub module_name: Bytes,
/// Method name
pub method_name: Bytes,
/// Encoded sender's public key
pub sender: Option<Bytes>,
/// Encoded target's public key
pub target: Option<Bytes>,
/// Array of next arguments: encoded bytes of arguments that that JSON-RPC API expects
pub arguments: Vec<Bytes>,
/// Expected results
pub expected_output: Vec<GatewayExpectedOutput>,
/// Extra payload in case the message is signed or uses custom delivery protocols like XCMP
pub extra_payload: Option<ExtraMessagePayload>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct RpcPayloadUnsigned<'a> {
pub method_name: &'a str,
pub params: Vec<Bytes>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct RpcPayloadSigned<'a> {
pub method_name: &'a str,
pub signed_extrinsic: Bytes,
}
impl CircuitOutboundMessage {
pub fn to_jsonrpc_unsigned(&self) -> Result<RpcPayloadUnsigned, &'static str> {
let method_name: &str = sp_std::str::from_utf8(&self.name[..])
.map_err(|_| "`Can't decode method name to &str")?;
Ok(RpcPayloadUnsigned {
method_name,
params: self.arguments.clone(),
})
}
pub fn to_jsonrpc_signed(&self) -> Result<RpcPayloadSigned, &'static str> {
let method_name: &str = sp_std::str::from_utf8(&self.name[..])
.map_err(|_| "`Can't decode method name to &str")?;
let signed_ext = self
.extra_payload
.as_ref()
.map(|payload| payload.tx_signed.clone())
.ok_or("no signed extrinsic provided")?;
Ok(RpcPayloadSigned {
method_name,
signed_extrinsic: signed_ext,
})
}
}
/// Inclusion proofs of different tries
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ProofTriePointer {
/// Proof is a merkle path in the state trie
State,
/// Proof is a merkle path in the transaction trie (extrisics in Substrate)
Transaction,
/// Proof is a merkle path in the receipts trie (in Substrate logs are entries in state trie, this doesn't apply)
Receipts,
}
/// Inbound Steps that specifie expected data deposited by relayers back to the Circuit after each step
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct CircuitInboundResult {
pub result_format: Bytes,
pub proof_type: ProofTriePointer,
}
/// Inbound Steps that specifie expected data deposited by relayers back to the Circuit after each step
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayExpectedOutput {
/// Effect would be the modified storage key
Storage {
key: Vec<Vec<u8>>,
// key: Vec<sp_core::storage::StorageKey>,
// value: Vec<Option<sp_core::storage::StorageData>>,
value: Vec<Option<Bytes>>,
},
/// Expect events as a result of that call - will be described with signature
/// and check against the corresponding types upon receiving
Events { signatures: Vec<Bytes> },
/// Yet another event or Storage output
Extrinsic {
/// Optionally expect dispatch of extrinsic only at the certain block height
block_height: Option<u64>,
},
/// Yet another event or Storage output. If expecting output u can define its type format.
Output { output: Bytes },
}
/// Outbound Step that specifies expected transmission medium for relayers connecting with that gateway.
/// Extra payload in case the message is signed ro has other custom parameters required by linking protocol.
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExtraMessagePayload {
pub signer: Bytes,
/// Encoded utf-8 string of module name that implements requested entrypoint
pub module_name: Bytes,
/// Encoded utf-8 string of method name that implements requested entrypoint
pub method_name: Bytes,
/// Encoded call bytes
pub call_bytes: Bytes,
/// Encoded tx signature
pub signature: Bytes,
/// Encoded extras to that transctions, like versions and gas price /tips for miners. Check GenericExtra for more info.
pub extra: Bytes,
/// Encoded and signed transaction ready to send
pub tx_signed: Bytes,
/// Custom message bytes, that would have to be decoded by the receiving end.
/// Could be utilized by custom transmission medium (like Substrate's XCMP)
pub custom_payload: Option<Bytes>,
}
/// Retrieves all available gateways for a given ChainId.
/// Currently returns a vector with a single hardcoded result.
/// Eventually this will search all known gateways on pallet-xdns.
pub fn retrieve_gateway_pointers(gateway_id: ChainId) -> Result<Vec<GatewayPointer>, &'static str> {
Ok(vec![GatewayPointer {
id: gateway_id,
gateway_type: GatewayType::ProgrammableExternal(0),
vendor: GatewayVendor::Substrate,
}])
}
| ComposableExecResult | identifier_name |
monitors_test.go | package mackerel
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"github.com/kylelemons/godebug/pretty"
)
func TestFindMonitors(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
if req.URL.Path != "/api/v0/monitors" {
t.Error("request URL should be /api/v0/monitors but :", req.URL.Path)
}
respJSON, _ := json.Marshal(map[string][]map[string]interface{}{
"monitors": {
{
"id": "2cSZzK3XfmG",
"type": "connectivity",
"memo": "connectivity monitor",
"scopes": []string{},
"excludeScopes": []string{},
},
{
"id": "2c5bLca8d",
"type": "external",
"name": "testMonitorExternal",
"memo": "this monitor checks example.com.",
"method": "GET",
"url": "https://www.example.com/",
"maxCheckAttempts": 3,
"service": "someService",
"notificationInterval": 60,
"responseTimeCritical": 5000,
"responseTimeWarning": 10000,
"responseTimeDuration": 5,
"certificationExpirationCritical": 15,
"certificationExpirationWarning": 30,
"containsString": "Foo Bar Baz",
"skipCertificateVerification": true,
"headers": []map[string]interface{}{
{"name": "Cache-Control", "value": "no-cache"},
},
},
{
"id": "2DujfcR2kA9",
"name": "expression test",
"memo": "a monitor for expression",
"type": "expression",
"expression": "avg(roleSlots('service:role','loadavg5'))",
"operator": ">",
"warning": 20,
"critical": 30,
},
},
})
res.Header()["Content-Type"] = []string{"application/json"}
fmt.Fprint(res, string(respJSON))
}))
defer ts.Close()
client, _ := NewClientWithOptions("dummy-key", ts.URL, false)
monitors, err := client.FindMonitors()
if err != nil {
t.Error("err shoud be nil but: ", err)
}
{
m, ok := monitors[0].(*MonitorConnectivity)
if !ok || m.Type != "connectivity" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "connectivity monitor" {
t.Error("request sends json including memo but: ", m)
}
}
{
m, ok := monitors[1].(*MonitorExternalHTTP)
if !ok || m.Type != "external" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "this monitor checks example.com." {
t.Error("request sends json including memo but: ", m)
}
if m.Service != "someService" {
t.Error("request sends json including service but: ", m)
}
if m.NotificationInterval != 60 {
t.Error("request sends json including notificationInterval but: ", m)
}
if m.URL != "https://www.example.com/" {
t.Error("request sends json including url but: ", m)
}
if m.ResponseTimeCritical != 5000 {
t.Error("request sends json including responseTimeCritical but: ", m)
}
if m.ResponseTimeWarning != 10000 {
t.Error("request sends json including responseTimeWarning but: ", m)
}
if m.ResponseTimeDuration != 5 {
t.Error("request sends json including responseTimeDuration but: ", m)
}
if m.CertificationExpirationCritical != 15 {
t.Error("request sends json including certificationExpirationCritical but: ", m)
}
if m.CertificationExpirationWarning != 30 {
t.Error("request sends json including certificationExpirationWarning but: ", m)
}
if m.ContainsString != "Foo Bar Baz" {
t.Error("request sends json including containsString but: ", m)
}
if m.SkipCertificateVerification != true {
t.Error("request sends json including skipCertificateVerification but: ", m)
}
if !reflect.DeepEqual(m.Headers, []HeaderField{{Name: "Cache-Control", Value: "no-cache"}}) {
t.Error("request sends json including headers but: ", m)
}
}
{
m, ok := monitors[2].(*MonitorExpression)
if !ok || m.Type != "expression" {
t.Error("request sends json including expression but: ", monitors[2])
}
if m.Memo != "a monitor for expression" {
t.Error("request sends json including memo but: ", m)
}
}
}
// ensure that it supports `"headers":[]` and headers must be nil by default.
func TestMonitorExternalHTTP_headers(t *testing.T) {
tests := []struct {
name string
in *MonitorExternalHTTP
want string
}{
{
name: "default",
in: &MonitorExternalHTTP{},
want: `{"headers":null}`,
},
{
name: "empty list",
in: &MonitorExternalHTTP{Headers: []HeaderField{}},
want: `{"headers":[]}`,
},
}
for _, tt := range tests {
b, err := json.Marshal(tt.in)
if err != nil {
t.Error(err)
continue
}
if got := string(b); got != tt.want {
t.Errorf("%s: got %v, want %v", tt.name, got, tt.want)
}
}
}
const monitorsjson = `
{
"monitors": [
{
"id": "2cSZzK3XfmA",
"type": "connectivity",
"scopes": [],
"excludeScopes": []
},
{
"id" : "2cSZzK3XfmB",
"type": "host",
"name": "disk.aa-00.writes.delta",
"duration": 3,
"metric": "disk.aa-00.writes.delta",
"operator": ">",
"warning": 20000.0,
"critical": 400000.0,
"scopes": [
"Hatena-Blog"
],
"excludeScopes": [
"Hatena-Bookmark: db-master"
]
},
{
"id" : "2cSZzK3XfmC",
"type": "service",
"name": "Hatena-Blog - access_num.4xx_count",
"service": "Hatena-Blog",
"duration": 1,
"metric": "access_num.4xx_count",
"operator": ">",
"warning": 50.0,
"critical": 100.0,
"notificationInterval": 60
},
{
"id" : "2cSZzK3XfmD",
"type": "external",
"name": "example.com",
"method": "POST",
"url": "https://example.com",
"service": "Hatena-Blog",
"headers": [{"name":"Cache-Control", "value":"no-cache"}],
"requestBody": "Request Body"
},
{
"id" : "2cSZzK3XfmE",
"type": "expression",
"name": "role average",
"expression": "avg(roleSlots(\"server:role\",\"loadavg5\"))",
"operator": ">",
"warning": 5.0,
"critical": 10.0,
"notificationInterval": 60
}
]
}
`
var wantMonitors = []Monitor{
&MonitorConnectivity{
ID: "2cSZzK3XfmA",
Name: "",
Type: "connectivity",
IsMute: false,
NotificationInterval: 0,
Scopes: []string{},
ExcludeScopes: []string{},
},
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Name: "disk.aa-00.writes.delta",
Type: "host",
IsMute: false,
NotificationInterval: 0,
Metric: "disk.aa-00.writes.delta",
Operator: ">",
Warning: 20000.000000,
Critical: 400000.000000,
Duration: 3,
Scopes: []string{
"Hatena-Blog",
},
ExcludeScopes: []string{
"Hatena-Bookmark: db-master",
},
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Name: "Hatena-Blog - access_num.4xx_count",
Type: "service",
IsMute: false,
NotificationInterval: 60,
Service: "Hatena-Blog",
Metric: "access_num.4xx_count",
Operator: ">",
Warning: 50.000000,
Critical: 100.000000,
Duration: 1,
},
&MonitorExternalHTTP{
ID: "2cSZzK3XfmD",
Name: "example.com",
Type: "external",
IsMute: false,
NotificationInterval: 0,
Method: "POST",
URL: "https://example.com",
MaxCheckAttempts: 0.000000,
Service: "Hatena-Blog",
ResponseTimeCritical: 0.000000,
ResponseTimeWarning: 0.000000,
ResponseTimeDuration: 0.000000,
RequestBody: "Request Body",
ContainsString: "",
CertificationExpirationCritical: 0,
CertificationExpirationWarning: 0,
SkipCertificateVerification: false,
Headers: []HeaderField{
{
Name: "Cache-Control",
Value: "no-cache",
},
},
},
&MonitorExpression{
ID: "2cSZzK3XfmE",
Name: "role average",
Type: "expression",
IsMute: false,
NotificationInterval: 60,
Expression: "avg(roleSlots(\"server:role\",\"loadavg5\"))",
Operator: ">",
Warning: 5.000000,
Critical: 10.000000,
},
}
func TestDecodeMonitor(t *testing.T) |
func BenchmarkDecodeMonitor(b *testing.B) {
for i := 0; i < b.N; i++ {
decodeMonitorsJSON(b)
}
}
func decodeMonitorsJSON(t testing.TB) []Monitor {
var data struct {
Monitors []json.RawMessage `json:"monitors"`
}
if err := json.NewDecoder(strings.NewReader(monitorsjson)).Decode(&data); err != nil {
t.Error(err)
}
ms := make([]Monitor, 0, len(data.Monitors))
for _, rawmes := range data.Monitors {
m, err := decodeMonitor(rawmes)
if err != nil {
t.Error(err)
}
ms = append(ms, m)
}
return ms
}
var monitorsToBeEncoded = []Monitor{
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Warning: 0.000000,
Critical: 400000.000000,
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Warning: 50.000000,
Critical: 0.000000,
},
&MonitorExpression{
ID: "2cSZzK3XfmE",
Warning: 0.000000,
Critical: 0.000000,
},
}
func TestEncodeMonitor(t *testing.T) {
b, err := json.MarshalIndent(monitorsToBeEncoded, "", " ")
if err != nil {
t.Error("err shoud be nil but: ", err)
}
want := `[
{
"id": "2cSZzK3XfmB",
"warning": 0,
"critical": 400000
},
{
"id": "2cSZzK3XfmC",
"warning": 50,
"critical": 0
},
{
"id": "2cSZzK3XfmE",
"warning": 0,
"critical": 0
}
]`
if got := string(b); got != want {
t.Errorf("got %v, want %v", got, want)
}
}
| {
if got := decodeMonitorsJSON(t); !reflect.DeepEqual(got, wantMonitors) {
t.Errorf("fail to get correct data: diff: (-got +want)\n%v", pretty.Compare(got, wantMonitors))
}
} | identifier_body |
monitors_test.go | package mackerel
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"github.com/kylelemons/godebug/pretty"
)
func TestFindMonitors(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
if req.URL.Path != "/api/v0/monitors" {
t.Error("request URL should be /api/v0/monitors but :", req.URL.Path)
}
respJSON, _ := json.Marshal(map[string][]map[string]interface{}{
"monitors": {
{
"id": "2cSZzK3XfmG",
"type": "connectivity",
"memo": "connectivity monitor",
"scopes": []string{},
"excludeScopes": []string{},
},
{
"id": "2c5bLca8d",
"type": "external",
"name": "testMonitorExternal",
"memo": "this monitor checks example.com.",
"method": "GET",
"url": "https://www.example.com/",
"maxCheckAttempts": 3,
"service": "someService",
"notificationInterval": 60,
"responseTimeCritical": 5000,
"responseTimeWarning": 10000,
"responseTimeDuration": 5,
"certificationExpirationCritical": 15,
"certificationExpirationWarning": 30,
"containsString": "Foo Bar Baz",
"skipCertificateVerification": true,
"headers": []map[string]interface{}{
{"name": "Cache-Control", "value": "no-cache"},
},
},
{
"id": "2DujfcR2kA9",
"name": "expression test",
"memo": "a monitor for expression",
"type": "expression",
"expression": "avg(roleSlots('service:role','loadavg5'))",
"operator": ">",
"warning": 20,
"critical": 30,
},
},
})
res.Header()["Content-Type"] = []string{"application/json"}
fmt.Fprint(res, string(respJSON))
}))
defer ts.Close()
client, _ := NewClientWithOptions("dummy-key", ts.URL, false)
monitors, err := client.FindMonitors()
if err != nil {
t.Error("err shoud be nil but: ", err)
}
{
m, ok := monitors[0].(*MonitorConnectivity)
if !ok || m.Type != "connectivity" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "connectivity monitor" {
t.Error("request sends json including memo but: ", m)
}
}
{
m, ok := monitors[1].(*MonitorExternalHTTP)
if !ok || m.Type != "external" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "this monitor checks example.com." {
t.Error("request sends json including memo but: ", m)
}
if m.Service != "someService" {
t.Error("request sends json including service but: ", m)
}
if m.NotificationInterval != 60 {
t.Error("request sends json including notificationInterval but: ", m)
}
if m.URL != "https://www.example.com/" {
t.Error("request sends json including url but: ", m)
}
if m.ResponseTimeCritical != 5000 {
t.Error("request sends json including responseTimeCritical but: ", m)
}
if m.ResponseTimeWarning != 10000 {
t.Error("request sends json including responseTimeWarning but: ", m)
}
if m.ResponseTimeDuration != 5 {
t.Error("request sends json including responseTimeDuration but: ", m)
}
if m.CertificationExpirationCritical != 15 {
t.Error("request sends json including certificationExpirationCritical but: ", m)
}
if m.CertificationExpirationWarning != 30 {
t.Error("request sends json including certificationExpirationWarning but: ", m)
}
if m.ContainsString != "Foo Bar Baz" {
t.Error("request sends json including containsString but: ", m)
}
if m.SkipCertificateVerification != true {
t.Error("request sends json including skipCertificateVerification but: ", m)
}
if !reflect.DeepEqual(m.Headers, []HeaderField{{Name: "Cache-Control", Value: "no-cache"}}) {
t.Error("request sends json including headers but: ", m)
}
}
{
m, ok := monitors[2].(*MonitorExpression)
if !ok || m.Type != "expression" {
t.Error("request sends json including expression but: ", monitors[2])
}
if m.Memo != "a monitor for expression" {
t.Error("request sends json including memo but: ", m)
}
}
}
// ensure that it supports `"headers":[]` and headers must be nil by default.
func TestMonitorExternalHTTP_headers(t *testing.T) {
tests := []struct {
name string
in *MonitorExternalHTTP
want string
}{
{
name: "default",
in: &MonitorExternalHTTP{},
want: `{"headers":null}`,
},
{
name: "empty list",
in: &MonitorExternalHTTP{Headers: []HeaderField{}},
want: `{"headers":[]}`,
},
}
for _, tt := range tests {
b, err := json.Marshal(tt.in)
if err != nil {
t.Error(err)
continue
}
if got := string(b); got != tt.want {
t.Errorf("%s: got %v, want %v", tt.name, got, tt.want)
}
}
}
const monitorsjson = `
{
"monitors": [
{
"id": "2cSZzK3XfmA",
"type": "connectivity",
"scopes": [],
"excludeScopes": []
},
{
"id" : "2cSZzK3XfmB",
"type": "host",
"name": "disk.aa-00.writes.delta",
"duration": 3,
"metric": "disk.aa-00.writes.delta",
"operator": ">",
"warning": 20000.0,
"critical": 400000.0,
"scopes": [
"Hatena-Blog"
],
"excludeScopes": [
"Hatena-Bookmark: db-master"
]
},
{
"id" : "2cSZzK3XfmC",
"type": "service",
"name": "Hatena-Blog - access_num.4xx_count",
"service": "Hatena-Blog",
"duration": 1,
"metric": "access_num.4xx_count",
"operator": ">",
"warning": 50.0,
"critical": 100.0,
"notificationInterval": 60
},
{
"id" : "2cSZzK3XfmD",
"type": "external",
"name": "example.com",
"method": "POST",
"url": "https://example.com",
"service": "Hatena-Blog",
"headers": [{"name":"Cache-Control", "value":"no-cache"}],
"requestBody": "Request Body"
},
{
"id" : "2cSZzK3XfmE",
"type": "expression",
"name": "role average",
"expression": "avg(roleSlots(\"server:role\",\"loadavg5\"))",
"operator": ">",
"warning": 5.0,
"critical": 10.0,
"notificationInterval": 60
}
]
}
`
var wantMonitors = []Monitor{
&MonitorConnectivity{
ID: "2cSZzK3XfmA",
Name: "",
Type: "connectivity",
IsMute: false,
NotificationInterval: 0,
Scopes: []string{},
ExcludeScopes: []string{},
},
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Name: "disk.aa-00.writes.delta",
Type: "host",
IsMute: false,
NotificationInterval: 0,
Metric: "disk.aa-00.writes.delta",
Operator: ">",
Warning: 20000.000000,
Critical: 400000.000000,
Duration: 3,
Scopes: []string{
"Hatena-Blog",
},
ExcludeScopes: []string{
"Hatena-Bookmark: db-master",
},
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Name: "Hatena-Blog - access_num.4xx_count",
Type: "service",
IsMute: false,
NotificationInterval: 60,
Service: "Hatena-Blog",
Metric: "access_num.4xx_count",
Operator: ">",
Warning: 50.000000,
Critical: 100.000000,
Duration: 1,
},
&MonitorExternalHTTP{
ID: "2cSZzK3XfmD",
Name: "example.com",
Type: "external",
IsMute: false,
NotificationInterval: 0,
Method: "POST",
URL: "https://example.com",
MaxCheckAttempts: 0.000000,
Service: "Hatena-Blog",
ResponseTimeCritical: 0.000000,
ResponseTimeWarning: 0.000000,
ResponseTimeDuration: 0.000000,
RequestBody: "Request Body",
ContainsString: "",
CertificationExpirationCritical: 0,
CertificationExpirationWarning: 0,
SkipCertificateVerification: false,
Headers: []HeaderField{
{
Name: "Cache-Control",
Value: "no-cache",
},
},
},
&MonitorExpression{
ID: "2cSZzK3XfmE",
Name: "role average",
Type: "expression",
IsMute: false,
NotificationInterval: 60,
Expression: "avg(roleSlots(\"server:role\",\"loadavg5\"))",
Operator: ">",
Warning: 5.000000,
Critical: 10.000000,
},
}
func TestDecodeMonitor(t *testing.T) {
if got := decodeMonitorsJSON(t); !reflect.DeepEqual(got, wantMonitors) {
t.Errorf("fail to get correct data: diff: (-got +want)\n%v", pretty.Compare(got, wantMonitors))
}
}
func BenchmarkDecodeMonitor(b *testing.B) {
for i := 0; i < b.N; i++ {
decodeMonitorsJSON(b)
}
}
func decodeMonitorsJSON(t testing.TB) []Monitor {
var data struct {
Monitors []json.RawMessage `json:"monitors"`
}
if err := json.NewDecoder(strings.NewReader(monitorsjson)).Decode(&data); err != nil {
t.Error(err)
}
ms := make([]Monitor, 0, len(data.Monitors))
for _, rawmes := range data.Monitors {
m, err := decodeMonitor(rawmes)
if err != nil {
t.Error(err)
}
ms = append(ms, m)
}
return ms
}
var monitorsToBeEncoded = []Monitor{
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Warning: 0.000000,
Critical: 400000.000000,
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Warning: 50.000000,
Critical: 0.000000,
},
&MonitorExpression{
ID: "2cSZzK3XfmE",
Warning: 0.000000,
Critical: 0.000000,
},
}
func | (t *testing.T) {
b, err := json.MarshalIndent(monitorsToBeEncoded, "", " ")
if err != nil {
t.Error("err shoud be nil but: ", err)
}
want := `[
{
"id": "2cSZzK3XfmB",
"warning": 0,
"critical": 400000
},
{
"id": "2cSZzK3XfmC",
"warning": 50,
"critical": 0
},
{
"id": "2cSZzK3XfmE",
"warning": 0,
"critical": 0
}
]`
if got := string(b); got != want {
t.Errorf("got %v, want %v", got, want)
}
}
| TestEncodeMonitor | identifier_name |
monitors_test.go | package mackerel
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"github.com/kylelemons/godebug/pretty"
)
func TestFindMonitors(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
if req.URL.Path != "/api/v0/monitors" {
t.Error("request URL should be /api/v0/monitors but :", req.URL.Path)
}
respJSON, _ := json.Marshal(map[string][]map[string]interface{}{
"monitors": {
{
"id": "2cSZzK3XfmG",
"type": "connectivity",
"memo": "connectivity monitor",
"scopes": []string{},
"excludeScopes": []string{},
},
{
"id": "2c5bLca8d",
"type": "external",
"name": "testMonitorExternal",
"memo": "this monitor checks example.com.",
"method": "GET",
"url": "https://www.example.com/",
"maxCheckAttempts": 3,
"service": "someService",
"notificationInterval": 60,
"responseTimeCritical": 5000,
"responseTimeWarning": 10000,
"responseTimeDuration": 5,
"certificationExpirationCritical": 15,
"certificationExpirationWarning": 30,
"containsString": "Foo Bar Baz",
"skipCertificateVerification": true,
"headers": []map[string]interface{}{
{"name": "Cache-Control", "value": "no-cache"},
},
},
{
"id": "2DujfcR2kA9",
"name": "expression test",
"memo": "a monitor for expression",
"type": "expression",
"expression": "avg(roleSlots('service:role','loadavg5'))",
"operator": ">",
"warning": 20,
"critical": 30,
},
},
})
res.Header()["Content-Type"] = []string{"application/json"}
fmt.Fprint(res, string(respJSON))
}))
defer ts.Close()
client, _ := NewClientWithOptions("dummy-key", ts.URL, false)
monitors, err := client.FindMonitors()
if err != nil {
t.Error("err shoud be nil but: ", err)
}
{
m, ok := monitors[0].(*MonitorConnectivity)
if !ok || m.Type != "connectivity" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "connectivity monitor" {
t.Error("request sends json including memo but: ", m)
}
}
{
m, ok := monitors[1].(*MonitorExternalHTTP)
if !ok || m.Type != "external" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "this monitor checks example.com." {
t.Error("request sends json including memo but: ", m)
}
if m.Service != "someService" {
t.Error("request sends json including service but: ", m)
}
if m.NotificationInterval != 60 {
t.Error("request sends json including notificationInterval but: ", m)
}
if m.URL != "https://www.example.com/" {
t.Error("request sends json including url but: ", m)
}
if m.ResponseTimeCritical != 5000 {
t.Error("request sends json including responseTimeCritical but: ", m)
}
if m.ResponseTimeWarning != 10000 {
t.Error("request sends json including responseTimeWarning but: ", m)
}
if m.ResponseTimeDuration != 5 {
t.Error("request sends json including responseTimeDuration but: ", m)
}
if m.CertificationExpirationCritical != 15 {
t.Error("request sends json including certificationExpirationCritical but: ", m)
}
if m.CertificationExpirationWarning != 30 {
t.Error("request sends json including certificationExpirationWarning but: ", m)
}
if m.ContainsString != "Foo Bar Baz" {
t.Error("request sends json including containsString but: ", m)
}
if m.SkipCertificateVerification != true {
t.Error("request sends json including skipCertificateVerification but: ", m)
}
if !reflect.DeepEqual(m.Headers, []HeaderField{{Name: "Cache-Control", Value: "no-cache"}}) {
t.Error("request sends json including headers but: ", m)
}
}
{
m, ok := monitors[2].(*MonitorExpression)
if !ok || m.Type != "expression" {
t.Error("request sends json including expression but: ", monitors[2])
}
if m.Memo != "a monitor for expression" {
t.Error("request sends json including memo but: ", m)
}
}
}
// ensure that it supports `"headers":[]` and headers must be nil by default.
func TestMonitorExternalHTTP_headers(t *testing.T) {
tests := []struct {
name string
in *MonitorExternalHTTP
want string
}{
{
name: "default",
in: &MonitorExternalHTTP{},
want: `{"headers":null}`,
},
{
name: "empty list",
in: &MonitorExternalHTTP{Headers: []HeaderField{}},
want: `{"headers":[]}`,
},
}
for _, tt := range tests {
b, err := json.Marshal(tt.in)
if err != nil {
t.Error(err)
continue
}
if got := string(b); got != tt.want {
t.Errorf("%s: got %v, want %v", tt.name, got, tt.want)
}
}
}
const monitorsjson = `
{
"monitors": [
{
"id": "2cSZzK3XfmA",
"type": "connectivity",
"scopes": [],
"excludeScopes": []
},
{
"id" : "2cSZzK3XfmB",
"type": "host",
"name": "disk.aa-00.writes.delta",
"duration": 3,
"metric": "disk.aa-00.writes.delta",
"operator": ">",
"warning": 20000.0,
"critical": 400000.0,
"scopes": [
"Hatena-Blog"
],
"excludeScopes": [
"Hatena-Bookmark: db-master"
]
},
{
"id" : "2cSZzK3XfmC",
"type": "service",
"name": "Hatena-Blog - access_num.4xx_count",
"service": "Hatena-Blog",
"duration": 1,
"metric": "access_num.4xx_count",
"operator": ">",
"warning": 50.0,
"critical": 100.0,
"notificationInterval": 60
},
{
"id" : "2cSZzK3XfmD",
"type": "external",
"name": "example.com",
"method": "POST",
"url": "https://example.com",
"service": "Hatena-Blog",
"headers": [{"name":"Cache-Control", "value":"no-cache"}],
"requestBody": "Request Body"
},
{
"id" : "2cSZzK3XfmE",
"type": "expression",
"name": "role average",
"expression": "avg(roleSlots(\"server:role\",\"loadavg5\"))",
"operator": ">",
"warning": 5.0,
"critical": 10.0, |
var wantMonitors = []Monitor{
&MonitorConnectivity{
ID: "2cSZzK3XfmA",
Name: "",
Type: "connectivity",
IsMute: false,
NotificationInterval: 0,
Scopes: []string{},
ExcludeScopes: []string{},
},
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Name: "disk.aa-00.writes.delta",
Type: "host",
IsMute: false,
NotificationInterval: 0,
Metric: "disk.aa-00.writes.delta",
Operator: ">",
Warning: 20000.000000,
Critical: 400000.000000,
Duration: 3,
Scopes: []string{
"Hatena-Blog",
},
ExcludeScopes: []string{
"Hatena-Bookmark: db-master",
},
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Name: "Hatena-Blog - access_num.4xx_count",
Type: "service",
IsMute: false,
NotificationInterval: 60,
Service: "Hatena-Blog",
Metric: "access_num.4xx_count",
Operator: ">",
Warning: 50.000000,
Critical: 100.000000,
Duration: 1,
},
&MonitorExternalHTTP{
ID: "2cSZzK3XfmD",
Name: "example.com",
Type: "external",
IsMute: false,
NotificationInterval: 0,
Method: "POST",
URL: "https://example.com",
MaxCheckAttempts: 0.000000,
Service: "Hatena-Blog",
ResponseTimeCritical: 0.000000,
ResponseTimeWarning: 0.000000,
ResponseTimeDuration: 0.000000,
RequestBody: "Request Body",
ContainsString: "",
CertificationExpirationCritical: 0,
CertificationExpirationWarning: 0,
SkipCertificateVerification: false,
Headers: []HeaderField{
{
Name: "Cache-Control",
Value: "no-cache",
},
},
},
&MonitorExpression{
ID: "2cSZzK3XfmE",
Name: "role average",
Type: "expression",
IsMute: false,
NotificationInterval: 60,
Expression: "avg(roleSlots(\"server:role\",\"loadavg5\"))",
Operator: ">",
Warning: 5.000000,
Critical: 10.000000,
},
}
func TestDecodeMonitor(t *testing.T) {
if got := decodeMonitorsJSON(t); !reflect.DeepEqual(got, wantMonitors) {
t.Errorf("fail to get correct data: diff: (-got +want)\n%v", pretty.Compare(got, wantMonitors))
}
}
func BenchmarkDecodeMonitor(b *testing.B) {
for i := 0; i < b.N; i++ {
decodeMonitorsJSON(b)
}
}
func decodeMonitorsJSON(t testing.TB) []Monitor {
var data struct {
Monitors []json.RawMessage `json:"monitors"`
}
if err := json.NewDecoder(strings.NewReader(monitorsjson)).Decode(&data); err != nil {
t.Error(err)
}
ms := make([]Monitor, 0, len(data.Monitors))
for _, rawmes := range data.Monitors {
m, err := decodeMonitor(rawmes)
if err != nil {
t.Error(err)
}
ms = append(ms, m)
}
return ms
}
var monitorsToBeEncoded = []Monitor{
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Warning: 0.000000,
Critical: 400000.000000,
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Warning: 50.000000,
Critical: 0.000000,
},
&MonitorExpression{
ID: "2cSZzK3XfmE",
Warning: 0.000000,
Critical: 0.000000,
},
}
func TestEncodeMonitor(t *testing.T) {
b, err := json.MarshalIndent(monitorsToBeEncoded, "", " ")
if err != nil {
t.Error("err shoud be nil but: ", err)
}
want := `[
{
"id": "2cSZzK3XfmB",
"warning": 0,
"critical": 400000
},
{
"id": "2cSZzK3XfmC",
"warning": 50,
"critical": 0
},
{
"id": "2cSZzK3XfmE",
"warning": 0,
"critical": 0
}
]`
if got := string(b); got != want {
t.Errorf("got %v, want %v", got, want)
}
} | "notificationInterval": 60
}
]
}
` | random_line_split |
monitors_test.go | package mackerel
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"github.com/kylelemons/godebug/pretty"
)
func TestFindMonitors(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
if req.URL.Path != "/api/v0/monitors" {
t.Error("request URL should be /api/v0/monitors but :", req.URL.Path)
}
respJSON, _ := json.Marshal(map[string][]map[string]interface{}{
"monitors": {
{
"id": "2cSZzK3XfmG",
"type": "connectivity",
"memo": "connectivity monitor",
"scopes": []string{},
"excludeScopes": []string{},
},
{
"id": "2c5bLca8d",
"type": "external",
"name": "testMonitorExternal",
"memo": "this monitor checks example.com.",
"method": "GET",
"url": "https://www.example.com/",
"maxCheckAttempts": 3,
"service": "someService",
"notificationInterval": 60,
"responseTimeCritical": 5000,
"responseTimeWarning": 10000,
"responseTimeDuration": 5,
"certificationExpirationCritical": 15,
"certificationExpirationWarning": 30,
"containsString": "Foo Bar Baz",
"skipCertificateVerification": true,
"headers": []map[string]interface{}{
{"name": "Cache-Control", "value": "no-cache"},
},
},
{
"id": "2DujfcR2kA9",
"name": "expression test",
"memo": "a monitor for expression",
"type": "expression",
"expression": "avg(roleSlots('service:role','loadavg5'))",
"operator": ">",
"warning": 20,
"critical": 30,
},
},
})
res.Header()["Content-Type"] = []string{"application/json"}
fmt.Fprint(res, string(respJSON))
}))
defer ts.Close()
client, _ := NewClientWithOptions("dummy-key", ts.URL, false)
monitors, err := client.FindMonitors()
if err != nil {
t.Error("err shoud be nil but: ", err)
}
{
m, ok := monitors[0].(*MonitorConnectivity)
if !ok || m.Type != "connectivity" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "connectivity monitor" {
t.Error("request sends json including memo but: ", m)
}
}
{
m, ok := monitors[1].(*MonitorExternalHTTP)
if !ok || m.Type != "external" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "this monitor checks example.com." {
t.Error("request sends json including memo but: ", m)
}
if m.Service != "someService" {
t.Error("request sends json including service but: ", m)
}
if m.NotificationInterval != 60 {
t.Error("request sends json including notificationInterval but: ", m)
}
if m.URL != "https://www.example.com/" {
t.Error("request sends json including url but: ", m)
}
if m.ResponseTimeCritical != 5000 {
t.Error("request sends json including responseTimeCritical but: ", m)
}
if m.ResponseTimeWarning != 10000 |
if m.ResponseTimeDuration != 5 {
t.Error("request sends json including responseTimeDuration but: ", m)
}
if m.CertificationExpirationCritical != 15 {
t.Error("request sends json including certificationExpirationCritical but: ", m)
}
if m.CertificationExpirationWarning != 30 {
t.Error("request sends json including certificationExpirationWarning but: ", m)
}
if m.ContainsString != "Foo Bar Baz" {
t.Error("request sends json including containsString but: ", m)
}
if m.SkipCertificateVerification != true {
t.Error("request sends json including skipCertificateVerification but: ", m)
}
if !reflect.DeepEqual(m.Headers, []HeaderField{{Name: "Cache-Control", Value: "no-cache"}}) {
t.Error("request sends json including headers but: ", m)
}
}
{
m, ok := monitors[2].(*MonitorExpression)
if !ok || m.Type != "expression" {
t.Error("request sends json including expression but: ", monitors[2])
}
if m.Memo != "a monitor for expression" {
t.Error("request sends json including memo but: ", m)
}
}
}
// ensure that it supports `"headers":[]` and headers must be nil by default.
func TestMonitorExternalHTTP_headers(t *testing.T) {
tests := []struct {
name string
in *MonitorExternalHTTP
want string
}{
{
name: "default",
in: &MonitorExternalHTTP{},
want: `{"headers":null}`,
},
{
name: "empty list",
in: &MonitorExternalHTTP{Headers: []HeaderField{}},
want: `{"headers":[]}`,
},
}
for _, tt := range tests {
b, err := json.Marshal(tt.in)
if err != nil {
t.Error(err)
continue
}
if got := string(b); got != tt.want {
t.Errorf("%s: got %v, want %v", tt.name, got, tt.want)
}
}
}
const monitorsjson = `
{
"monitors": [
{
"id": "2cSZzK3XfmA",
"type": "connectivity",
"scopes": [],
"excludeScopes": []
},
{
"id" : "2cSZzK3XfmB",
"type": "host",
"name": "disk.aa-00.writes.delta",
"duration": 3,
"metric": "disk.aa-00.writes.delta",
"operator": ">",
"warning": 20000.0,
"critical": 400000.0,
"scopes": [
"Hatena-Blog"
],
"excludeScopes": [
"Hatena-Bookmark: db-master"
]
},
{
"id" : "2cSZzK3XfmC",
"type": "service",
"name": "Hatena-Blog - access_num.4xx_count",
"service": "Hatena-Blog",
"duration": 1,
"metric": "access_num.4xx_count",
"operator": ">",
"warning": 50.0,
"critical": 100.0,
"notificationInterval": 60
},
{
"id" : "2cSZzK3XfmD",
"type": "external",
"name": "example.com",
"method": "POST",
"url": "https://example.com",
"service": "Hatena-Blog",
"headers": [{"name":"Cache-Control", "value":"no-cache"}],
"requestBody": "Request Body"
},
{
"id" : "2cSZzK3XfmE",
"type": "expression",
"name": "role average",
"expression": "avg(roleSlots(\"server:role\",\"loadavg5\"))",
"operator": ">",
"warning": 5.0,
"critical": 10.0,
"notificationInterval": 60
}
]
}
`
var wantMonitors = []Monitor{
&MonitorConnectivity{
ID: "2cSZzK3XfmA",
Name: "",
Type: "connectivity",
IsMute: false,
NotificationInterval: 0,
Scopes: []string{},
ExcludeScopes: []string{},
},
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Name: "disk.aa-00.writes.delta",
Type: "host",
IsMute: false,
NotificationInterval: 0,
Metric: "disk.aa-00.writes.delta",
Operator: ">",
Warning: 20000.000000,
Critical: 400000.000000,
Duration: 3,
Scopes: []string{
"Hatena-Blog",
},
ExcludeScopes: []string{
"Hatena-Bookmark: db-master",
},
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Name: "Hatena-Blog - access_num.4xx_count",
Type: "service",
IsMute: false,
NotificationInterval: 60,
Service: "Hatena-Blog",
Metric: "access_num.4xx_count",
Operator: ">",
Warning: 50.000000,
Critical: 100.000000,
Duration: 1,
},
&MonitorExternalHTTP{
ID: "2cSZzK3XfmD",
Name: "example.com",
Type: "external",
IsMute: false,
NotificationInterval: 0,
Method: "POST",
URL: "https://example.com",
MaxCheckAttempts: 0.000000,
Service: "Hatena-Blog",
ResponseTimeCritical: 0.000000,
ResponseTimeWarning: 0.000000,
ResponseTimeDuration: 0.000000,
RequestBody: "Request Body",
ContainsString: "",
CertificationExpirationCritical: 0,
CertificationExpirationWarning: 0,
SkipCertificateVerification: false,
Headers: []HeaderField{
{
Name: "Cache-Control",
Value: "no-cache",
},
},
},
&MonitorExpression{
ID: "2cSZzK3XfmE",
Name: "role average",
Type: "expression",
IsMute: false,
NotificationInterval: 60,
Expression: "avg(roleSlots(\"server:role\",\"loadavg5\"))",
Operator: ">",
Warning: 5.000000,
Critical: 10.000000,
},
}
func TestDecodeMonitor(t *testing.T) {
if got := decodeMonitorsJSON(t); !reflect.DeepEqual(got, wantMonitors) {
t.Errorf("fail to get correct data: diff: (-got +want)\n%v", pretty.Compare(got, wantMonitors))
}
}
func BenchmarkDecodeMonitor(b *testing.B) {
for i := 0; i < b.N; i++ {
decodeMonitorsJSON(b)
}
}
func decodeMonitorsJSON(t testing.TB) []Monitor {
var data struct {
Monitors []json.RawMessage `json:"monitors"`
}
if err := json.NewDecoder(strings.NewReader(monitorsjson)).Decode(&data); err != nil {
t.Error(err)
}
ms := make([]Monitor, 0, len(data.Monitors))
for _, rawmes := range data.Monitors {
m, err := decodeMonitor(rawmes)
if err != nil {
t.Error(err)
}
ms = append(ms, m)
}
return ms
}
var monitorsToBeEncoded = []Monitor{
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Warning: 0.000000,
Critical: 400000.000000,
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Warning: 50.000000,
Critical: 0.000000,
},
&MonitorExpression{
ID: "2cSZzK3XfmE",
Warning: 0.000000,
Critical: 0.000000,
},
}
func TestEncodeMonitor(t *testing.T) {
b, err := json.MarshalIndent(monitorsToBeEncoded, "", " ")
if err != nil {
t.Error("err shoud be nil but: ", err)
}
want := `[
{
"id": "2cSZzK3XfmB",
"warning": 0,
"critical": 400000
},
{
"id": "2cSZzK3XfmC",
"warning": 50,
"critical": 0
},
{
"id": "2cSZzK3XfmE",
"warning": 0,
"critical": 0
}
]`
if got := string(b); got != want {
t.Errorf("got %v, want %v", got, want)
}
}
| {
t.Error("request sends json including responseTimeWarning but: ", m)
} | conditional_block |
impl.go | package mainimpl
import (
"fmt"
"os"
"io/ioutil"
"encoding/hex"
"path"
"path/filepath"
"os/signal"
"time"
"encoding/json"
"github.com/SmartMeshFoundation/SmartRaiden"
"github.com/SmartMeshFoundation/SmartRaiden/internal/debug"
"github.com/SmartMeshFoundation/SmartRaiden/log"
"github.com/SmartMeshFoundation/SmartRaiden/network"
"github.com/SmartMeshFoundation/SmartRaiden/network/helper"
"github.com/SmartMeshFoundation/SmartRaiden/network/rpc"
"github.com/SmartMeshFoundation/SmartRaiden/params"
"github.com/SmartMeshFoundation/SmartRaiden/restful"
"github.com/SmartMeshFoundation/SmartRaiden/utils"
ethutils "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/node"
"gopkg.in/urfave/cli.v1"
)
func StartMain() {
os.Args[0] = "smartraiden"
fmt.Printf("os.args=%q\n", os.Args)
app := cli.NewApp()
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "address",
Usage: "The ethereum address you would like raiden to use and for which a keystore file exists in your local system.",
},
ethutils.DirectoryFlag{
Name: "keystore-path",
Usage: "If you have a non-standard path for the ethereum keystore directory provide it using this argument. ",
Value: ethutils.DirectoryString{params.DefaultKeyStoreDir()},
},
cli.StringFlag{
Name: "eth-rpc-endpoint",
Usage: `"host:port" address of ethereum JSON-RPC server.\n'
'Also accepts a protocol prefix (ws:// or ipc channel) with optional port',`,
Value: node.DefaultIPCEndpoint("geth"),
},
cli.StringFlag{
Name: "registry-contract-address",
Usage: `hex encoded address of the registry contract.`,
Value: params.ROPSTEN_REGISTRY_ADDRESS.String(),
},
cli.StringFlag{
Name: "discovery-contract-address",
Usage: `hex encoded address of the discovery contract.`,
Value: params.ROPSTEN_DISCOVERY_ADDRESS.String(),
},
cli.StringFlag{
Name: "listen-address",
Usage: `"host:port" for the raiden service to listen on.`,
Value: fmt.Sprintf("0.0.0.0:%d", params.INITIAL_PORT),
},
cli.StringFlag{
Name: "rpccorsdomain",
Usage: `Comma separated list of domains to accept cross origin requests.
(localhost enabled by default)`,
Value: "http://localhost:* /*",
},
cli.IntFlag{Name: "max-unresponsive-time",
Usage: `Max time in seconds for which an address can send no packets and
still be considered healthy.`,
Value: 120,
},
cli.IntFlag{Name: "send-ping-time",
Usage: `Time in seconds after which if we have received no message from a
node we have a connection with, we are going to send a PING message`,
Value: 60,
},
cli.BoolTFlag{Name: "rpc",
Usage: `Start with or without the RPC server. Default is to start
the RPC server`,
},
cli.StringFlag{
Name: "api-address",
Usage: `host:port" for the RPC server to listen on.`,
Value: "127.0.0.1:5001",
},
ethutils.DirectoryFlag{
Name: "datadir",
Usage: "Directory for storing raiden data.",
Value: ethutils.DirectoryString{params.DefaultDataDir()},
},
cli.StringFlag{
Name: "password-file",
Usage: "Text file containing password for provided account",
},
cli.StringFlag{
Name: "nat",
Usage: `
[auto|upnp|stun|none]
Manually specify method to use for
determining public IP / NAT traversal.
Available methods:
"auto" - Try UPnP, then
STUN, fallback to none
"upnp" - Try UPnP,
fallback to none
"stun" - Try STUN, fallback
to none
"none" - Use the local interface,only for test
address (this will likely cause connectivity
issues)
"ice"- Use ice framework for nat punching
[default: ice]`,
Value: "ice",
},
cli.BoolFlag{
Name: "debugcrash",
Usage: "enable debug crash feature",
},
cli.StringFlag{
Name: "conditionquit",
Usage: "quit at specified point for test",
Value: "",
},
cli.StringFlag{
Name: "turn-server",
Usage: "tur server for ice",
Value: params.DefaultTurnServer,
},
cli.StringFlag{
Name: "turn-user",
Usage: "turn username for turn server",
Value: "bai",
},
cli.StringFlag{
Name: "turn-pass",
Usage: "turn password for turn server",
Value: "bai",
},
cli.BoolFlag{
Name: "nonetwork",
Usage: "disable network, for example ,when we want to settle all channels",
},
cli.BoolFlag{
Name: "fee",
Usage: "enable mediation fee",
},
cli.StringFlag{
Name: "signal-server",
Usage: "use another signal server ",
Value: params.DefaultSignalServer,
},
cli.BoolFlag{
Name: "ignore-mediatednode-request",
Usage: "this node doesn't work as a mediated node, only work as sender or receiver",
},
}
app.Flags = append(app.Flags, debug.Flags...)
app.Action = MainCtx
app.Name = "smartraiden"
app.Version = "0.2"
app.Before = func(ctx *cli.Context) error {
if err := debug.Setup(ctx); err != nil |
return nil
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
return nil
}
app.Run(os.Args)
}
func MainCtx(ctx *cli.Context) error {
var pms *network.PortMappedSocket
var err error
fmt.Printf("Welcom to smartraiden,version %s\n", ctx.App.Version)
if ctx.String("nat") != "ice" {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms, err = network.SocketFactory(host, port, ctx.String("nat"))
if err != nil {
log.Crit(fmt.Sprintf("SocketFactory err=%s", err))
return err
}
log.Trace(fmt.Sprintf("pms=%s", utils.StringInterface1(pms)))
} else {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms = &network.PortMappedSocket{
Ip: host,
Port: port,
}
}
if err != nil {
log.Error(fmt.Sprintf("start server on %s error:%s", ctx.String("listen-address"), err))
utils.SystemExit(1)
}
cfg := config(ctx, pms)
log.Debug(fmt.Sprintf("Config:%s", utils.StringInterface(cfg, 2)))
ethEndpoint := ctx.String("eth-rpc-endpoint")
client, err := helper.NewSafeClient(ethEndpoint)
if err != nil {
log.Error(fmt.Sprintf("cannot connect to geth :%s err=%s", ethEndpoint, err))
utils.SystemExit(1)
}
bcs := rpc.NewBlockChainService(cfg.PrivateKey, cfg.RegistryAddress, client)
log.Trace(fmt.Sprintf("bcs=%#v", bcs))
transport, discovery := buildTransportAndDiscovery(cfg, pms, bcs)
raidenService := smartraiden.NewRaidenService(bcs, cfg.PrivateKey, transport, discovery, cfg)
if cfg.EnableMediationFee {
//do nothing.
} else {
raidenService.SetFeePolicy(&smartraiden.NoFeePolicy{})
}
go func() {
raidenService.Start()
}()
api := smartraiden.NewRaidenApi(raidenService)
regQuitHandler(api)
restful.Start(api, cfg)
return nil
}
func buildTransportAndDiscovery(cfg *params.Config, pms *network.PortMappedSocket, bcs *rpc.BlockChainService) (transport network.Transporter, discovery network.DiscoveryInterface) {
var err error
/*
use ice and doesn't work as route node,means this node runs on a mobile phone.
*/
if cfg.NetworkMode == params.ICEOnly && cfg.IgnoreMediatedNodeRequest {
cfg.NetworkMode = params.MixUDPICE
}
switch cfg.NetworkMode {
case params.NoNetwork:
discovery = network.NewDiscovery()
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewDummyTransport(pms.Ip, pms.Port, nil, policy)
return
case params.UDPOnly:
discovery = network.NewContractDiscovery(bcs.NodeAddress, cfg.DiscoveryAddress, bcs.Client, bcs.Auth)
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewUDPTransport(pms.Ip, pms.Port, pms.Conn, nil, policy)
case params.ICEOnly:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
transport, err = network.NewIceTransporter(bcs.PrivKey, utils.APex2(bcs.NodeAddress))
if err != nil {
panic(err)
}
discovery = network.NewIceHelperDiscovery()
case params.MixUDPICE:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
policy := network.NewTokenBucket(10, 1, time.Now)
transport, discovery = network.NewMixTranspoter(bcs.PrivKey, utils.APex2(bcs.NodeAddress), pms.Ip, pms.Port, pms.Conn, nil, policy)
}
return
}
func regQuitHandler(api *smartraiden.RaidenApi) {
go func() {
quitSignal := make(chan os.Signal, 1)
signal.Notify(quitSignal, os.Interrupt, os.Kill)
<-quitSignal
signal.Stop(quitSignal)
api.Stop()
utils.SystemExit(0)
}()
}
func promptAccount(adviceAddress common.Address, keystorePath, passwordfile string) (addr common.Address, keybin []byte) {
am := smartraiden.NewAccountManager(keystorePath)
if len(am.Accounts) == 0 {
log.Error(fmt.Sprintf("No Ethereum accounts found in the directory %s", keystorePath))
utils.SystemExit(1)
}
if !am.AddressInKeyStore(adviceAddress) {
if adviceAddress != utils.EmptyAddress {
log.Error(fmt.Sprintf("account %s could not be found on the sytstem. aborting...", adviceAddress))
utils.SystemExit(1)
}
shouldPromt := true
fmt.Println("The following accounts were found in your machine:")
for i := 0; i < len(am.Accounts); i++ {
fmt.Printf("%3d - %s\n", i, am.Accounts[i].Address.String())
}
fmt.Println("")
for shouldPromt {
fmt.Printf("Select one of them by index to continue:\n")
idx := -1
fmt.Scanf("%d", &idx)
if idx >= 0 && idx < len(am.Accounts) {
shouldPromt = false
addr = am.Accounts[idx].Address
} else {
fmt.Printf("Error: Provided index %d is out of bounds", idx)
}
}
} else {
addr = adviceAddress
}
var password string
var err error
if len(passwordfile) > 0 {
data, err := ioutil.ReadFile(passwordfile)
if err != nil {
pass, err := utils.PasswordDecrypt(passwordfile)
if err != nil {
panic("decrypt pass err " + err.Error())
}
data = []byte(pass)
}
password = string(data)
log.Trace(fmt.Sprintf("password is %s", password))
keybin, err = am.GetPrivateKey(addr, password)
if err != nil {
log.Error(fmt.Sprintf("Incorrect password for %s in file. Aborting ... %s", addr.String(), err))
utils.SystemExit(1)
}
} else {
for i := 0; i < 3; i++ {
//retries three times
password = "123" //getpass.Prompt("Enter the password to unlock:")
keybin, err = am.GetPrivateKey(addr, password)
if err != nil && i == 3 {
log.Error(fmt.Sprintf("Exhausted passphrase unlock attempts for %s. Aborting ...", addr))
utils.SystemExit(1)
}
if err != nil {
log.Error(fmt.Sprintf("password incorrect\n Please try again or kill the process to quit.\nUsually Ctrl-c."))
continue
}
break
}
}
return
}
func config(ctx *cli.Context, pms *network.PortMappedSocket) *params.Config {
var err error
config := params.DefaultConfig
listenhost, listenport := network.SplitHostPort(ctx.String("listen-address"))
apihost, apiport := network.SplitHostPort(ctx.String("api-address"))
config.Host = listenhost
config.Port = listenport
config.UseConsole = ctx.Bool("console")
config.UseRpc = ctx.Bool("rpc")
config.ApiHost = apihost
config.ApiPort = apiport
config.ExternIp = pms.ExternalIp
config.ExternPort = pms.ExternalPort
max_unresponsive_time := ctx.Int64("max-unresponsive-time")
config.Protocol.NatKeepAliveTimeout = max_unresponsive_time / params.DEFAULT_NAT_KEEPALIVE_RETRIES
address := common.HexToAddress(ctx.String("address"))
address, privkeyBin := promptAccount(address, ctx.String("keystore-path"), ctx.String("password-file"))
config.PrivateKeyHex = hex.EncodeToString(privkeyBin)
config.PrivateKey, err = crypto.ToECDSA(privkeyBin)
config.MyAddress = address
if err != nil {
log.Error("privkey error:", err)
utils.SystemExit(1)
}
registAddrStr := ctx.String("registry-contract-address")
if len(registAddrStr) > 0 {
config.RegistryAddress = common.HexToAddress(registAddrStr)
}
discoverAddr := ctx.String("discovery-contract-address")
if len(discoverAddr) > 0 {
config.DiscoveryAddress = common.HexToAddress(discoverAddr)
}
dataDir := ctx.String("datadir")
if len(dataDir) == 0 {
dataDir = path.Join(utils.GetHomePath(), ".smartraiden")
}
config.DataDir = dataDir
if !utils.Exists(config.DataDir) {
err = os.MkdirAll(config.DataDir, os.ModePerm)
if err != nil {
log.Error(fmt.Sprintf("Datadir:%s doesn't exist and cannot create %v", config.DataDir, err))
utils.SystemExit(1)
}
}
userDbPath := hex.EncodeToString(config.MyAddress[:])
userDbPath = userDbPath[:8]
userDbPath = filepath.Join(config.DataDir, userDbPath)
if !utils.Exists(userDbPath) {
err = os.MkdirAll(userDbPath, os.ModePerm)
if err != nil {
log.Error(fmt.Sprintf("Datadir:%s doesn't exist and cannot create %v", userDbPath, err))
utils.SystemExit(1)
}
}
databasePath := filepath.Join(userDbPath, "log.db")
config.DataBasePath = databasePath
if ctx.Bool("debugcrash") {
config.DebugCrash = true
conditionquit := ctx.String("conditionquit")
err := json.Unmarshal([]byte(conditionquit), &config.ConditionQuit)
if err != nil {
panic(fmt.Sprintf("conditioquit parse error %s", err))
}
log.Info(fmt.Sprintf("condition quit=%#v", config.ConditionQuit))
}
config.Ice.StunServer = ctx.String("turn-server")
config.Ice.TurnServer = ctx.String("turn-server")
config.Ice.TurnUser = ctx.String("turn-user")
config.Ice.TurnPassword = ctx.String("turn-pass")
config.IgnoreMediatedNodeRequest = ctx.Bool("ignore-mediatednode-request")
if ctx.String("nat") == "ice" {
config.NetworkMode = params.ICEOnly
} else if ctx.Bool("nonetwork") {
config.NetworkMode = params.NoNetwork
} else {
config.NetworkMode = params.UDPOnly
}
if ctx.Bool("fee") {
config.EnableMediationFee = true
}
config.Ice.SignalServer = ctx.String("signal-server")
log.Trace(fmt.Sprintf("signal server=%s", config.Ice.SignalServer))
return &config
}
func init() {
//many race condtions don't resolve
setNativeThreadNumber()
}
func setNativeThreadNumber() {
//runtime.GOMAXPROCS(1)
}
| {
return err
} | conditional_block |
impl.go | package mainimpl
import (
"fmt"
"os"
"io/ioutil"
"encoding/hex"
"path"
"path/filepath"
"os/signal"
"time"
"encoding/json"
"github.com/SmartMeshFoundation/SmartRaiden"
"github.com/SmartMeshFoundation/SmartRaiden/internal/debug"
"github.com/SmartMeshFoundation/SmartRaiden/log"
"github.com/SmartMeshFoundation/SmartRaiden/network"
"github.com/SmartMeshFoundation/SmartRaiden/network/helper"
"github.com/SmartMeshFoundation/SmartRaiden/network/rpc"
"github.com/SmartMeshFoundation/SmartRaiden/params"
"github.com/SmartMeshFoundation/SmartRaiden/restful"
"github.com/SmartMeshFoundation/SmartRaiden/utils"
ethutils "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/node"
"gopkg.in/urfave/cli.v1"
)
func StartMain() {
os.Args[0] = "smartraiden"
fmt.Printf("os.args=%q\n", os.Args)
app := cli.NewApp()
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "address",
Usage: "The ethereum address you would like raiden to use and for which a keystore file exists in your local system.",
},
ethutils.DirectoryFlag{
Name: "keystore-path",
Usage: "If you have a non-standard path for the ethereum keystore directory provide it using this argument. ",
Value: ethutils.DirectoryString{params.DefaultKeyStoreDir()},
},
cli.StringFlag{
Name: "eth-rpc-endpoint",
Usage: `"host:port" address of ethereum JSON-RPC server.\n'
'Also accepts a protocol prefix (ws:// or ipc channel) with optional port',`,
Value: node.DefaultIPCEndpoint("geth"),
},
cli.StringFlag{
Name: "registry-contract-address",
Usage: `hex encoded address of the registry contract.`,
Value: params.ROPSTEN_REGISTRY_ADDRESS.String(),
},
cli.StringFlag{
Name: "discovery-contract-address",
Usage: `hex encoded address of the discovery contract.`,
Value: params.ROPSTEN_DISCOVERY_ADDRESS.String(),
},
cli.StringFlag{
Name: "listen-address",
Usage: `"host:port" for the raiden service to listen on.`,
Value: fmt.Sprintf("0.0.0.0:%d", params.INITIAL_PORT),
},
cli.StringFlag{
Name: "rpccorsdomain",
Usage: `Comma separated list of domains to accept cross origin requests.
(localhost enabled by default)`,
Value: "http://localhost:* /*",
},
cli.IntFlag{Name: "max-unresponsive-time",
Usage: `Max time in seconds for which an address can send no packets and
still be considered healthy.`,
Value: 120,
},
cli.IntFlag{Name: "send-ping-time",
Usage: `Time in seconds after which if we have received no message from a
node we have a connection with, we are going to send a PING message`,
Value: 60,
},
cli.BoolTFlag{Name: "rpc",
Usage: `Start with or without the RPC server. Default is to start
the RPC server`,
},
cli.StringFlag{
Name: "api-address",
Usage: `host:port" for the RPC server to listen on.`,
Value: "127.0.0.1:5001",
},
ethutils.DirectoryFlag{
Name: "datadir",
Usage: "Directory for storing raiden data.",
Value: ethutils.DirectoryString{params.DefaultDataDir()},
},
cli.StringFlag{
Name: "password-file",
Usage: "Text file containing password for provided account",
},
cli.StringFlag{
Name: "nat",
Usage: `
[auto|upnp|stun|none]
Manually specify method to use for
determining public IP / NAT traversal.
Available methods:
"auto" - Try UPnP, then
STUN, fallback to none
"upnp" - Try UPnP,
fallback to none
"stun" - Try STUN, fallback
to none
"none" - Use the local interface,only for test
address (this will likely cause connectivity
issues)
"ice"- Use ice framework for nat punching
[default: ice]`,
Value: "ice",
},
cli.BoolFlag{
Name: "debugcrash",
Usage: "enable debug crash feature",
},
cli.StringFlag{
Name: "conditionquit",
Usage: "quit at specified point for test",
Value: "",
},
cli.StringFlag{
Name: "turn-server",
Usage: "tur server for ice",
Value: params.DefaultTurnServer,
},
cli.StringFlag{
Name: "turn-user",
Usage: "turn username for turn server",
Value: "bai",
},
cli.StringFlag{
Name: "turn-pass",
Usage: "turn password for turn server",
Value: "bai",
},
cli.BoolFlag{
Name: "nonetwork",
Usage: "disable network, for example ,when we want to settle all channels",
},
cli.BoolFlag{
Name: "fee",
Usage: "enable mediation fee",
},
cli.StringFlag{
Name: "signal-server",
Usage: "use another signal server ",
Value: params.DefaultSignalServer,
},
cli.BoolFlag{
Name: "ignore-mediatednode-request",
Usage: "this node doesn't work as a mediated node, only work as sender or receiver",
},
}
app.Flags = append(app.Flags, debug.Flags...)
app.Action = MainCtx
app.Name = "smartraiden"
app.Version = "0.2"
app.Before = func(ctx *cli.Context) error {
if err := debug.Setup(ctx); err != nil {
return err
}
return nil
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
return nil
}
app.Run(os.Args)
}
func MainCtx(ctx *cli.Context) error |
func buildTransportAndDiscovery(cfg *params.Config, pms *network.PortMappedSocket, bcs *rpc.BlockChainService) (transport network.Transporter, discovery network.DiscoveryInterface) {
var err error
/*
use ice and doesn't work as route node,means this node runs on a mobile phone.
*/
if cfg.NetworkMode == params.ICEOnly && cfg.IgnoreMediatedNodeRequest {
cfg.NetworkMode = params.MixUDPICE
}
switch cfg.NetworkMode {
case params.NoNetwork:
discovery = network.NewDiscovery()
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewDummyTransport(pms.Ip, pms.Port, nil, policy)
return
case params.UDPOnly:
discovery = network.NewContractDiscovery(bcs.NodeAddress, cfg.DiscoveryAddress, bcs.Client, bcs.Auth)
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewUDPTransport(pms.Ip, pms.Port, pms.Conn, nil, policy)
case params.ICEOnly:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
transport, err = network.NewIceTransporter(bcs.PrivKey, utils.APex2(bcs.NodeAddress))
if err != nil {
panic(err)
}
discovery = network.NewIceHelperDiscovery()
case params.MixUDPICE:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
policy := network.NewTokenBucket(10, 1, time.Now)
transport, discovery = network.NewMixTranspoter(bcs.PrivKey, utils.APex2(bcs.NodeAddress), pms.Ip, pms.Port, pms.Conn, nil, policy)
}
return
}
func regQuitHandler(api *smartraiden.RaidenApi) {
go func() {
quitSignal := make(chan os.Signal, 1)
signal.Notify(quitSignal, os.Interrupt, os.Kill)
<-quitSignal
signal.Stop(quitSignal)
api.Stop()
utils.SystemExit(0)
}()
}
func promptAccount(adviceAddress common.Address, keystorePath, passwordfile string) (addr common.Address, keybin []byte) {
am := smartraiden.NewAccountManager(keystorePath)
if len(am.Accounts) == 0 {
log.Error(fmt.Sprintf("No Ethereum accounts found in the directory %s", keystorePath))
utils.SystemExit(1)
}
if !am.AddressInKeyStore(adviceAddress) {
if adviceAddress != utils.EmptyAddress {
log.Error(fmt.Sprintf("account %s could not be found on the sytstem. aborting...", adviceAddress))
utils.SystemExit(1)
}
shouldPromt := true
fmt.Println("The following accounts were found in your machine:")
for i := 0; i < len(am.Accounts); i++ {
fmt.Printf("%3d - %s\n", i, am.Accounts[i].Address.String())
}
fmt.Println("")
for shouldPromt {
fmt.Printf("Select one of them by index to continue:\n")
idx := -1
fmt.Scanf("%d", &idx)
if idx >= 0 && idx < len(am.Accounts) {
shouldPromt = false
addr = am.Accounts[idx].Address
} else {
fmt.Printf("Error: Provided index %d is out of bounds", idx)
}
}
} else {
addr = adviceAddress
}
var password string
var err error
if len(passwordfile) > 0 {
data, err := ioutil.ReadFile(passwordfile)
if err != nil {
pass, err := utils.PasswordDecrypt(passwordfile)
if err != nil {
panic("decrypt pass err " + err.Error())
}
data = []byte(pass)
}
password = string(data)
log.Trace(fmt.Sprintf("password is %s", password))
keybin, err = am.GetPrivateKey(addr, password)
if err != nil {
log.Error(fmt.Sprintf("Incorrect password for %s in file. Aborting ... %s", addr.String(), err))
utils.SystemExit(1)
}
} else {
for i := 0; i < 3; i++ {
//retries three times
password = "123" //getpass.Prompt("Enter the password to unlock:")
keybin, err = am.GetPrivateKey(addr, password)
if err != nil && i == 3 {
log.Error(fmt.Sprintf("Exhausted passphrase unlock attempts for %s. Aborting ...", addr))
utils.SystemExit(1)
}
if err != nil {
log.Error(fmt.Sprintf("password incorrect\n Please try again or kill the process to quit.\nUsually Ctrl-c."))
continue
}
break
}
}
return
}
func config(ctx *cli.Context, pms *network.PortMappedSocket) *params.Config {
var err error
config := params.DefaultConfig
listenhost, listenport := network.SplitHostPort(ctx.String("listen-address"))
apihost, apiport := network.SplitHostPort(ctx.String("api-address"))
config.Host = listenhost
config.Port = listenport
config.UseConsole = ctx.Bool("console")
config.UseRpc = ctx.Bool("rpc")
config.ApiHost = apihost
config.ApiPort = apiport
config.ExternIp = pms.ExternalIp
config.ExternPort = pms.ExternalPort
max_unresponsive_time := ctx.Int64("max-unresponsive-time")
config.Protocol.NatKeepAliveTimeout = max_unresponsive_time / params.DEFAULT_NAT_KEEPALIVE_RETRIES
address := common.HexToAddress(ctx.String("address"))
address, privkeyBin := promptAccount(address, ctx.String("keystore-path"), ctx.String("password-file"))
config.PrivateKeyHex = hex.EncodeToString(privkeyBin)
config.PrivateKey, err = crypto.ToECDSA(privkeyBin)
config.MyAddress = address
if err != nil {
log.Error("privkey error:", err)
utils.SystemExit(1)
}
registAddrStr := ctx.String("registry-contract-address")
if len(registAddrStr) > 0 {
config.RegistryAddress = common.HexToAddress(registAddrStr)
}
discoverAddr := ctx.String("discovery-contract-address")
if len(discoverAddr) > 0 {
config.DiscoveryAddress = common.HexToAddress(discoverAddr)
}
dataDir := ctx.String("datadir")
if len(dataDir) == 0 {
dataDir = path.Join(utils.GetHomePath(), ".smartraiden")
}
config.DataDir = dataDir
if !utils.Exists(config.DataDir) {
err = os.MkdirAll(config.DataDir, os.ModePerm)
if err != nil {
log.Error(fmt.Sprintf("Datadir:%s doesn't exist and cannot create %v", config.DataDir, err))
utils.SystemExit(1)
}
}
userDbPath := hex.EncodeToString(config.MyAddress[:])
userDbPath = userDbPath[:8]
userDbPath = filepath.Join(config.DataDir, userDbPath)
if !utils.Exists(userDbPath) {
err = os.MkdirAll(userDbPath, os.ModePerm)
if err != nil {
log.Error(fmt.Sprintf("Datadir:%s doesn't exist and cannot create %v", userDbPath, err))
utils.SystemExit(1)
}
}
databasePath := filepath.Join(userDbPath, "log.db")
config.DataBasePath = databasePath
if ctx.Bool("debugcrash") {
config.DebugCrash = true
conditionquit := ctx.String("conditionquit")
err := json.Unmarshal([]byte(conditionquit), &config.ConditionQuit)
if err != nil {
panic(fmt.Sprintf("conditioquit parse error %s", err))
}
log.Info(fmt.Sprintf("condition quit=%#v", config.ConditionQuit))
}
config.Ice.StunServer = ctx.String("turn-server")
config.Ice.TurnServer = ctx.String("turn-server")
config.Ice.TurnUser = ctx.String("turn-user")
config.Ice.TurnPassword = ctx.String("turn-pass")
config.IgnoreMediatedNodeRequest = ctx.Bool("ignore-mediatednode-request")
if ctx.String("nat") == "ice" {
config.NetworkMode = params.ICEOnly
} else if ctx.Bool("nonetwork") {
config.NetworkMode = params.NoNetwork
} else {
config.NetworkMode = params.UDPOnly
}
if ctx.Bool("fee") {
config.EnableMediationFee = true
}
config.Ice.SignalServer = ctx.String("signal-server")
log.Trace(fmt.Sprintf("signal server=%s", config.Ice.SignalServer))
return &config
}
func init() {
//many race condtions don't resolve
setNativeThreadNumber()
}
func setNativeThreadNumber() {
//runtime.GOMAXPROCS(1)
}
| {
var pms *network.PortMappedSocket
var err error
fmt.Printf("Welcom to smartraiden,version %s\n", ctx.App.Version)
if ctx.String("nat") != "ice" {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms, err = network.SocketFactory(host, port, ctx.String("nat"))
if err != nil {
log.Crit(fmt.Sprintf("SocketFactory err=%s", err))
return err
}
log.Trace(fmt.Sprintf("pms=%s", utils.StringInterface1(pms)))
} else {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms = &network.PortMappedSocket{
Ip: host,
Port: port,
}
}
if err != nil {
log.Error(fmt.Sprintf("start server on %s error:%s", ctx.String("listen-address"), err))
utils.SystemExit(1)
}
cfg := config(ctx, pms)
log.Debug(fmt.Sprintf("Config:%s", utils.StringInterface(cfg, 2)))
ethEndpoint := ctx.String("eth-rpc-endpoint")
client, err := helper.NewSafeClient(ethEndpoint)
if err != nil {
log.Error(fmt.Sprintf("cannot connect to geth :%s err=%s", ethEndpoint, err))
utils.SystemExit(1)
}
bcs := rpc.NewBlockChainService(cfg.PrivateKey, cfg.RegistryAddress, client)
log.Trace(fmt.Sprintf("bcs=%#v", bcs))
transport, discovery := buildTransportAndDiscovery(cfg, pms, bcs)
raidenService := smartraiden.NewRaidenService(bcs, cfg.PrivateKey, transport, discovery, cfg)
if cfg.EnableMediationFee {
//do nothing.
} else {
raidenService.SetFeePolicy(&smartraiden.NoFeePolicy{})
}
go func() {
raidenService.Start()
}()
api := smartraiden.NewRaidenApi(raidenService)
regQuitHandler(api)
restful.Start(api, cfg)
return nil
} | identifier_body |
impl.go | package mainimpl
import (
"fmt"
"os"
"io/ioutil"
"encoding/hex"
"path"
"path/filepath"
"os/signal"
"time"
"encoding/json"
"github.com/SmartMeshFoundation/SmartRaiden"
"github.com/SmartMeshFoundation/SmartRaiden/internal/debug"
"github.com/SmartMeshFoundation/SmartRaiden/log"
"github.com/SmartMeshFoundation/SmartRaiden/network"
"github.com/SmartMeshFoundation/SmartRaiden/network/helper"
"github.com/SmartMeshFoundation/SmartRaiden/network/rpc"
"github.com/SmartMeshFoundation/SmartRaiden/params"
"github.com/SmartMeshFoundation/SmartRaiden/restful"
"github.com/SmartMeshFoundation/SmartRaiden/utils"
ethutils "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/node"
"gopkg.in/urfave/cli.v1"
)
func StartMain() {
os.Args[0] = "smartraiden"
fmt.Printf("os.args=%q\n", os.Args)
app := cli.NewApp()
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "address",
Usage: "The ethereum address you would like raiden to use and for which a keystore file exists in your local system.",
},
ethutils.DirectoryFlag{
Name: "keystore-path",
Usage: "If you have a non-standard path for the ethereum keystore directory provide it using this argument. ",
Value: ethutils.DirectoryString{params.DefaultKeyStoreDir()},
},
cli.StringFlag{
Name: "eth-rpc-endpoint",
Usage: `"host:port" address of ethereum JSON-RPC server.\n'
'Also accepts a protocol prefix (ws:// or ipc channel) with optional port',`,
Value: node.DefaultIPCEndpoint("geth"),
},
cli.StringFlag{
Name: "registry-contract-address",
Usage: `hex encoded address of the registry contract.`,
Value: params.ROPSTEN_REGISTRY_ADDRESS.String(),
},
cli.StringFlag{
Name: "discovery-contract-address",
Usage: `hex encoded address of the discovery contract.`,
Value: params.ROPSTEN_DISCOVERY_ADDRESS.String(),
},
cli.StringFlag{
Name: "listen-address",
Usage: `"host:port" for the raiden service to listen on.`,
Value: fmt.Sprintf("0.0.0.0:%d", params.INITIAL_PORT),
},
cli.StringFlag{
Name: "rpccorsdomain",
Usage: `Comma separated list of domains to accept cross origin requests.
(localhost enabled by default)`,
Value: "http://localhost:* /*",
},
cli.IntFlag{Name: "max-unresponsive-time",
Usage: `Max time in seconds for which an address can send no packets and
still be considered healthy.`,
Value: 120,
},
cli.IntFlag{Name: "send-ping-time",
Usage: `Time in seconds after which if we have received no message from a
node we have a connection with, we are going to send a PING message`,
Value: 60,
},
cli.BoolTFlag{Name: "rpc",
Usage: `Start with or without the RPC server. Default is to start
the RPC server`,
},
cli.StringFlag{
Name: "api-address",
Usage: `host:port" for the RPC server to listen on.`,
Value: "127.0.0.1:5001",
},
ethutils.DirectoryFlag{
Name: "datadir",
Usage: "Directory for storing raiden data.",
Value: ethutils.DirectoryString{params.DefaultDataDir()},
},
cli.StringFlag{
Name: "password-file",
Usage: "Text file containing password for provided account",
},
cli.StringFlag{
Name: "nat",
Usage: `
[auto|upnp|stun|none]
Manually specify method to use for
determining public IP / NAT traversal.
Available methods:
"auto" - Try UPnP, then
STUN, fallback to none
"upnp" - Try UPnP,
fallback to none
"stun" - Try STUN, fallback
to none
"none" - Use the local interface,only for test
address (this will likely cause connectivity
issues)
"ice"- Use ice framework for nat punching
[default: ice]`,
Value: "ice",
},
cli.BoolFlag{
Name: "debugcrash",
Usage: "enable debug crash feature",
},
cli.StringFlag{
Name: "conditionquit",
Usage: "quit at specified point for test",
Value: "",
},
cli.StringFlag{
Name: "turn-server",
Usage: "tur server for ice",
Value: params.DefaultTurnServer,
},
cli.StringFlag{
Name: "turn-user",
Usage: "turn username for turn server",
Value: "bai",
},
cli.StringFlag{
Name: "turn-pass",
Usage: "turn password for turn server",
Value: "bai",
},
cli.BoolFlag{
Name: "nonetwork",
Usage: "disable network, for example ,when we want to settle all channels",
},
cli.BoolFlag{
Name: "fee",
Usage: "enable mediation fee",
},
cli.StringFlag{
Name: "signal-server",
Usage: "use another signal server ",
Value: params.DefaultSignalServer,
},
cli.BoolFlag{
Name: "ignore-mediatednode-request",
Usage: "this node doesn't work as a mediated node, only work as sender or receiver",
},
}
app.Flags = append(app.Flags, debug.Flags...)
app.Action = MainCtx
app.Name = "smartraiden"
app.Version = "0.2"
app.Before = func(ctx *cli.Context) error {
if err := debug.Setup(ctx); err != nil {
return err
}
return nil
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
return nil
}
app.Run(os.Args)
}
func MainCtx(ctx *cli.Context) error {
var pms *network.PortMappedSocket
var err error
fmt.Printf("Welcom to smartraiden,version %s\n", ctx.App.Version)
if ctx.String("nat") != "ice" {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms, err = network.SocketFactory(host, port, ctx.String("nat"))
if err != nil {
log.Crit(fmt.Sprintf("SocketFactory err=%s", err))
return err
}
log.Trace(fmt.Sprintf("pms=%s", utils.StringInterface1(pms)))
} else {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms = &network.PortMappedSocket{
Ip: host,
Port: port,
}
}
if err != nil {
log.Error(fmt.Sprintf("start server on %s error:%s", ctx.String("listen-address"), err))
utils.SystemExit(1)
}
cfg := config(ctx, pms)
log.Debug(fmt.Sprintf("Config:%s", utils.StringInterface(cfg, 2)))
ethEndpoint := ctx.String("eth-rpc-endpoint")
client, err := helper.NewSafeClient(ethEndpoint)
if err != nil {
log.Error(fmt.Sprintf("cannot connect to geth :%s err=%s", ethEndpoint, err))
utils.SystemExit(1)
}
bcs := rpc.NewBlockChainService(cfg.PrivateKey, cfg.RegistryAddress, client)
log.Trace(fmt.Sprintf("bcs=%#v", bcs))
transport, discovery := buildTransportAndDiscovery(cfg, pms, bcs)
raidenService := smartraiden.NewRaidenService(bcs, cfg.PrivateKey, transport, discovery, cfg)
if cfg.EnableMediationFee {
//do nothing.
} else {
raidenService.SetFeePolicy(&smartraiden.NoFeePolicy{})
}
go func() {
raidenService.Start()
}()
api := smartraiden.NewRaidenApi(raidenService)
regQuitHandler(api)
restful.Start(api, cfg)
return nil
}
func | (cfg *params.Config, pms *network.PortMappedSocket, bcs *rpc.BlockChainService) (transport network.Transporter, discovery network.DiscoveryInterface) {
var err error
/*
use ice and doesn't work as route node,means this node runs on a mobile phone.
*/
if cfg.NetworkMode == params.ICEOnly && cfg.IgnoreMediatedNodeRequest {
cfg.NetworkMode = params.MixUDPICE
}
switch cfg.NetworkMode {
case params.NoNetwork:
discovery = network.NewDiscovery()
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewDummyTransport(pms.Ip, pms.Port, nil, policy)
return
case params.UDPOnly:
discovery = network.NewContractDiscovery(bcs.NodeAddress, cfg.DiscoveryAddress, bcs.Client, bcs.Auth)
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewUDPTransport(pms.Ip, pms.Port, pms.Conn, nil, policy)
case params.ICEOnly:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
transport, err = network.NewIceTransporter(bcs.PrivKey, utils.APex2(bcs.NodeAddress))
if err != nil {
panic(err)
}
discovery = network.NewIceHelperDiscovery()
case params.MixUDPICE:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
policy := network.NewTokenBucket(10, 1, time.Now)
transport, discovery = network.NewMixTranspoter(bcs.PrivKey, utils.APex2(bcs.NodeAddress), pms.Ip, pms.Port, pms.Conn, nil, policy)
}
return
}
func regQuitHandler(api *smartraiden.RaidenApi) {
go func() {
quitSignal := make(chan os.Signal, 1)
signal.Notify(quitSignal, os.Interrupt, os.Kill)
<-quitSignal
signal.Stop(quitSignal)
api.Stop()
utils.SystemExit(0)
}()
}
func promptAccount(adviceAddress common.Address, keystorePath, passwordfile string) (addr common.Address, keybin []byte) {
am := smartraiden.NewAccountManager(keystorePath)
if len(am.Accounts) == 0 {
log.Error(fmt.Sprintf("No Ethereum accounts found in the directory %s", keystorePath))
utils.SystemExit(1)
}
if !am.AddressInKeyStore(adviceAddress) {
if adviceAddress != utils.EmptyAddress {
log.Error(fmt.Sprintf("account %s could not be found on the sytstem. aborting...", adviceAddress))
utils.SystemExit(1)
}
shouldPromt := true
fmt.Println("The following accounts were found in your machine:")
for i := 0; i < len(am.Accounts); i++ {
fmt.Printf("%3d - %s\n", i, am.Accounts[i].Address.String())
}
fmt.Println("")
for shouldPromt {
fmt.Printf("Select one of them by index to continue:\n")
idx := -1
fmt.Scanf("%d", &idx)
if idx >= 0 && idx < len(am.Accounts) {
shouldPromt = false
addr = am.Accounts[idx].Address
} else {
fmt.Printf("Error: Provided index %d is out of bounds", idx)
}
}
} else {
addr = adviceAddress
}
var password string
var err error
if len(passwordfile) > 0 {
data, err := ioutil.ReadFile(passwordfile)
if err != nil {
pass, err := utils.PasswordDecrypt(passwordfile)
if err != nil {
panic("decrypt pass err " + err.Error())
}
data = []byte(pass)
}
password = string(data)
log.Trace(fmt.Sprintf("password is %s", password))
keybin, err = am.GetPrivateKey(addr, password)
if err != nil {
log.Error(fmt.Sprintf("Incorrect password for %s in file. Aborting ... %s", addr.String(), err))
utils.SystemExit(1)
}
} else {
for i := 0; i < 3; i++ {
//retries three times
password = "123" //getpass.Prompt("Enter the password to unlock:")
keybin, err = am.GetPrivateKey(addr, password)
if err != nil && i == 3 {
log.Error(fmt.Sprintf("Exhausted passphrase unlock attempts for %s. Aborting ...", addr))
utils.SystemExit(1)
}
if err != nil {
log.Error(fmt.Sprintf("password incorrect\n Please try again or kill the process to quit.\nUsually Ctrl-c."))
continue
}
break
}
}
return
}
func config(ctx *cli.Context, pms *network.PortMappedSocket) *params.Config {
var err error
config := params.DefaultConfig
listenhost, listenport := network.SplitHostPort(ctx.String("listen-address"))
apihost, apiport := network.SplitHostPort(ctx.String("api-address"))
config.Host = listenhost
config.Port = listenport
config.UseConsole = ctx.Bool("console")
config.UseRpc = ctx.Bool("rpc")
config.ApiHost = apihost
config.ApiPort = apiport
config.ExternIp = pms.ExternalIp
config.ExternPort = pms.ExternalPort
max_unresponsive_time := ctx.Int64("max-unresponsive-time")
config.Protocol.NatKeepAliveTimeout = max_unresponsive_time / params.DEFAULT_NAT_KEEPALIVE_RETRIES
address := common.HexToAddress(ctx.String("address"))
address, privkeyBin := promptAccount(address, ctx.String("keystore-path"), ctx.String("password-file"))
config.PrivateKeyHex = hex.EncodeToString(privkeyBin)
config.PrivateKey, err = crypto.ToECDSA(privkeyBin)
config.MyAddress = address
if err != nil {
log.Error("privkey error:", err)
utils.SystemExit(1)
}
registAddrStr := ctx.String("registry-contract-address")
if len(registAddrStr) > 0 {
config.RegistryAddress = common.HexToAddress(registAddrStr)
}
discoverAddr := ctx.String("discovery-contract-address")
if len(discoverAddr) > 0 {
config.DiscoveryAddress = common.HexToAddress(discoverAddr)
}
dataDir := ctx.String("datadir")
if len(dataDir) == 0 {
dataDir = path.Join(utils.GetHomePath(), ".smartraiden")
}
config.DataDir = dataDir
if !utils.Exists(config.DataDir) {
err = os.MkdirAll(config.DataDir, os.ModePerm)
if err != nil {
log.Error(fmt.Sprintf("Datadir:%s doesn't exist and cannot create %v", config.DataDir, err))
utils.SystemExit(1)
}
}
userDbPath := hex.EncodeToString(config.MyAddress[:])
userDbPath = userDbPath[:8]
userDbPath = filepath.Join(config.DataDir, userDbPath)
if !utils.Exists(userDbPath) {
err = os.MkdirAll(userDbPath, os.ModePerm)
if err != nil {
log.Error(fmt.Sprintf("Datadir:%s doesn't exist and cannot create %v", userDbPath, err))
utils.SystemExit(1)
}
}
databasePath := filepath.Join(userDbPath, "log.db")
config.DataBasePath = databasePath
if ctx.Bool("debugcrash") {
config.DebugCrash = true
conditionquit := ctx.String("conditionquit")
err := json.Unmarshal([]byte(conditionquit), &config.ConditionQuit)
if err != nil {
panic(fmt.Sprintf("conditioquit parse error %s", err))
}
log.Info(fmt.Sprintf("condition quit=%#v", config.ConditionQuit))
}
config.Ice.StunServer = ctx.String("turn-server")
config.Ice.TurnServer = ctx.String("turn-server")
config.Ice.TurnUser = ctx.String("turn-user")
config.Ice.TurnPassword = ctx.String("turn-pass")
config.IgnoreMediatedNodeRequest = ctx.Bool("ignore-mediatednode-request")
if ctx.String("nat") == "ice" {
config.NetworkMode = params.ICEOnly
} else if ctx.Bool("nonetwork") {
config.NetworkMode = params.NoNetwork
} else {
config.NetworkMode = params.UDPOnly
}
if ctx.Bool("fee") {
config.EnableMediationFee = true
}
config.Ice.SignalServer = ctx.String("signal-server")
log.Trace(fmt.Sprintf("signal server=%s", config.Ice.SignalServer))
return &config
}
func init() {
//many race condtions don't resolve
setNativeThreadNumber()
}
func setNativeThreadNumber() {
//runtime.GOMAXPROCS(1)
}
| buildTransportAndDiscovery | identifier_name |
impl.go | package mainimpl
import (
"fmt"
"os"
"io/ioutil"
"encoding/hex"
"path"
"path/filepath"
"os/signal"
"time"
"encoding/json"
"github.com/SmartMeshFoundation/SmartRaiden"
"github.com/SmartMeshFoundation/SmartRaiden/internal/debug"
"github.com/SmartMeshFoundation/SmartRaiden/log"
"github.com/SmartMeshFoundation/SmartRaiden/network"
"github.com/SmartMeshFoundation/SmartRaiden/network/helper"
"github.com/SmartMeshFoundation/SmartRaiden/network/rpc"
"github.com/SmartMeshFoundation/SmartRaiden/params"
"github.com/SmartMeshFoundation/SmartRaiden/restful"
"github.com/SmartMeshFoundation/SmartRaiden/utils"
ethutils "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/node"
"gopkg.in/urfave/cli.v1"
)
func StartMain() {
os.Args[0] = "smartraiden"
fmt.Printf("os.args=%q\n", os.Args)
app := cli.NewApp()
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "address",
Usage: "The ethereum address you would like raiden to use and for which a keystore file exists in your local system.",
},
ethutils.DirectoryFlag{
Name: "keystore-path",
Usage: "If you have a non-standard path for the ethereum keystore directory provide it using this argument. ",
Value: ethutils.DirectoryString{params.DefaultKeyStoreDir()},
},
cli.StringFlag{
Name: "eth-rpc-endpoint",
Usage: `"host:port" address of ethereum JSON-RPC server.\n'
'Also accepts a protocol prefix (ws:// or ipc channel) with optional port',`,
Value: node.DefaultIPCEndpoint("geth"),
},
cli.StringFlag{
Name: "registry-contract-address",
Usage: `hex encoded address of the registry contract.`,
Value: params.ROPSTEN_REGISTRY_ADDRESS.String(),
},
cli.StringFlag{
Name: "discovery-contract-address",
Usage: `hex encoded address of the discovery contract.`,
Value: params.ROPSTEN_DISCOVERY_ADDRESS.String(),
},
cli.StringFlag{
Name: "listen-address",
Usage: `"host:port" for the raiden service to listen on.`,
Value: fmt.Sprintf("0.0.0.0:%d", params.INITIAL_PORT),
},
cli.StringFlag{
Name: "rpccorsdomain",
Usage: `Comma separated list of domains to accept cross origin requests.
(localhost enabled by default)`,
Value: "http://localhost:* /*",
},
cli.IntFlag{Name: "max-unresponsive-time",
Usage: `Max time in seconds for which an address can send no packets and
still be considered healthy.`,
Value: 120,
},
cli.IntFlag{Name: "send-ping-time",
Usage: `Time in seconds after which if we have received no message from a
node we have a connection with, we are going to send a PING message`,
Value: 60,
},
cli.BoolTFlag{Name: "rpc",
Usage: `Start with or without the RPC server. Default is to start
the RPC server`,
},
cli.StringFlag{
Name: "api-address",
Usage: `host:port" for the RPC server to listen on.`,
Value: "127.0.0.1:5001",
},
ethutils.DirectoryFlag{
Name: "datadir",
Usage: "Directory for storing raiden data.",
Value: ethutils.DirectoryString{params.DefaultDataDir()},
},
cli.StringFlag{
Name: "password-file",
Usage: "Text file containing password for provided account",
},
cli.StringFlag{
Name: "nat",
Usage: `
[auto|upnp|stun|none]
Manually specify method to use for
determining public IP / NAT traversal.
Available methods:
"auto" - Try UPnP, then
STUN, fallback to none
"upnp" - Try UPnP,
fallback to none
"stun" - Try STUN, fallback
to none
"none" - Use the local interface,only for test
address (this will likely cause connectivity
issues) | cli.BoolFlag{
Name: "debugcrash",
Usage: "enable debug crash feature",
},
cli.StringFlag{
Name: "conditionquit",
Usage: "quit at specified point for test",
Value: "",
},
cli.StringFlag{
Name: "turn-server",
Usage: "tur server for ice",
Value: params.DefaultTurnServer,
},
cli.StringFlag{
Name: "turn-user",
Usage: "turn username for turn server",
Value: "bai",
},
cli.StringFlag{
Name: "turn-pass",
Usage: "turn password for turn server",
Value: "bai",
},
cli.BoolFlag{
Name: "nonetwork",
Usage: "disable network, for example ,when we want to settle all channels",
},
cli.BoolFlag{
Name: "fee",
Usage: "enable mediation fee",
},
cli.StringFlag{
Name: "signal-server",
Usage: "use another signal server ",
Value: params.DefaultSignalServer,
},
cli.BoolFlag{
Name: "ignore-mediatednode-request",
Usage: "this node doesn't work as a mediated node, only work as sender or receiver",
},
}
app.Flags = append(app.Flags, debug.Flags...)
app.Action = MainCtx
app.Name = "smartraiden"
app.Version = "0.2"
app.Before = func(ctx *cli.Context) error {
if err := debug.Setup(ctx); err != nil {
return err
}
return nil
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
return nil
}
app.Run(os.Args)
}
func MainCtx(ctx *cli.Context) error {
var pms *network.PortMappedSocket
var err error
fmt.Printf("Welcom to smartraiden,version %s\n", ctx.App.Version)
if ctx.String("nat") != "ice" {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms, err = network.SocketFactory(host, port, ctx.String("nat"))
if err != nil {
log.Crit(fmt.Sprintf("SocketFactory err=%s", err))
return err
}
log.Trace(fmt.Sprintf("pms=%s", utils.StringInterface1(pms)))
} else {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms = &network.PortMappedSocket{
Ip: host,
Port: port,
}
}
if err != nil {
log.Error(fmt.Sprintf("start server on %s error:%s", ctx.String("listen-address"), err))
utils.SystemExit(1)
}
cfg := config(ctx, pms)
log.Debug(fmt.Sprintf("Config:%s", utils.StringInterface(cfg, 2)))
ethEndpoint := ctx.String("eth-rpc-endpoint")
client, err := helper.NewSafeClient(ethEndpoint)
if err != nil {
log.Error(fmt.Sprintf("cannot connect to geth :%s err=%s", ethEndpoint, err))
utils.SystemExit(1)
}
bcs := rpc.NewBlockChainService(cfg.PrivateKey, cfg.RegistryAddress, client)
log.Trace(fmt.Sprintf("bcs=%#v", bcs))
transport, discovery := buildTransportAndDiscovery(cfg, pms, bcs)
raidenService := smartraiden.NewRaidenService(bcs, cfg.PrivateKey, transport, discovery, cfg)
if cfg.EnableMediationFee {
//do nothing.
} else {
raidenService.SetFeePolicy(&smartraiden.NoFeePolicy{})
}
go func() {
raidenService.Start()
}()
api := smartraiden.NewRaidenApi(raidenService)
regQuitHandler(api)
restful.Start(api, cfg)
return nil
}
func buildTransportAndDiscovery(cfg *params.Config, pms *network.PortMappedSocket, bcs *rpc.BlockChainService) (transport network.Transporter, discovery network.DiscoveryInterface) {
var err error
/*
use ice and doesn't work as route node,means this node runs on a mobile phone.
*/
if cfg.NetworkMode == params.ICEOnly && cfg.IgnoreMediatedNodeRequest {
cfg.NetworkMode = params.MixUDPICE
}
switch cfg.NetworkMode {
case params.NoNetwork:
discovery = network.NewDiscovery()
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewDummyTransport(pms.Ip, pms.Port, nil, policy)
return
case params.UDPOnly:
discovery = network.NewContractDiscovery(bcs.NodeAddress, cfg.DiscoveryAddress, bcs.Client, bcs.Auth)
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewUDPTransport(pms.Ip, pms.Port, pms.Conn, nil, policy)
case params.ICEOnly:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
transport, err = network.NewIceTransporter(bcs.PrivKey, utils.APex2(bcs.NodeAddress))
if err != nil {
panic(err)
}
discovery = network.NewIceHelperDiscovery()
case params.MixUDPICE:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
policy := network.NewTokenBucket(10, 1, time.Now)
transport, discovery = network.NewMixTranspoter(bcs.PrivKey, utils.APex2(bcs.NodeAddress), pms.Ip, pms.Port, pms.Conn, nil, policy)
}
return
}
func regQuitHandler(api *smartraiden.RaidenApi) {
go func() {
quitSignal := make(chan os.Signal, 1)
signal.Notify(quitSignal, os.Interrupt, os.Kill)
<-quitSignal
signal.Stop(quitSignal)
api.Stop()
utils.SystemExit(0)
}()
}
func promptAccount(adviceAddress common.Address, keystorePath, passwordfile string) (addr common.Address, keybin []byte) {
am := smartraiden.NewAccountManager(keystorePath)
if len(am.Accounts) == 0 {
log.Error(fmt.Sprintf("No Ethereum accounts found in the directory %s", keystorePath))
utils.SystemExit(1)
}
if !am.AddressInKeyStore(adviceAddress) {
if adviceAddress != utils.EmptyAddress {
log.Error(fmt.Sprintf("account %s could not be found on the sytstem. aborting...", adviceAddress))
utils.SystemExit(1)
}
shouldPromt := true
fmt.Println("The following accounts were found in your machine:")
for i := 0; i < len(am.Accounts); i++ {
fmt.Printf("%3d - %s\n", i, am.Accounts[i].Address.String())
}
fmt.Println("")
for shouldPromt {
fmt.Printf("Select one of them by index to continue:\n")
idx := -1
fmt.Scanf("%d", &idx)
if idx >= 0 && idx < len(am.Accounts) {
shouldPromt = false
addr = am.Accounts[idx].Address
} else {
fmt.Printf("Error: Provided index %d is out of bounds", idx)
}
}
} else {
addr = adviceAddress
}
var password string
var err error
if len(passwordfile) > 0 {
data, err := ioutil.ReadFile(passwordfile)
if err != nil {
pass, err := utils.PasswordDecrypt(passwordfile)
if err != nil {
panic("decrypt pass err " + err.Error())
}
data = []byte(pass)
}
password = string(data)
log.Trace(fmt.Sprintf("password is %s", password))
keybin, err = am.GetPrivateKey(addr, password)
if err != nil {
log.Error(fmt.Sprintf("Incorrect password for %s in file. Aborting ... %s", addr.String(), err))
utils.SystemExit(1)
}
} else {
for i := 0; i < 3; i++ {
//retries three times
password = "123" //getpass.Prompt("Enter the password to unlock:")
keybin, err = am.GetPrivateKey(addr, password)
if err != nil && i == 3 {
log.Error(fmt.Sprintf("Exhausted passphrase unlock attempts for %s. Aborting ...", addr))
utils.SystemExit(1)
}
if err != nil {
log.Error(fmt.Sprintf("password incorrect\n Please try again or kill the process to quit.\nUsually Ctrl-c."))
continue
}
break
}
}
return
}
func config(ctx *cli.Context, pms *network.PortMappedSocket) *params.Config {
var err error
config := params.DefaultConfig
listenhost, listenport := network.SplitHostPort(ctx.String("listen-address"))
apihost, apiport := network.SplitHostPort(ctx.String("api-address"))
config.Host = listenhost
config.Port = listenport
config.UseConsole = ctx.Bool("console")
config.UseRpc = ctx.Bool("rpc")
config.ApiHost = apihost
config.ApiPort = apiport
config.ExternIp = pms.ExternalIp
config.ExternPort = pms.ExternalPort
max_unresponsive_time := ctx.Int64("max-unresponsive-time")
config.Protocol.NatKeepAliveTimeout = max_unresponsive_time / params.DEFAULT_NAT_KEEPALIVE_RETRIES
address := common.HexToAddress(ctx.String("address"))
address, privkeyBin := promptAccount(address, ctx.String("keystore-path"), ctx.String("password-file"))
config.PrivateKeyHex = hex.EncodeToString(privkeyBin)
config.PrivateKey, err = crypto.ToECDSA(privkeyBin)
config.MyAddress = address
if err != nil {
log.Error("privkey error:", err)
utils.SystemExit(1)
}
registAddrStr := ctx.String("registry-contract-address")
if len(registAddrStr) > 0 {
config.RegistryAddress = common.HexToAddress(registAddrStr)
}
discoverAddr := ctx.String("discovery-contract-address")
if len(discoverAddr) > 0 {
config.DiscoveryAddress = common.HexToAddress(discoverAddr)
}
dataDir := ctx.String("datadir")
if len(dataDir) == 0 {
dataDir = path.Join(utils.GetHomePath(), ".smartraiden")
}
config.DataDir = dataDir
if !utils.Exists(config.DataDir) {
err = os.MkdirAll(config.DataDir, os.ModePerm)
if err != nil {
log.Error(fmt.Sprintf("Datadir:%s doesn't exist and cannot create %v", config.DataDir, err))
utils.SystemExit(1)
}
}
userDbPath := hex.EncodeToString(config.MyAddress[:])
userDbPath = userDbPath[:8]
userDbPath = filepath.Join(config.DataDir, userDbPath)
if !utils.Exists(userDbPath) {
err = os.MkdirAll(userDbPath, os.ModePerm)
if err != nil {
log.Error(fmt.Sprintf("Datadir:%s doesn't exist and cannot create %v", userDbPath, err))
utils.SystemExit(1)
}
}
databasePath := filepath.Join(userDbPath, "log.db")
config.DataBasePath = databasePath
if ctx.Bool("debugcrash") {
config.DebugCrash = true
conditionquit := ctx.String("conditionquit")
err := json.Unmarshal([]byte(conditionquit), &config.ConditionQuit)
if err != nil {
panic(fmt.Sprintf("conditioquit parse error %s", err))
}
log.Info(fmt.Sprintf("condition quit=%#v", config.ConditionQuit))
}
config.Ice.StunServer = ctx.String("turn-server")
config.Ice.TurnServer = ctx.String("turn-server")
config.Ice.TurnUser = ctx.String("turn-user")
config.Ice.TurnPassword = ctx.String("turn-pass")
config.IgnoreMediatedNodeRequest = ctx.Bool("ignore-mediatednode-request")
if ctx.String("nat") == "ice" {
config.NetworkMode = params.ICEOnly
} else if ctx.Bool("nonetwork") {
config.NetworkMode = params.NoNetwork
} else {
config.NetworkMode = params.UDPOnly
}
if ctx.Bool("fee") {
config.EnableMediationFee = true
}
config.Ice.SignalServer = ctx.String("signal-server")
log.Trace(fmt.Sprintf("signal server=%s", config.Ice.SignalServer))
return &config
}
func init() {
//many race condtions don't resolve
setNativeThreadNumber()
}
func setNativeThreadNumber() {
//runtime.GOMAXPROCS(1)
} | "ice"- Use ice framework for nat punching
[default: ice]`,
Value: "ice",
}, | random_line_split |
evaluation_confidence_mask_sinmul.py | import sys
sys.path.append("/home/aab10867zc/work/aist/pspicker/code")
import config
import utils
import pandas as pd
import numpy as np
from obspy import Trace,Stream
import matplotlib.pyplot as plt
from obspy.core import read
from glob import glob
import shutil
import math
import datetime
import random
import json
import argparse
import os
import keras
import multiprocessing
import re
import itertools
import tensorflow as tf
import keras.backend as K
import matplotlib.patches as patches
import model_multi_confidence_sta_mask_pure as MultiModel
MULTI_MODEL_PATH="/home/aab10867zc/work/aist/pspicker/training_plan/pspicker20191024T145750/4/ckpt-e021-l1.0150.h5"
import model as SingleModel
SINGLE_MODEL_PATH="/home/aab10867zc/work/aist/pspicker/training_plan/pspicker20190828T152936/4/ckpt-e026-l0.2041.h5"
TEST_DICT="/home/aab10867zc/work/aist/pspicker/metadata/pspicker_meta_test_2019-07-29.json"
MODEL_DIR="/home/aab10867zc/work/aist/pspicker/training_plan"
EVAL_DIR="/home/aab10867zc/work/aist/pspicker/evaluation/confidence_mask_sinmul_easy"
#weighted by station
class MultiInferenceConfig(config.Config):
#multi std 0110
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 1
DETECTION_MIN_CONFIDENCE=0.5
DETECTION_NMS_THRESHOLD=0.3
RPN_ANCHOR_SCALES=[1524, 2436,3648,4860,6072]
RPN_ANCHOR_RATIOS=[0.5,1,1.5,2]
DIVISION_SIZE=1028
WINDOW_STATION_DIM = 10
RPN_NMS_THRESHOLD = 0.7
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
POOL_SIZE = [WINDOW_STATION_DIM,14]
MASK_POOL_SIZE = [WINDOW_STATION_DIM,28]
MASK_SHAPE = [WINDOW_STATION_DIM,56]
BACKBONE_CONV=False
RPN_CONV=False
MRCNN_CONV=False
class SingleInferenceConfig(config.Config):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 10
DETECTION_MIN_CONFIDENCE=0
RPN_ANCHOR_SCALES=[64, 128, 256, 512, 1024]
RPN_ANCHOR_RATIOS=[0.5,1,2]
DIVISION_SIZE=1024
DETECTION_NMS_THRESHOLD=0.01
DETECTION_MIN_CONFIDENCE=0.7
CONV_STATION=False
#neighbour stations
#no substations
#eazy mode (sorted by nearest station order)
class PSpickerDataset(MultiModel.Dataset):
"""Generates the pspicker synthetic dataset. The dataset consists of
seismic waveform windows of shape (stations,time_width,channels).
"""
def load_sac(self, sac_info,shape=[10,12000,3],add_sub=True):
"""Load a subset of the pspicker dataset.
dataset_dir: The root directory of the pspicker dataset.
subset: What to load (train, val, test)
return_coco: If True, returns the COCO object.
"""
# Add classes
self.add_class("pspicker", 1, "ps")
for window_id,main_event in enumerate(sac_info["windows"]):
path = [main_event["traces"][station] for station in main_event["stations"]]
if len(path)<shape[0]:
continue
self.add_window("pspicker",window_id=window_id,main_stations=main_event["stations"],
main_name=main_event["name"],shape=shape,path=path)
def load_streams(self,window_id):
info = self.window_info[window_id]
shape=info["shape"]
streams=[]
dist = []
for event in info["path"]:
paths=list(event.values())
traces=[]
for path in paths:
trace=read(path)[0]
traces.append(trace)
stream=Stream(traces=traces)
stream.detrend("constant")
stream.filter("highpass", freq=2.0)
dist.append(stream[0].stats.sac["dist"])
for i in range(len(stream)):
stream[i].data-=np.mean(stream[i].data)
stream[i].data/=np.std(stream[i].data)
streams.append(stream)
index=np.argsort(dist)[:10]
streams = [streams[i] for i in index]
return streams
def load_window(self, window_id):
"""Generate an image from the specs of the given image ID.
Typically this function loads the image from a file, but
in this case it generates the image on the fly from the
specs in image_info.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
np.random.seed(window_id)
window=np.random.normal(0.0,0.1,shape)
for station,stream in enumerate(streams):
channel_dict={"U":0,"N":1,"E":2}
for trace in stream:
channel=channel_dict[trace.stats.channel]
npts=min(trace.stats.npts,shape[1])
window[station,:npts,channel]=trace.data
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
window=window[random_index]
return window
def window_reference(self, window_id):
"""Return the shapes data of the image."""
info = self.window_info[window_id]
if info["source"] == "pspikcer":
|
else:
super(self.__class__).window_reference(self, window_id)
def load_mask(self, window_id):
"""Generate instance masks for shapes of the given image ID.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
mask = np.zeros([shape[0], shape[1], 1], dtype=np.uint8)
for stream_id,stream in enumerate(streams):
for trace in stream:
if trace.stats.channel=="U":
start=int(round(trace.stats.sac["a"]*100))
end=int(round(trace.stats.sac["t0"]*100))
else:
continue
mask[stream_id,start:end+1,0]= 1
class_ids = np.ones([1])
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
mask[:,:,0]=mask[:,:,0][random_index]
streams=[streams[i] for i in random_index]
station=np.zeros([shape[0],shape[0],2])
for i,j in itertools.product(range(shape[0]),range(shape[0])):
station[i,j]=[streams[j][0].stats.sac["stla"]/streams[i][0].stats.sac["stla"],streams[j][0].stats.sac["stlo"]/streams[i][0].stats.sac["stlo"]]
return mask.astype(np.bool), class_ids.astype(np.int32),station.astype(np.float32)
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[-1],mask.shape[0], 2], dtype=np.int32)
for i in range(mask.shape[-1]):
# Bounding box.
for j in range(mask.shape[0]):
m = mask[j, :, i]
horizontal_indicies = np.where(m)[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
# x2 should not be part of the box. Increment by 1.
x2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2 = 0, 0
boxes[i,j] = np.array([x1, x2])
return boxes.astype(np.int32)
def compute_overlap_rate(box, boxes):
"""Calculates overlap rate of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
"""
# Calculate intersection areas
x1 = np.maximum(box[0], boxes[:, 0])
x2 = np.minimum(box[1], boxes[:, 1])
intersection = np.maximum(x2 - x1, 0)
boxes_area = boxes[:, 1] - boxes[:, 0]
overlap = intersection/boxes_area
return overlap
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def myconverter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
elif type(o).__module__ == np.__name__:
return o.__str__()
else:
return o
class Evaluation_confidence_mask():
def __init__(self,single_model,multi_model,dataset,overlap_threshold=0.3):
self.single_model=single_model
self.multi_model=multi_model
self.dataset=dataset
self.overlap_threshold=overlap_threshold
def evaluate(self,window_id=None):
test_results = []
for window_id in self.dataset.window_ids:
streams = self.dataset.load_streams(window_id)
window = self.dataset.load_window(window_id)
mask,ids,station = self.dataset.load_mask(window_id)
single_r=self.single_model.detect(np.expand_dims(window,axis=1))
r=self.multi_model.detect(np.expand_dims(window,axis=0),np.expand_dims(station,axis=0))[0]
for multi_box_id,box in enumerate(r["rois"]):
for station_id,single_result in enumerate(single_r):
overlap = compute_overlap_rate(box,single_result["rois"])
if sum(overlap > self.overlap_threshold) > 0:
single_box_id = np.argmax(overlap)
else :
continue
r["match_ids"][multi_box_id][station_id] = single_result["class_ids"][single_box_id]
r["match_scores"][multi_box_id][station_id] = single_result["scores"][single_box_id]
r["masks"][station_id][:,multi_box_id] = np.squeeze(single_result["masks"],axis=0)[:,single_box_id]
r["masks"]=extract_bboxes(r["masks"])
for key,value in r.items():
r[key]=default(value)
streams_info={}
for i,stream in enumerate(streams):
tr=stream.select(channel="U")[0]
station=tr.stats.station
sac_dict=dict(tr.stats.sac)
for key,value in sac_dict.items():
sac_dict[key]=myconverter(value)
streams_info[station]=sac_dict
if i ==0:
r["starttime"]=myconverter(tr.stats.starttime.datetime)
r["endtime"]=myconverter(tr.stats.endtime.datetime)
r["streams_info"]=streams_info
r["window_id"]=str(window_id)
r["event_id"]=self.dataset.window_info[window_id]["main_name"]
test_results.append(r)
if int(window_id)%500 ==0:
print("{}% done.".format(int(window_id)/len(self.dataset.window_ids)))
return test_results
def write_json(self,metadata,dir_path):
json_name="pspicker_meta.json"
with open(os.path.join(dir_path,json_name),"w") as outfile:
json.dump(metadata,outfile)
def main():
single_config=SingleInferenceConfig()
single_config.display()
multi_config=MultiInferenceConfig()
multi_config.display()
single_model=SingleModel.MaskRCNN(mode="inference", config=single_config,
model_dir=MODEL_DIR)
single_model.load_weights(SINGLE_MODEL_PATH,by_name=True)
print("Single station model has been loaded.")
multi_model=MultiModel.MaskRCNN(mode="inference", config=multi_config,
model_dir=MODEL_DIR)
multi_model.load_weights(MULTI_MODEL_PATH,by_name=True)
print("Multi station model has been loaded.")
with open(TEST_DICT)as f:
test_dict=json.load(f)
dataset=PSpickerDataset()
dataset.load_sac(test_dict,add_sub=False)
dataset.prepare()
print("Start evaluation process.")
evaluation = Evaluation_confidence_mask(single_model,multi_model,dataset,overlap_threshold=0.3)
results = evaluation.evaluate()
evaluation.write_json(results,EVAL_DIR)
if __name__ == '__main__':
main()
| return info["station"] | conditional_block |
evaluation_confidence_mask_sinmul.py | import sys
sys.path.append("/home/aab10867zc/work/aist/pspicker/code")
import config
import utils
import pandas as pd
import numpy as np
from obspy import Trace,Stream
import matplotlib.pyplot as plt
from obspy.core import read
from glob import glob
import shutil
import math
import datetime
import random
import json
import argparse
import os
import keras
import multiprocessing
import re
import itertools
import tensorflow as tf
import keras.backend as K
import matplotlib.patches as patches
import model_multi_confidence_sta_mask_pure as MultiModel
MULTI_MODEL_PATH="/home/aab10867zc/work/aist/pspicker/training_plan/pspicker20191024T145750/4/ckpt-e021-l1.0150.h5"
import model as SingleModel
SINGLE_MODEL_PATH="/home/aab10867zc/work/aist/pspicker/training_plan/pspicker20190828T152936/4/ckpt-e026-l0.2041.h5"
TEST_DICT="/home/aab10867zc/work/aist/pspicker/metadata/pspicker_meta_test_2019-07-29.json"
MODEL_DIR="/home/aab10867zc/work/aist/pspicker/training_plan"
EVAL_DIR="/home/aab10867zc/work/aist/pspicker/evaluation/confidence_mask_sinmul_easy"
#weighted by station
class MultiInferenceConfig(config.Config):
#multi std 0110
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 1
DETECTION_MIN_CONFIDENCE=0.5
DETECTION_NMS_THRESHOLD=0.3
RPN_ANCHOR_SCALES=[1524, 2436,3648,4860,6072]
RPN_ANCHOR_RATIOS=[0.5,1,1.5,2]
DIVISION_SIZE=1028
WINDOW_STATION_DIM = 10
RPN_NMS_THRESHOLD = 0.7
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
POOL_SIZE = [WINDOW_STATION_DIM,14]
MASK_POOL_SIZE = [WINDOW_STATION_DIM,28]
MASK_SHAPE = [WINDOW_STATION_DIM,56]
BACKBONE_CONV=False
RPN_CONV=False
MRCNN_CONV=False
class SingleInferenceConfig(config.Config):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 10
DETECTION_MIN_CONFIDENCE=0
RPN_ANCHOR_SCALES=[64, 128, 256, 512, 1024]
RPN_ANCHOR_RATIOS=[0.5,1,2]
DIVISION_SIZE=1024
DETECTION_NMS_THRESHOLD=0.01
DETECTION_MIN_CONFIDENCE=0.7
CONV_STATION=False
#neighbour stations
#no substations
#eazy mode (sorted by nearest station order)
class PSpickerDataset(MultiModel.Dataset):
"""Generates the pspicker synthetic dataset. The dataset consists of
seismic waveform windows of shape (stations,time_width,channels).
"""
def load_sac(self, sac_info,shape=[10,12000,3],add_sub=True):
"""Load a subset of the pspicker dataset.
dataset_dir: The root directory of the pspicker dataset.
subset: What to load (train, val, test)
return_coco: If True, returns the COCO object.
"""
# Add classes
self.add_class("pspicker", 1, "ps")
for window_id,main_event in enumerate(sac_info["windows"]):
path = [main_event["traces"][station] for station in main_event["stations"]]
if len(path)<shape[0]:
continue
self.add_window("pspicker",window_id=window_id,main_stations=main_event["stations"],
main_name=main_event["name"],shape=shape,path=path)
def load_streams(self,window_id):
info = self.window_info[window_id]
shape=info["shape"]
streams=[]
dist = []
for event in info["path"]:
paths=list(event.values())
traces=[]
for path in paths:
trace=read(path)[0]
traces.append(trace)
stream=Stream(traces=traces)
stream.detrend("constant")
stream.filter("highpass", freq=2.0)
dist.append(stream[0].stats.sac["dist"])
for i in range(len(stream)):
stream[i].data-=np.mean(stream[i].data)
stream[i].data/=np.std(stream[i].data)
streams.append(stream)
index=np.argsort(dist)[:10]
streams = [streams[i] for i in index]
return streams
def load_window(self, window_id):
"""Generate an image from the specs of the given image ID.
Typically this function loads the image from a file, but
in this case it generates the image on the fly from the
specs in image_info.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
np.random.seed(window_id)
window=np.random.normal(0.0,0.1,shape)
for station,stream in enumerate(streams):
channel_dict={"U":0,"N":1,"E":2}
for trace in stream:
channel=channel_dict[trace.stats.channel]
npts=min(trace.stats.npts,shape[1])
window[station,:npts,channel]=trace.data
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
window=window[random_index]
return window
def window_reference(self, window_id):
"""Return the shapes data of the image."""
info = self.window_info[window_id]
if info["source"] == "pspikcer":
return info["station"]
else:
super(self.__class__).window_reference(self, window_id)
def load_mask(self, window_id):
"""Generate instance masks for shapes of the given image ID.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
mask = np.zeros([shape[0], shape[1], 1], dtype=np.uint8)
for stream_id,stream in enumerate(streams):
for trace in stream:
if trace.stats.channel=="U":
start=int(round(trace.stats.sac["a"]*100))
end=int(round(trace.stats.sac["t0"]*100))
else:
continue
mask[stream_id,start:end+1,0]= 1
class_ids = np.ones([1])
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
mask[:,:,0]=mask[:,:,0][random_index]
streams=[streams[i] for i in random_index]
station=np.zeros([shape[0],shape[0],2])
for i,j in itertools.product(range(shape[0]),range(shape[0])):
station[i,j]=[streams[j][0].stats.sac["stla"]/streams[i][0].stats.sac["stla"],streams[j][0].stats.sac["stlo"]/streams[i][0].stats.sac["stlo"]]
return mask.astype(np.bool), class_ids.astype(np.int32),station.astype(np.float32)
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[-1],mask.shape[0], 2], dtype=np.int32)
for i in range(mask.shape[-1]):
# Bounding box.
for j in range(mask.shape[0]):
m = mask[j, :, i]
horizontal_indicies = np.where(m)[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
# x2 should not be part of the box. Increment by 1.
x2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2 = 0, 0
boxes[i,j] = np.array([x1, x2])
return boxes.astype(np.int32)
def compute_overlap_rate(box, boxes):
"""Calculates overlap rate of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
"""
# Calculate intersection areas
x1 = np.maximum(box[0], boxes[:, 0])
x2 = np.minimum(box[1], boxes[:, 1])
intersection = np.maximum(x2 - x1, 0)
boxes_area = boxes[:, 1] - boxes[:, 0]
overlap = intersection/boxes_area
return overlap
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def myconverter(o):
if isinstance(o, datetime.datetime): | else:
return o
class Evaluation_confidence_mask():
def __init__(self,single_model,multi_model,dataset,overlap_threshold=0.3):
self.single_model=single_model
self.multi_model=multi_model
self.dataset=dataset
self.overlap_threshold=overlap_threshold
def evaluate(self,window_id=None):
test_results = []
for window_id in self.dataset.window_ids:
streams = self.dataset.load_streams(window_id)
window = self.dataset.load_window(window_id)
mask,ids,station = self.dataset.load_mask(window_id)
single_r=self.single_model.detect(np.expand_dims(window,axis=1))
r=self.multi_model.detect(np.expand_dims(window,axis=0),np.expand_dims(station,axis=0))[0]
for multi_box_id,box in enumerate(r["rois"]):
for station_id,single_result in enumerate(single_r):
overlap = compute_overlap_rate(box,single_result["rois"])
if sum(overlap > self.overlap_threshold) > 0:
single_box_id = np.argmax(overlap)
else :
continue
r["match_ids"][multi_box_id][station_id] = single_result["class_ids"][single_box_id]
r["match_scores"][multi_box_id][station_id] = single_result["scores"][single_box_id]
r["masks"][station_id][:,multi_box_id] = np.squeeze(single_result["masks"],axis=0)[:,single_box_id]
r["masks"]=extract_bboxes(r["masks"])
for key,value in r.items():
r[key]=default(value)
streams_info={}
for i,stream in enumerate(streams):
tr=stream.select(channel="U")[0]
station=tr.stats.station
sac_dict=dict(tr.stats.sac)
for key,value in sac_dict.items():
sac_dict[key]=myconverter(value)
streams_info[station]=sac_dict
if i ==0:
r["starttime"]=myconverter(tr.stats.starttime.datetime)
r["endtime"]=myconverter(tr.stats.endtime.datetime)
r["streams_info"]=streams_info
r["window_id"]=str(window_id)
r["event_id"]=self.dataset.window_info[window_id]["main_name"]
test_results.append(r)
if int(window_id)%500 ==0:
print("{}% done.".format(int(window_id)/len(self.dataset.window_ids)))
return test_results
def write_json(self,metadata,dir_path):
json_name="pspicker_meta.json"
with open(os.path.join(dir_path,json_name),"w") as outfile:
json.dump(metadata,outfile)
def main():
single_config=SingleInferenceConfig()
single_config.display()
multi_config=MultiInferenceConfig()
multi_config.display()
single_model=SingleModel.MaskRCNN(mode="inference", config=single_config,
model_dir=MODEL_DIR)
single_model.load_weights(SINGLE_MODEL_PATH,by_name=True)
print("Single station model has been loaded.")
multi_model=MultiModel.MaskRCNN(mode="inference", config=multi_config,
model_dir=MODEL_DIR)
multi_model.load_weights(MULTI_MODEL_PATH,by_name=True)
print("Multi station model has been loaded.")
with open(TEST_DICT)as f:
test_dict=json.load(f)
dataset=PSpickerDataset()
dataset.load_sac(test_dict,add_sub=False)
dataset.prepare()
print("Start evaluation process.")
evaluation = Evaluation_confidence_mask(single_model,multi_model,dataset,overlap_threshold=0.3)
results = evaluation.evaluate()
evaluation.write_json(results,EVAL_DIR)
if __name__ == '__main__':
main() | return o.__str__()
elif type(o).__module__ == np.__name__:
return o.__str__() | random_line_split |
evaluation_confidence_mask_sinmul.py | import sys
sys.path.append("/home/aab10867zc/work/aist/pspicker/code")
import config
import utils
import pandas as pd
import numpy as np
from obspy import Trace,Stream
import matplotlib.pyplot as plt
from obspy.core import read
from glob import glob
import shutil
import math
import datetime
import random
import json
import argparse
import os
import keras
import multiprocessing
import re
import itertools
import tensorflow as tf
import keras.backend as K
import matplotlib.patches as patches
import model_multi_confidence_sta_mask_pure as MultiModel
MULTI_MODEL_PATH="/home/aab10867zc/work/aist/pspicker/training_plan/pspicker20191024T145750/4/ckpt-e021-l1.0150.h5"
import model as SingleModel
SINGLE_MODEL_PATH="/home/aab10867zc/work/aist/pspicker/training_plan/pspicker20190828T152936/4/ckpt-e026-l0.2041.h5"
TEST_DICT="/home/aab10867zc/work/aist/pspicker/metadata/pspicker_meta_test_2019-07-29.json"
MODEL_DIR="/home/aab10867zc/work/aist/pspicker/training_plan"
EVAL_DIR="/home/aab10867zc/work/aist/pspicker/evaluation/confidence_mask_sinmul_easy"
#weighted by station
class MultiInferenceConfig(config.Config):
#multi std 0110
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 1
DETECTION_MIN_CONFIDENCE=0.5
DETECTION_NMS_THRESHOLD=0.3
RPN_ANCHOR_SCALES=[1524, 2436,3648,4860,6072]
RPN_ANCHOR_RATIOS=[0.5,1,1.5,2]
DIVISION_SIZE=1028
WINDOW_STATION_DIM = 10
RPN_NMS_THRESHOLD = 0.7
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
POOL_SIZE = [WINDOW_STATION_DIM,14]
MASK_POOL_SIZE = [WINDOW_STATION_DIM,28]
MASK_SHAPE = [WINDOW_STATION_DIM,56]
BACKBONE_CONV=False
RPN_CONV=False
MRCNN_CONV=False
class SingleInferenceConfig(config.Config):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 10
DETECTION_MIN_CONFIDENCE=0
RPN_ANCHOR_SCALES=[64, 128, 256, 512, 1024]
RPN_ANCHOR_RATIOS=[0.5,1,2]
DIVISION_SIZE=1024
DETECTION_NMS_THRESHOLD=0.01
DETECTION_MIN_CONFIDENCE=0.7
CONV_STATION=False
#neighbour stations
#no substations
#eazy mode (sorted by nearest station order)
class PSpickerDataset(MultiModel.Dataset):
"""Generates the pspicker synthetic dataset. The dataset consists of
seismic waveform windows of shape (stations,time_width,channels).
"""
def load_sac(self, sac_info,shape=[10,12000,3],add_sub=True):
"""Load a subset of the pspicker dataset.
dataset_dir: The root directory of the pspicker dataset.
subset: What to load (train, val, test)
return_coco: If True, returns the COCO object.
"""
# Add classes
self.add_class("pspicker", 1, "ps")
for window_id,main_event in enumerate(sac_info["windows"]):
path = [main_event["traces"][station] for station in main_event["stations"]]
if len(path)<shape[0]:
continue
self.add_window("pspicker",window_id=window_id,main_stations=main_event["stations"],
main_name=main_event["name"],shape=shape,path=path)
def load_streams(self,window_id):
|
def load_window(self, window_id):
"""Generate an image from the specs of the given image ID.
Typically this function loads the image from a file, but
in this case it generates the image on the fly from the
specs in image_info.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
np.random.seed(window_id)
window=np.random.normal(0.0,0.1,shape)
for station,stream in enumerate(streams):
channel_dict={"U":0,"N":1,"E":2}
for trace in stream:
channel=channel_dict[trace.stats.channel]
npts=min(trace.stats.npts,shape[1])
window[station,:npts,channel]=trace.data
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
window=window[random_index]
return window
def window_reference(self, window_id):
"""Return the shapes data of the image."""
info = self.window_info[window_id]
if info["source"] == "pspikcer":
return info["station"]
else:
super(self.__class__).window_reference(self, window_id)
def load_mask(self, window_id):
"""Generate instance masks for shapes of the given image ID.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
mask = np.zeros([shape[0], shape[1], 1], dtype=np.uint8)
for stream_id,stream in enumerate(streams):
for trace in stream:
if trace.stats.channel=="U":
start=int(round(trace.stats.sac["a"]*100))
end=int(round(trace.stats.sac["t0"]*100))
else:
continue
mask[stream_id,start:end+1,0]= 1
class_ids = np.ones([1])
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
mask[:,:,0]=mask[:,:,0][random_index]
streams=[streams[i] for i in random_index]
station=np.zeros([shape[0],shape[0],2])
for i,j in itertools.product(range(shape[0]),range(shape[0])):
station[i,j]=[streams[j][0].stats.sac["stla"]/streams[i][0].stats.sac["stla"],streams[j][0].stats.sac["stlo"]/streams[i][0].stats.sac["stlo"]]
return mask.astype(np.bool), class_ids.astype(np.int32),station.astype(np.float32)
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[-1],mask.shape[0], 2], dtype=np.int32)
for i in range(mask.shape[-1]):
# Bounding box.
for j in range(mask.shape[0]):
m = mask[j, :, i]
horizontal_indicies = np.where(m)[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
# x2 should not be part of the box. Increment by 1.
x2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2 = 0, 0
boxes[i,j] = np.array([x1, x2])
return boxes.astype(np.int32)
def compute_overlap_rate(box, boxes):
"""Calculates overlap rate of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
"""
# Calculate intersection areas
x1 = np.maximum(box[0], boxes[:, 0])
x2 = np.minimum(box[1], boxes[:, 1])
intersection = np.maximum(x2 - x1, 0)
boxes_area = boxes[:, 1] - boxes[:, 0]
overlap = intersection/boxes_area
return overlap
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def myconverter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
elif type(o).__module__ == np.__name__:
return o.__str__()
else:
return o
class Evaluation_confidence_mask():
def __init__(self,single_model,multi_model,dataset,overlap_threshold=0.3):
self.single_model=single_model
self.multi_model=multi_model
self.dataset=dataset
self.overlap_threshold=overlap_threshold
def evaluate(self,window_id=None):
test_results = []
for window_id in self.dataset.window_ids:
streams = self.dataset.load_streams(window_id)
window = self.dataset.load_window(window_id)
mask,ids,station = self.dataset.load_mask(window_id)
single_r=self.single_model.detect(np.expand_dims(window,axis=1))
r=self.multi_model.detect(np.expand_dims(window,axis=0),np.expand_dims(station,axis=0))[0]
for multi_box_id,box in enumerate(r["rois"]):
for station_id,single_result in enumerate(single_r):
overlap = compute_overlap_rate(box,single_result["rois"])
if sum(overlap > self.overlap_threshold) > 0:
single_box_id = np.argmax(overlap)
else :
continue
r["match_ids"][multi_box_id][station_id] = single_result["class_ids"][single_box_id]
r["match_scores"][multi_box_id][station_id] = single_result["scores"][single_box_id]
r["masks"][station_id][:,multi_box_id] = np.squeeze(single_result["masks"],axis=0)[:,single_box_id]
r["masks"]=extract_bboxes(r["masks"])
for key,value in r.items():
r[key]=default(value)
streams_info={}
for i,stream in enumerate(streams):
tr=stream.select(channel="U")[0]
station=tr.stats.station
sac_dict=dict(tr.stats.sac)
for key,value in sac_dict.items():
sac_dict[key]=myconverter(value)
streams_info[station]=sac_dict
if i ==0:
r["starttime"]=myconverter(tr.stats.starttime.datetime)
r["endtime"]=myconverter(tr.stats.endtime.datetime)
r["streams_info"]=streams_info
r["window_id"]=str(window_id)
r["event_id"]=self.dataset.window_info[window_id]["main_name"]
test_results.append(r)
if int(window_id)%500 ==0:
print("{}% done.".format(int(window_id)/len(self.dataset.window_ids)))
return test_results
def write_json(self,metadata,dir_path):
json_name="pspicker_meta.json"
with open(os.path.join(dir_path,json_name),"w") as outfile:
json.dump(metadata,outfile)
def main():
single_config=SingleInferenceConfig()
single_config.display()
multi_config=MultiInferenceConfig()
multi_config.display()
single_model=SingleModel.MaskRCNN(mode="inference", config=single_config,
model_dir=MODEL_DIR)
single_model.load_weights(SINGLE_MODEL_PATH,by_name=True)
print("Single station model has been loaded.")
multi_model=MultiModel.MaskRCNN(mode="inference", config=multi_config,
model_dir=MODEL_DIR)
multi_model.load_weights(MULTI_MODEL_PATH,by_name=True)
print("Multi station model has been loaded.")
with open(TEST_DICT)as f:
test_dict=json.load(f)
dataset=PSpickerDataset()
dataset.load_sac(test_dict,add_sub=False)
dataset.prepare()
print("Start evaluation process.")
evaluation = Evaluation_confidence_mask(single_model,multi_model,dataset,overlap_threshold=0.3)
results = evaluation.evaluate()
evaluation.write_json(results,EVAL_DIR)
if __name__ == '__main__':
main()
| info = self.window_info[window_id]
shape=info["shape"]
streams=[]
dist = []
for event in info["path"]:
paths=list(event.values())
traces=[]
for path in paths:
trace=read(path)[0]
traces.append(trace)
stream=Stream(traces=traces)
stream.detrend("constant")
stream.filter("highpass", freq=2.0)
dist.append(stream[0].stats.sac["dist"])
for i in range(len(stream)):
stream[i].data-=np.mean(stream[i].data)
stream[i].data/=np.std(stream[i].data)
streams.append(stream)
index=np.argsort(dist)[:10]
streams = [streams[i] for i in index]
return streams | identifier_body |
evaluation_confidence_mask_sinmul.py | import sys
sys.path.append("/home/aab10867zc/work/aist/pspicker/code")
import config
import utils
import pandas as pd
import numpy as np
from obspy import Trace,Stream
import matplotlib.pyplot as plt
from obspy.core import read
from glob import glob
import shutil
import math
import datetime
import random
import json
import argparse
import os
import keras
import multiprocessing
import re
import itertools
import tensorflow as tf
import keras.backend as K
import matplotlib.patches as patches
import model_multi_confidence_sta_mask_pure as MultiModel
MULTI_MODEL_PATH="/home/aab10867zc/work/aist/pspicker/training_plan/pspicker20191024T145750/4/ckpt-e021-l1.0150.h5"
import model as SingleModel
SINGLE_MODEL_PATH="/home/aab10867zc/work/aist/pspicker/training_plan/pspicker20190828T152936/4/ckpt-e026-l0.2041.h5"
TEST_DICT="/home/aab10867zc/work/aist/pspicker/metadata/pspicker_meta_test_2019-07-29.json"
MODEL_DIR="/home/aab10867zc/work/aist/pspicker/training_plan"
EVAL_DIR="/home/aab10867zc/work/aist/pspicker/evaluation/confidence_mask_sinmul_easy"
#weighted by station
class MultiInferenceConfig(config.Config):
#multi std 0110
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 1
DETECTION_MIN_CONFIDENCE=0.5
DETECTION_NMS_THRESHOLD=0.3
RPN_ANCHOR_SCALES=[1524, 2436,3648,4860,6072]
RPN_ANCHOR_RATIOS=[0.5,1,1.5,2]
DIVISION_SIZE=1028
WINDOW_STATION_DIM = 10
RPN_NMS_THRESHOLD = 0.7
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
POOL_SIZE = [WINDOW_STATION_DIM,14]
MASK_POOL_SIZE = [WINDOW_STATION_DIM,28]
MASK_SHAPE = [WINDOW_STATION_DIM,56]
BACKBONE_CONV=False
RPN_CONV=False
MRCNN_CONV=False
class SingleInferenceConfig(config.Config):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 10
DETECTION_MIN_CONFIDENCE=0
RPN_ANCHOR_SCALES=[64, 128, 256, 512, 1024]
RPN_ANCHOR_RATIOS=[0.5,1,2]
DIVISION_SIZE=1024
DETECTION_NMS_THRESHOLD=0.01
DETECTION_MIN_CONFIDENCE=0.7
CONV_STATION=False
#neighbour stations
#no substations
#eazy mode (sorted by nearest station order)
class PSpickerDataset(MultiModel.Dataset):
"""Generates the pspicker synthetic dataset. The dataset consists of
seismic waveform windows of shape (stations,time_width,channels).
"""
def load_sac(self, sac_info,shape=[10,12000,3],add_sub=True):
"""Load a subset of the pspicker dataset.
dataset_dir: The root directory of the pspicker dataset.
subset: What to load (train, val, test)
return_coco: If True, returns the COCO object.
"""
# Add classes
self.add_class("pspicker", 1, "ps")
for window_id,main_event in enumerate(sac_info["windows"]):
path = [main_event["traces"][station] for station in main_event["stations"]]
if len(path)<shape[0]:
continue
self.add_window("pspicker",window_id=window_id,main_stations=main_event["stations"],
main_name=main_event["name"],shape=shape,path=path)
def load_streams(self,window_id):
info = self.window_info[window_id]
shape=info["shape"]
streams=[]
dist = []
for event in info["path"]:
paths=list(event.values())
traces=[]
for path in paths:
trace=read(path)[0]
traces.append(trace)
stream=Stream(traces=traces)
stream.detrend("constant")
stream.filter("highpass", freq=2.0)
dist.append(stream[0].stats.sac["dist"])
for i in range(len(stream)):
stream[i].data-=np.mean(stream[i].data)
stream[i].data/=np.std(stream[i].data)
streams.append(stream)
index=np.argsort(dist)[:10]
streams = [streams[i] for i in index]
return streams
def load_window(self, window_id):
"""Generate an image from the specs of the given image ID.
Typically this function loads the image from a file, but
in this case it generates the image on the fly from the
specs in image_info.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
np.random.seed(window_id)
window=np.random.normal(0.0,0.1,shape)
for station,stream in enumerate(streams):
channel_dict={"U":0,"N":1,"E":2}
for trace in stream:
channel=channel_dict[trace.stats.channel]
npts=min(trace.stats.npts,shape[1])
window[station,:npts,channel]=trace.data
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
window=window[random_index]
return window
def window_reference(self, window_id):
"""Return the shapes data of the image."""
info = self.window_info[window_id]
if info["source"] == "pspikcer":
return info["station"]
else:
super(self.__class__).window_reference(self, window_id)
def load_mask(self, window_id):
"""Generate instance masks for shapes of the given image ID.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
mask = np.zeros([shape[0], shape[1], 1], dtype=np.uint8)
for stream_id,stream in enumerate(streams):
for trace in stream:
if trace.stats.channel=="U":
start=int(round(trace.stats.sac["a"]*100))
end=int(round(trace.stats.sac["t0"]*100))
else:
continue
mask[stream_id,start:end+1,0]= 1
class_ids = np.ones([1])
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
mask[:,:,0]=mask[:,:,0][random_index]
streams=[streams[i] for i in random_index]
station=np.zeros([shape[0],shape[0],2])
for i,j in itertools.product(range(shape[0]),range(shape[0])):
station[i,j]=[streams[j][0].stats.sac["stla"]/streams[i][0].stats.sac["stla"],streams[j][0].stats.sac["stlo"]/streams[i][0].stats.sac["stlo"]]
return mask.astype(np.bool), class_ids.astype(np.int32),station.astype(np.float32)
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[-1],mask.shape[0], 2], dtype=np.int32)
for i in range(mask.shape[-1]):
# Bounding box.
for j in range(mask.shape[0]):
m = mask[j, :, i]
horizontal_indicies = np.where(m)[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
# x2 should not be part of the box. Increment by 1.
x2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2 = 0, 0
boxes[i,j] = np.array([x1, x2])
return boxes.astype(np.int32)
def compute_overlap_rate(box, boxes):
"""Calculates overlap rate of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
"""
# Calculate intersection areas
x1 = np.maximum(box[0], boxes[:, 0])
x2 = np.minimum(box[1], boxes[:, 1])
intersection = np.maximum(x2 - x1, 0)
boxes_area = boxes[:, 1] - boxes[:, 0]
overlap = intersection/boxes_area
return overlap
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def myconverter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
elif type(o).__module__ == np.__name__:
return o.__str__()
else:
return o
class Evaluation_confidence_mask():
def | (self,single_model,multi_model,dataset,overlap_threshold=0.3):
self.single_model=single_model
self.multi_model=multi_model
self.dataset=dataset
self.overlap_threshold=overlap_threshold
def evaluate(self,window_id=None):
test_results = []
for window_id in self.dataset.window_ids:
streams = self.dataset.load_streams(window_id)
window = self.dataset.load_window(window_id)
mask,ids,station = self.dataset.load_mask(window_id)
single_r=self.single_model.detect(np.expand_dims(window,axis=1))
r=self.multi_model.detect(np.expand_dims(window,axis=0),np.expand_dims(station,axis=0))[0]
for multi_box_id,box in enumerate(r["rois"]):
for station_id,single_result in enumerate(single_r):
overlap = compute_overlap_rate(box,single_result["rois"])
if sum(overlap > self.overlap_threshold) > 0:
single_box_id = np.argmax(overlap)
else :
continue
r["match_ids"][multi_box_id][station_id] = single_result["class_ids"][single_box_id]
r["match_scores"][multi_box_id][station_id] = single_result["scores"][single_box_id]
r["masks"][station_id][:,multi_box_id] = np.squeeze(single_result["masks"],axis=0)[:,single_box_id]
r["masks"]=extract_bboxes(r["masks"])
for key,value in r.items():
r[key]=default(value)
streams_info={}
for i,stream in enumerate(streams):
tr=stream.select(channel="U")[0]
station=tr.stats.station
sac_dict=dict(tr.stats.sac)
for key,value in sac_dict.items():
sac_dict[key]=myconverter(value)
streams_info[station]=sac_dict
if i ==0:
r["starttime"]=myconverter(tr.stats.starttime.datetime)
r["endtime"]=myconverter(tr.stats.endtime.datetime)
r["streams_info"]=streams_info
r["window_id"]=str(window_id)
r["event_id"]=self.dataset.window_info[window_id]["main_name"]
test_results.append(r)
if int(window_id)%500 ==0:
print("{}% done.".format(int(window_id)/len(self.dataset.window_ids)))
return test_results
def write_json(self,metadata,dir_path):
json_name="pspicker_meta.json"
with open(os.path.join(dir_path,json_name),"w") as outfile:
json.dump(metadata,outfile)
def main():
single_config=SingleInferenceConfig()
single_config.display()
multi_config=MultiInferenceConfig()
multi_config.display()
single_model=SingleModel.MaskRCNN(mode="inference", config=single_config,
model_dir=MODEL_DIR)
single_model.load_weights(SINGLE_MODEL_PATH,by_name=True)
print("Single station model has been loaded.")
multi_model=MultiModel.MaskRCNN(mode="inference", config=multi_config,
model_dir=MODEL_DIR)
multi_model.load_weights(MULTI_MODEL_PATH,by_name=True)
print("Multi station model has been loaded.")
with open(TEST_DICT)as f:
test_dict=json.load(f)
dataset=PSpickerDataset()
dataset.load_sac(test_dict,add_sub=False)
dataset.prepare()
print("Start evaluation process.")
evaluation = Evaluation_confidence_mask(single_model,multi_model,dataset,overlap_threshold=0.3)
results = evaluation.evaluate()
evaluation.write_json(results,EVAL_DIR)
if __name__ == '__main__':
main()
| __init__ | identifier_name |
lib.rs | use url::*;
pub struct Example<'a> {
dirty: &'a str,
clean: &'a str,
}
impl<'a> Example<'a> {
pub const fn new(dirty: &'a str, clean: &'a str) -> Self {
Self { dirty, clean }
}
}
/// Contains directives on how to extract the link from a click-tracking link forwarder.
pub struct CleanInformation<'a> {
/// The domain which is used to forward
domain: &'a str,
/// The path at the given domain that will the tracking-url will send tracking information to
path: &'a str,
/// The query parameter that the actual link of interest is sent as
querykey: &'a str,
#[allow(unused)]
example: Option<Example<'a>>,
}
/// When these keys are part of the url query parameters, they will be removed from the link
/// So that if the parameters contain something like "www.example.com/foo?param1=bar&fbclid=1234",
/// the resulting query string will become something simlar to "www.example.com/foo?param1=bar"
/// with the click id query parameter "fbclid" removed
const KEYS_TO_CLEAN: [&'static str; 3] = ["fbclid", "custlinkid", "gclid"];
/// Five commonly used tracking forwarders that are going to be cleaned
const DOMAINS_TO_CLEAN: [CleanInformation<'static>; 7] = {
[
CleanInformation {
domain: "l.facebook.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new("https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ",
"https://www.youtube.com/watch?v=uBKajwUM5v4")),
},
CleanInformation {
domain: "l.messenger.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new(
"https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U",
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "url",
example: Some(
Example::new(
"https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g",
"https://meet.lync.com/skydrive3m-mmm/random/random?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "q",
example: None
},
CleanInformation {
domain: "external.fbma2-1.fna.fbcdn.net",
path: "/safe_image.php",
querykey: "url",
example: Some(
Example::new(
"https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf",
"https://i.redd.it/4wao306sl9931.jpg?"
)
),
},
CleanInformation {
domain: "www.youtube.com",
path: "/redirect",
querykey: "q",
example: Some(
Example::new(
"https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA",
"https://forms.gle/QDyXJVu6x24UYErEA?"
)
),
},
CleanInformation {
domain: "eur02.safelinks.protection.outlook.com",
path: "/",
querykey: "url",
example: Some(
Example::new(
"https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0",
"http://www.regitart.se/Default.aspx?")
),
},
]
};
pub struct UrlCleaner<'a> {
/// Information on how to obtain the link from a tracking link
cleaning_info: Vec<CleanInformation<'a>>,
/// list of known tracking query keys
tracker_query_keys: Vec<String>,
}
impl<'a> Default for UrlCleaner<'a> {
fn default() -> Self {
let cleaning_info = DOMAINS_TO_CLEAN.into();
let tracker_query_keys = KEYS_TO_CLEAN.iter().map(|s| s.to_string()).collect();
Self {
cleaning_info,
tracker_query_keys,
}
}
}
impl<'a> UrlCleaner<'a> {
// remove the click-id and similar query that can sometimes come hidden inside links
fn clean_query(&self, url: &url::Url) -> (url::Url, bool) {
let pairs = url.query_pairs();
let mut newurl = url.clone();
newurl.query_pairs_mut().clear();
let mut modified = false;
for (key, value) in pairs {
if self.tracker_query_keys.contains(&key.as_ref().to_string()) {
println!("key found: {:?}", key);
modified = true;
} else {
newurl.query_pairs_mut().append_pair(&key, &value);
}
}
(newurl, modified)
}
/// try to extract the destination url from the link if possible and also try to remove the click-id
/// query parameters that are available, if the content has been modified return Some, or if
/// the content is untouched, return None
pub fn clean_url(&self, url: &url::Url) -> Option<String> {
if let Some(domain) = url.domain() {
// Check all rules that matches this domain, but return on the first clean
for domaininfo in self.cleaning_info.iter().filter(|&x| x.domain == domain) {
if domaininfo.path == url.path() {
println!("{}", url);
println!("Discusting url, cleaning");
let pairs = url.query_pairs();
// First search all the queries for the link querykey
for (key, value) in pairs {
if key.as_ref() == domaininfo.querykey {
if let Ok(url) = Url::parse(&value) {
// Before returning, remove any click identifier as well
return Some(self.clean_query(&url).0.to_string());
}
}
}
}
}
//println!("Url is clean");
// Check if there is a click identifier, and return if there is one
let (url, modified) = self.clean_query(&url);
if modified {
return Some(url.to_string());
}
}
None
}
pub fn try_clean_string(&self, url_string: String) -> String {
if let Ok(parsed) = Url::parse(&url_string) {
if let Some(clean) = self.clean_url(&parsed) {
return clean;
}
}
url_string
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn clean_facebook() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ";
let url_clean = "https://www.youtube.com/watch?v=uBKajwUM5v4";
| let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook2() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.banggood.com%2FXT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html%3Fp%3DJQ191716342021201711%26custlinkid%3D37737%26fbclid%3DIwAR0ZRlKtl4NJgkCGMuiVNuxnL3GUVnw0kCLSmwNFD_xqiUv83U_dVP-6X8A&h=AT1jV6cBYrlCCqMs2RUB2mHXcyuSq4zO_1safL4SYIvxkwWVDs7xViyTB1dYm-84aACs8qfshYEHY0pS8o2H0cdRw51mK9ZQGmKZlodbgvCkZhs3v1LxumxDGCHcIey-8M1sLH1gXAN6";
let url_clean = "https://www.banggood.com/XT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html?p=JQ191716342021201711";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_messenger() {
let url_dirty ="https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U";
let url_clean =
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_google_meeting() {
let url = "https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g";
let url_clean = "https://meet.lync.com/skydrive3m-mmm/random/random?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook_image() {
let url = "https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf";
let url_clean = "https://i.redd.it/4wao306sl9931.jpg?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_youtube_chat_link() {
let url = "https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA";
let url_clean = "https://forms.gle/QDyXJVu6x24UYErEA?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_teams_link() {
let url = "https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0";
let url_clean = "http://www.regitart.se/Default.aspx?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn test_all_examples() {
for cleaner in &DOMAINS_TO_CLEAN {
if let Some(example) = &cleaner.example {
let url_dirty = &example.dirty;
let url_clean = &example.clean;
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(&clean, url_clean);
}
}
}
} | let parsed = Url::parse(&url_dirty).unwrap(); | random_line_split |
lib.rs | use url::*;
pub struct Example<'a> {
dirty: &'a str,
clean: &'a str,
}
impl<'a> Example<'a> {
pub const fn new(dirty: &'a str, clean: &'a str) -> Self {
Self { dirty, clean }
}
}
/// Contains directives on how to extract the link from a click-tracking link forwarder.
pub struct CleanInformation<'a> {
/// The domain which is used to forward
domain: &'a str,
/// The path at the given domain that will the tracking-url will send tracking information to
path: &'a str,
/// The query parameter that the actual link of interest is sent as
querykey: &'a str,
#[allow(unused)]
example: Option<Example<'a>>,
}
/// When these keys are part of the url query parameters, they will be removed from the link
/// So that if the parameters contain something like "www.example.com/foo?param1=bar&fbclid=1234",
/// the resulting query string will become something simlar to "www.example.com/foo?param1=bar"
/// with the click id query parameter "fbclid" removed
const KEYS_TO_CLEAN: [&'static str; 3] = ["fbclid", "custlinkid", "gclid"];
/// Five commonly used tracking forwarders that are going to be cleaned
const DOMAINS_TO_CLEAN: [CleanInformation<'static>; 7] = {
[
CleanInformation {
domain: "l.facebook.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new("https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ",
"https://www.youtube.com/watch?v=uBKajwUM5v4")),
},
CleanInformation {
domain: "l.messenger.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new(
"https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U",
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "url",
example: Some(
Example::new(
"https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g",
"https://meet.lync.com/skydrive3m-mmm/random/random?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "q",
example: None
},
CleanInformation {
domain: "external.fbma2-1.fna.fbcdn.net",
path: "/safe_image.php",
querykey: "url",
example: Some(
Example::new(
"https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf",
"https://i.redd.it/4wao306sl9931.jpg?"
)
),
},
CleanInformation {
domain: "www.youtube.com",
path: "/redirect",
querykey: "q",
example: Some(
Example::new(
"https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA",
"https://forms.gle/QDyXJVu6x24UYErEA?"
)
),
},
CleanInformation {
domain: "eur02.safelinks.protection.outlook.com",
path: "/",
querykey: "url",
example: Some(
Example::new(
"https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0",
"http://www.regitart.se/Default.aspx?")
),
},
]
};
pub struct UrlCleaner<'a> {
/// Information on how to obtain the link from a tracking link
cleaning_info: Vec<CleanInformation<'a>>,
/// list of known tracking query keys
tracker_query_keys: Vec<String>,
}
impl<'a> Default for UrlCleaner<'a> {
fn default() -> Self {
let cleaning_info = DOMAINS_TO_CLEAN.into();
let tracker_query_keys = KEYS_TO_CLEAN.iter().map(|s| s.to_string()).collect();
Self {
cleaning_info,
tracker_query_keys,
}
}
}
impl<'a> UrlCleaner<'a> {
// remove the click-id and similar query that can sometimes come hidden inside links
fn clean_query(&self, url: &url::Url) -> (url::Url, bool) {
let pairs = url.query_pairs();
let mut newurl = url.clone();
newurl.query_pairs_mut().clear();
let mut modified = false;
for (key, value) in pairs {
if self.tracker_query_keys.contains(&key.as_ref().to_string()) {
println!("key found: {:?}", key);
modified = true;
} else {
newurl.query_pairs_mut().append_pair(&key, &value);
}
}
(newurl, modified)
}
/// try to extract the destination url from the link if possible and also try to remove the click-id
/// query parameters that are available, if the content has been modified return Some, or if
/// the content is untouched, return None
pub fn | (&self, url: &url::Url) -> Option<String> {
if let Some(domain) = url.domain() {
// Check all rules that matches this domain, but return on the first clean
for domaininfo in self.cleaning_info.iter().filter(|&x| x.domain == domain) {
if domaininfo.path == url.path() {
println!("{}", url);
println!("Discusting url, cleaning");
let pairs = url.query_pairs();
// First search all the queries for the link querykey
for (key, value) in pairs {
if key.as_ref() == domaininfo.querykey {
if let Ok(url) = Url::parse(&value) {
// Before returning, remove any click identifier as well
return Some(self.clean_query(&url).0.to_string());
}
}
}
}
}
//println!("Url is clean");
// Check if there is a click identifier, and return if there is one
let (url, modified) = self.clean_query(&url);
if modified {
return Some(url.to_string());
}
}
None
}
pub fn try_clean_string(&self, url_string: String) -> String {
if let Ok(parsed) = Url::parse(&url_string) {
if let Some(clean) = self.clean_url(&parsed) {
return clean;
}
}
url_string
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn clean_facebook() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ";
let url_clean = "https://www.youtube.com/watch?v=uBKajwUM5v4";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook2() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.banggood.com%2FXT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html%3Fp%3DJQ191716342021201711%26custlinkid%3D37737%26fbclid%3DIwAR0ZRlKtl4NJgkCGMuiVNuxnL3GUVnw0kCLSmwNFD_xqiUv83U_dVP-6X8A&h=AT1jV6cBYrlCCqMs2RUB2mHXcyuSq4zO_1safL4SYIvxkwWVDs7xViyTB1dYm-84aACs8qfshYEHY0pS8o2H0cdRw51mK9ZQGmKZlodbgvCkZhs3v1LxumxDGCHcIey-8M1sLH1gXAN6";
let url_clean = "https://www.banggood.com/XT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html?p=JQ191716342021201711";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_messenger() {
let url_dirty ="https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U";
let url_clean =
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_google_meeting() {
let url = "https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g";
let url_clean = "https://meet.lync.com/skydrive3m-mmm/random/random?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook_image() {
let url = "https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf";
let url_clean = "https://i.redd.it/4wao306sl9931.jpg?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_youtube_chat_link() {
let url = "https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA";
let url_clean = "https://forms.gle/QDyXJVu6x24UYErEA?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_teams_link() {
let url = "https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0";
let url_clean = "http://www.regitart.se/Default.aspx?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn test_all_examples() {
for cleaner in &DOMAINS_TO_CLEAN {
if let Some(example) = &cleaner.example {
let url_dirty = &example.dirty;
let url_clean = &example.clean;
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(&clean, url_clean);
}
}
}
}
| clean_url | identifier_name |
lib.rs | use url::*;
pub struct Example<'a> {
dirty: &'a str,
clean: &'a str,
}
impl<'a> Example<'a> {
pub const fn new(dirty: &'a str, clean: &'a str) -> Self {
Self { dirty, clean }
}
}
/// Contains directives on how to extract the link from a click-tracking link forwarder.
pub struct CleanInformation<'a> {
/// The domain which is used to forward
domain: &'a str,
/// The path at the given domain that will the tracking-url will send tracking information to
path: &'a str,
/// The query parameter that the actual link of interest is sent as
querykey: &'a str,
#[allow(unused)]
example: Option<Example<'a>>,
}
/// When these keys are part of the url query parameters, they will be removed from the link
/// So that if the parameters contain something like "www.example.com/foo?param1=bar&fbclid=1234",
/// the resulting query string will become something simlar to "www.example.com/foo?param1=bar"
/// with the click id query parameter "fbclid" removed
const KEYS_TO_CLEAN: [&'static str; 3] = ["fbclid", "custlinkid", "gclid"];
/// Five commonly used tracking forwarders that are going to be cleaned
const DOMAINS_TO_CLEAN: [CleanInformation<'static>; 7] = {
[
CleanInformation {
domain: "l.facebook.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new("https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ",
"https://www.youtube.com/watch?v=uBKajwUM5v4")),
},
CleanInformation {
domain: "l.messenger.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new(
"https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U",
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "url",
example: Some(
Example::new(
"https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g",
"https://meet.lync.com/skydrive3m-mmm/random/random?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "q",
example: None
},
CleanInformation {
domain: "external.fbma2-1.fna.fbcdn.net",
path: "/safe_image.php",
querykey: "url",
example: Some(
Example::new(
"https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf",
"https://i.redd.it/4wao306sl9931.jpg?"
)
),
},
CleanInformation {
domain: "www.youtube.com",
path: "/redirect",
querykey: "q",
example: Some(
Example::new(
"https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA",
"https://forms.gle/QDyXJVu6x24UYErEA?"
)
),
},
CleanInformation {
domain: "eur02.safelinks.protection.outlook.com",
path: "/",
querykey: "url",
example: Some(
Example::new(
"https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0",
"http://www.regitart.se/Default.aspx?")
),
},
]
};
pub struct UrlCleaner<'a> {
/// Information on how to obtain the link from a tracking link
cleaning_info: Vec<CleanInformation<'a>>,
/// list of known tracking query keys
tracker_query_keys: Vec<String>,
}
impl<'a> Default for UrlCleaner<'a> {
fn default() -> Self {
let cleaning_info = DOMAINS_TO_CLEAN.into();
let tracker_query_keys = KEYS_TO_CLEAN.iter().map(|s| s.to_string()).collect();
Self {
cleaning_info,
tracker_query_keys,
}
}
}
impl<'a> UrlCleaner<'a> {
// remove the click-id and similar query that can sometimes come hidden inside links
fn clean_query(&self, url: &url::Url) -> (url::Url, bool) {
let pairs = url.query_pairs();
let mut newurl = url.clone();
newurl.query_pairs_mut().clear();
let mut modified = false;
for (key, value) in pairs {
if self.tracker_query_keys.contains(&key.as_ref().to_string()) | else {
newurl.query_pairs_mut().append_pair(&key, &value);
}
}
(newurl, modified)
}
/// try to extract the destination url from the link if possible and also try to remove the click-id
/// query parameters that are available, if the content has been modified return Some, or if
/// the content is untouched, return None
pub fn clean_url(&self, url: &url::Url) -> Option<String> {
if let Some(domain) = url.domain() {
// Check all rules that matches this domain, but return on the first clean
for domaininfo in self.cleaning_info.iter().filter(|&x| x.domain == domain) {
if domaininfo.path == url.path() {
println!("{}", url);
println!("Discusting url, cleaning");
let pairs = url.query_pairs();
// First search all the queries for the link querykey
for (key, value) in pairs {
if key.as_ref() == domaininfo.querykey {
if let Ok(url) = Url::parse(&value) {
// Before returning, remove any click identifier as well
return Some(self.clean_query(&url).0.to_string());
}
}
}
}
}
//println!("Url is clean");
// Check if there is a click identifier, and return if there is one
let (url, modified) = self.clean_query(&url);
if modified {
return Some(url.to_string());
}
}
None
}
pub fn try_clean_string(&self, url_string: String) -> String {
if let Ok(parsed) = Url::parse(&url_string) {
if let Some(clean) = self.clean_url(&parsed) {
return clean;
}
}
url_string
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn clean_facebook() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ";
let url_clean = "https://www.youtube.com/watch?v=uBKajwUM5v4";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook2() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.banggood.com%2FXT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html%3Fp%3DJQ191716342021201711%26custlinkid%3D37737%26fbclid%3DIwAR0ZRlKtl4NJgkCGMuiVNuxnL3GUVnw0kCLSmwNFD_xqiUv83U_dVP-6X8A&h=AT1jV6cBYrlCCqMs2RUB2mHXcyuSq4zO_1safL4SYIvxkwWVDs7xViyTB1dYm-84aACs8qfshYEHY0pS8o2H0cdRw51mK9ZQGmKZlodbgvCkZhs3v1LxumxDGCHcIey-8M1sLH1gXAN6";
let url_clean = "https://www.banggood.com/XT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html?p=JQ191716342021201711";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_messenger() {
let url_dirty ="https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U";
let url_clean =
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_google_meeting() {
let url = "https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g";
let url_clean = "https://meet.lync.com/skydrive3m-mmm/random/random?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook_image() {
let url = "https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf";
let url_clean = "https://i.redd.it/4wao306sl9931.jpg?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_youtube_chat_link() {
let url = "https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA";
let url_clean = "https://forms.gle/QDyXJVu6x24UYErEA?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_teams_link() {
let url = "https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0";
let url_clean = "http://www.regitart.se/Default.aspx?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn test_all_examples() {
for cleaner in &DOMAINS_TO_CLEAN {
if let Some(example) = &cleaner.example {
let url_dirty = &example.dirty;
let url_clean = &example.clean;
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(&clean, url_clean);
}
}
}
}
| {
println!("key found: {:?}", key);
modified = true;
} | conditional_block |
lib.rs | use url::*;
pub struct Example<'a> {
dirty: &'a str,
clean: &'a str,
}
impl<'a> Example<'a> {
pub const fn new(dirty: &'a str, clean: &'a str) -> Self {
Self { dirty, clean }
}
}
/// Contains directives on how to extract the link from a click-tracking link forwarder.
pub struct CleanInformation<'a> {
/// The domain which is used to forward
domain: &'a str,
/// The path at the given domain that will the tracking-url will send tracking information to
path: &'a str,
/// The query parameter that the actual link of interest is sent as
querykey: &'a str,
#[allow(unused)]
example: Option<Example<'a>>,
}
/// When these keys are part of the url query parameters, they will be removed from the link
/// So that if the parameters contain something like "www.example.com/foo?param1=bar&fbclid=1234",
/// the resulting query string will become something simlar to "www.example.com/foo?param1=bar"
/// with the click id query parameter "fbclid" removed
const KEYS_TO_CLEAN: [&'static str; 3] = ["fbclid", "custlinkid", "gclid"];
/// Five commonly used tracking forwarders that are going to be cleaned
const DOMAINS_TO_CLEAN: [CleanInformation<'static>; 7] = {
[
CleanInformation {
domain: "l.facebook.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new("https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ",
"https://www.youtube.com/watch?v=uBKajwUM5v4")),
},
CleanInformation {
domain: "l.messenger.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new(
"https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U",
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "url",
example: Some(
Example::new(
"https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g",
"https://meet.lync.com/skydrive3m-mmm/random/random?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "q",
example: None
},
CleanInformation {
domain: "external.fbma2-1.fna.fbcdn.net",
path: "/safe_image.php",
querykey: "url",
example: Some(
Example::new(
"https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf",
"https://i.redd.it/4wao306sl9931.jpg?"
)
),
},
CleanInformation {
domain: "www.youtube.com",
path: "/redirect",
querykey: "q",
example: Some(
Example::new(
"https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA",
"https://forms.gle/QDyXJVu6x24UYErEA?"
)
),
},
CleanInformation {
domain: "eur02.safelinks.protection.outlook.com",
path: "/",
querykey: "url",
example: Some(
Example::new(
"https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0",
"http://www.regitart.se/Default.aspx?")
),
},
]
};
pub struct UrlCleaner<'a> {
/// Information on how to obtain the link from a tracking link
cleaning_info: Vec<CleanInformation<'a>>,
/// list of known tracking query keys
tracker_query_keys: Vec<String>,
}
impl<'a> Default for UrlCleaner<'a> {
fn default() -> Self {
let cleaning_info = DOMAINS_TO_CLEAN.into();
let tracker_query_keys = KEYS_TO_CLEAN.iter().map(|s| s.to_string()).collect();
Self {
cleaning_info,
tracker_query_keys,
}
}
}
impl<'a> UrlCleaner<'a> {
// remove the click-id and similar query that can sometimes come hidden inside links
fn clean_query(&self, url: &url::Url) -> (url::Url, bool) {
let pairs = url.query_pairs();
let mut newurl = url.clone();
newurl.query_pairs_mut().clear();
let mut modified = false;
for (key, value) in pairs {
if self.tracker_query_keys.contains(&key.as_ref().to_string()) {
println!("key found: {:?}", key);
modified = true;
} else {
newurl.query_pairs_mut().append_pair(&key, &value);
}
}
(newurl, modified)
}
/// try to extract the destination url from the link if possible and also try to remove the click-id
/// query parameters that are available, if the content has been modified return Some, or if
/// the content is untouched, return None
pub fn clean_url(&self, url: &url::Url) -> Option<String> |
pub fn try_clean_string(&self, url_string: String) -> String {
if let Ok(parsed) = Url::parse(&url_string) {
if let Some(clean) = self.clean_url(&parsed) {
return clean;
}
}
url_string
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn clean_facebook() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ";
let url_clean = "https://www.youtube.com/watch?v=uBKajwUM5v4";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook2() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.banggood.com%2FXT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html%3Fp%3DJQ191716342021201711%26custlinkid%3D37737%26fbclid%3DIwAR0ZRlKtl4NJgkCGMuiVNuxnL3GUVnw0kCLSmwNFD_xqiUv83U_dVP-6X8A&h=AT1jV6cBYrlCCqMs2RUB2mHXcyuSq4zO_1safL4SYIvxkwWVDs7xViyTB1dYm-84aACs8qfshYEHY0pS8o2H0cdRw51mK9ZQGmKZlodbgvCkZhs3v1LxumxDGCHcIey-8M1sLH1gXAN6";
let url_clean = "https://www.banggood.com/XT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html?p=JQ191716342021201711";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_messenger() {
let url_dirty ="https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U";
let url_clean =
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_google_meeting() {
let url = "https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g";
let url_clean = "https://meet.lync.com/skydrive3m-mmm/random/random?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook_image() {
let url = "https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf";
let url_clean = "https://i.redd.it/4wao306sl9931.jpg?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_youtube_chat_link() {
let url = "https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA";
let url_clean = "https://forms.gle/QDyXJVu6x24UYErEA?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_teams_link() {
let url = "https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0";
let url_clean = "http://www.regitart.se/Default.aspx?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn test_all_examples() {
for cleaner in &DOMAINS_TO_CLEAN {
if let Some(example) = &cleaner.example {
let url_dirty = &example.dirty;
let url_clean = &example.clean;
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(&clean, url_clean);
}
}
}
}
| {
if let Some(domain) = url.domain() {
// Check all rules that matches this domain, but return on the first clean
for domaininfo in self.cleaning_info.iter().filter(|&x| x.domain == domain) {
if domaininfo.path == url.path() {
println!("{}", url);
println!("Discusting url, cleaning");
let pairs = url.query_pairs();
// First search all the queries for the link querykey
for (key, value) in pairs {
if key.as_ref() == domaininfo.querykey {
if let Ok(url) = Url::parse(&value) {
// Before returning, remove any click identifier as well
return Some(self.clean_query(&url).0.to_string());
}
}
}
}
}
//println!("Url is clean");
// Check if there is a click identifier, and return if there is one
let (url, modified) = self.clean_query(&url);
if modified {
return Some(url.to_string());
}
}
None
} | identifier_body |
generate-map.ts | import { distSq } from "./vector";
import { Delaunay } from "d3-delaunay";
import Map, { Tile } from "./map";
import { line } from "./utils";
// Crude Djikstra implementation over a d3-delaunay triangulation
function routeTo(delaunay: Delaunay<Delaunay.Point>, start: number, end: number) {
const pointCount = delaunay.points.length / 2;
const unvisited = new Array(pointCount).fill(null).map((_, i) => i);
const dist: number[] = new Array(pointCount).fill(Number.POSITIVE_INFINITY);
const prev: number[] = new Array(pointCount).fill(null);
dist[start] = 0;
while (unvisited.length) {
let from = unvisited[0];
// pick vertex with min dist
for (let i = 1; i < unvisited.length; i++) {
if (dist[unvisited[i]] < dist[from]) {
from = unvisited[i];
}
}
unvisited.splice(unvisited.indexOf(from), 1);
for (const to of delaunay.neighbors(from)) {
const [x0, y0] = [delaunay.points[from * 2], delaunay.points[from * 2 + 1]];
const [x1, y1] = [delaunay.points[to * 2], delaunay.points[to * 2 + 1]];
const [dx, dy] = [x1 - x0, y1 - y0];
let d = dist[from] + Math.sqrt(dx * dx + dy * dy);
if (d < dist[to]) {
dist[to] = d;
prev[to] = from;
}
}
}
const path: number[] = [];
let current = end;
if (prev[current] !== null || current === start) {
while (current !== null) {
path.push(current);
current = prev[current];
}
}
path.reverse();
return path;
}
class MapBuilder {
map: Map;
constructor(w: number, h: number) {
this.map = new Map(w, h);
}
addWall(x0: number, y0: number, x1: number, y1: number, gapChance?: number) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
for (const [x, y] of line([x0, y0], [x1, y1])) {
if (gapChance && Math.random() < gapChance) continue;
this.map.set(x, y, Tile.Wall);
}
}
addStream(x0: number, y0: number, x1: number, y1: number, brushSize = 2) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (const [x, y] of line([x0, y0], [x1, y1])) {
for (let bx = 0; bx < brushSize; bx++) {
for (let by = 0; by < brushSize; by++) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Water);
}
}
}
}
addCrossing(x: number, y: number, brushSize = 2) {
[x, y] = [x, y].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (let bx = -1; bx < brushSize + 1; bx++) {
for (let by = -1; by < brushSize + 1; by++) {
if (this.map.get(x + bx - bs2, y + by - bs2) === Tile.Water) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Crossing);
}
}
}
}
addPaddock(x: number, y: number) {
[x, y] = [x, y].map(Math.round);
const hw = ~~(Math.random() * 4) + 8; // half-width
const hh = ~~(Math.random() * 3) + 5; // half-height
const w = hw * 2; // width
const h = hh * 2; // height
const p = 2 * (w + h); // perimeter
this.map.paddock = [x - hw, y - hh, w, h];
// clear rect
for (let xOffset = -hw; xOffset <= hw; xOffset++) {
for (let yOffset = -hh; yOffset <= hh; yOffset++) {
const t = this.map.get(x + xOffset, y + yOffset);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(x + xOffset, y + yOffset, Tile.Ground);
}
}
}
// random point along perimeter
const opening = ~~(Math.random() * p);
// wall in perimeter
for (let i = 0; i < 2 * (w + h); i++) {
const distanceToOpening = p / 2 - Math.abs(Math.abs(i - opening) - p / 2);
if (distanceToOpening < 5) continue;
const px = x - hw + ~~Math.max(0, Math.min(i, w, w * 2 + h - i));
const py = y - hh + ~~Math.max(0, Math.min(i - w, h, 2 * (w + h) - i));
const t = this.map.get(px, py);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(px, py, Tile.Wall);
}
// plant grass
this.plantGrass(x, y);
}
}
plantGrass(x: number, y: number) {
[x, y] = [x, y].map(Math.round);
if (this.map.get(x, y) === Tile.Ground) {
this.map.set(x, y, Tile.Grass);
}
}
| () {
const addedGrasses = [];
for (let x = 0; x < width; x++) {
for (let y = 0; y < height; y++) {
const t = this.map.get(x, y);
if (t === Tile.Ground) {
const pastureNeighbors =
(x > 0 && this.map.get(x - 1, y) === Tile.Grass ? 1 : 0) +
(x < width - 1 && this.map.get(x + 1, y) === Tile.Grass ? 1 : 0) +
(y > 0 && this.map.get(x, y - 1) === Tile.Grass ? 1 : 0) +
(y < height - 1 && this.map.get(x, y + 1) === Tile.Grass ? 1 : 0) +
(x > 0 && y > 0 && this.map.get(x - 1, y - 1) === Tile.Grass ? 1 : 0) +
(x < width - 1 && y > 0 && this.map.get(x + 1, y - 1) === Tile.Grass
? 1
: 0) +
(x < width - 1 && y < height - 1 && this.map.get(x + 1, y + 1) === Tile.Grass
? 1
: 0) +
(x > 0 && y < height - 1 && this.map.get(x - 1, y + 1) === Tile.Grass
? 1
: 0);
if (Math.random() < pastureNeighbors * 0.5) {
addedGrasses.push([x, y]);
}
}
}
}
for (const [x, y] of addedGrasses) {
this.map.set(x, y, Tile.Grass);
}
}
}
export default function generateMap(w: number, h: number) {
const mapBuilder = new MapBuilder(w, h);
const cellSize = 4;
pixels = 256;
resize();
renderer.clearColor = palette.timberwolf;
// generate a poisson disc distribution
const points: [number, number][] = [];
for (let i = 0; i < 1000; i++) {
const x = Math.random() * w;
const y = Math.random() * h;
if (!points.some(p => distSq(p, [x, y]) < cellSize * cellSize)) {
points.push([x, y]);
}
}
// calculate a triangulation of the points
const triangulation = Delaunay.from(points);
// pick a subset of points forming a circle in the center to be our playable area
const interior: number[] = [];
const sorted = points
.slice()
.sort((p0, p1) => distSq(p0, [w / 2, h / 2]) - distSq(p1, [w / 2, h / 2]));
for (let i = 0; i < points.length / 2; i++) {
interior.push(points.indexOf(sorted[i]));
}
// add walls around the hull of our interior subset
const interiorTriangulation = Delaunay.from(interior.map(i => points[i]));
const border = interiorTriangulation.hullPolygon();
for (let i = 0; i < border.length; i++) {
const [x0, y0] = border[i];
const [x1, y1] = border[(i + 1) % border.length];
mapBuilder.addWall(x0, y0, x1, y1);
}
// track which interior points are still open
const usedPoints: number[] = Array.from(
interiorTriangulation.hull.map(i => interior[i])
);
// generate a stream through the center of the map
let waterPath = [];
if (Math.random() > 0.25) {
const l = triangulation.hull.length;
const hullIndex0 = ~~(Math.random() * l);
const hullIndex1 = (hullIndex0 + ~~(l / 2 + (Math.random() - 0.5) * 0.25 * l)) % l;
const waterStart = triangulation.hull[hullIndex0];
const waterEnd = triangulation.hull[hullIndex1];
waterPath = routeTo(triangulation, waterStart, waterEnd);
}
// randomly add walls along the triangulation of interior points
const { halfedges, triangles } = interiorTriangulation;
const wallDensity = Math.random() * 0.1 + 0.1;
for (let i = 0; i < halfedges.length; i++) {
if (Math.random() > wallDensity) continue;
const j = halfedges[i];
if (j < i) continue;
const p0 = interior[triangles[i]];
const p1 = interior[triangles[j]];
if (waterPath.includes(p0) && waterPath.includes(p1)) continue;
usedPoints.push(p0);
usedPoints.push(p1);
const [x0, y0] = points[p0];
const [x1, y1] = points[p1];
mapBuilder.addWall(x0, y0, x1, y1, Math.random());
}
// add stream along water path
for (let i = 0; i < waterPath.length - 1; i++) {
const [x0, y0] = points[waterPath[i]];
const [x1, y1] = points[waterPath[i + 1]];
mapBuilder.addStream(x0, y0, x1, y1);
}
// add crossings at open points along the stream
const crossings = [];
for (let i = 0; i < waterPath.length - 1; i++) {
if (interior.includes(waterPath[i]) && !usedPoints.includes(waterPath[i])) {
crossings.push(waterPath[i]);
usedPoints.push(waterPath[i]);
}
}
for (let i = 0; i < crossings.length; i++) {
const [x, y] = points[crossings[i]];
mapBuilder.addCrossing(x, y);
usedPoints.push(crossings[i]);
crossings.splice(i, 1);
}
// pick an open point to use as a starting point
const start = interior.find(i => !usedPoints.includes(i));
mapBuilder.map.start = points[start].map(Math.round) as [number, number];
usedPoints.push(start);
mapBuilder.addPaddock(points[start][0], points[start][1]);
// place grasses at other open points
const grassDensity = Math.random() * 0.5 + 0.25;
for (let i = 0; i < interior.length; i++) {
const j = interior[i];
if (!usedPoints.includes(j) && Math.random() < grassDensity) {
const [x, y] = points[j];
mapBuilder.plantGrass(x, y);
}
}
// grow grass
for (let i = 0; i < 6; i++) {
mapBuilder.iterateGrass();
}
// place sheep at other points
const openPoints = interior.filter(i => !usedPoints.includes(i));
const sheepCount = 8; // TODO: where should this come from?
while (openPoints.length > 0 && mapBuilder.map.sheepStart.length < sheepCount) {
const j = ~~(Math.random() * openPoints.length);
mapBuilder.map.sheepStart.push(points[openPoints[j]]);
openPoints.splice(j, 0);
}
return mapBuilder.map;
}
// render map for testing
export function previewMap(map: Map) {
for (let y = 0; y < map.height; y++) {
for (let x = 0; x < map.width; x++) {
if (map.get(x, y) === Tile.Wall) {
renderer.set(x, y, palette.chestnut);
} else if (map.get(x, y) === Tile.Grass) {
renderer.set(x, y, palette.forestGreen);
} else if (map.get(x, y) === Tile.Water) {
renderer.set(x, y, palette.midnightBlue);
} else if (map.get(x, y) === Tile.Crossing) {
renderer.set(x, y, palette.aquamarine);
}
}
}
}
| iterateGrass | identifier_name |
generate-map.ts | import { distSq } from "./vector";
import { Delaunay } from "d3-delaunay";
import Map, { Tile } from "./map";
import { line } from "./utils";
// Crude Djikstra implementation over a d3-delaunay triangulation
function routeTo(delaunay: Delaunay<Delaunay.Point>, start: number, end: number) {
const pointCount = delaunay.points.length / 2;
const unvisited = new Array(pointCount).fill(null).map((_, i) => i);
const dist: number[] = new Array(pointCount).fill(Number.POSITIVE_INFINITY);
const prev: number[] = new Array(pointCount).fill(null);
dist[start] = 0;
while (unvisited.length) {
let from = unvisited[0];
// pick vertex with min dist
for (let i = 1; i < unvisited.length; i++) {
if (dist[unvisited[i]] < dist[from]) {
from = unvisited[i];
}
}
unvisited.splice(unvisited.indexOf(from), 1);
for (const to of delaunay.neighbors(from)) {
const [x0, y0] = [delaunay.points[from * 2], delaunay.points[from * 2 + 1]];
const [x1, y1] = [delaunay.points[to * 2], delaunay.points[to * 2 + 1]];
const [dx, dy] = [x1 - x0, y1 - y0];
let d = dist[from] + Math.sqrt(dx * dx + dy * dy);
if (d < dist[to]) {
dist[to] = d;
prev[to] = from;
}
}
}
const path: number[] = [];
let current = end;
if (prev[current] !== null || current === start) {
while (current !== null) {
path.push(current);
current = prev[current];
}
}
path.reverse();
return path;
}
class MapBuilder {
map: Map;
constructor(w: number, h: number) {
this.map = new Map(w, h);
}
addWall(x0: number, y0: number, x1: number, y1: number, gapChance?: number) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
for (const [x, y] of line([x0, y0], [x1, y1])) {
if (gapChance && Math.random() < gapChance) continue;
this.map.set(x, y, Tile.Wall);
}
}
addStream(x0: number, y0: number, x1: number, y1: number, brushSize = 2) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (const [x, y] of line([x0, y0], [x1, y1])) {
for (let bx = 0; bx < brushSize; bx++) {
for (let by = 0; by < brushSize; by++) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Water);
}
}
}
}
addCrossing(x: number, y: number, brushSize = 2) {
[x, y] = [x, y].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (let bx = -1; bx < brushSize + 1; bx++) {
for (let by = -1; by < brushSize + 1; by++) {
if (this.map.get(x + bx - bs2, y + by - bs2) === Tile.Water) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Crossing);
}
}
}
}
addPaddock(x: number, y: number) {
[x, y] = [x, y].map(Math.round);
const hw = ~~(Math.random() * 4) + 8; // half-width
const hh = ~~(Math.random() * 3) + 5; // half-height
const w = hw * 2; // width
const h = hh * 2; // height
const p = 2 * (w + h); // perimeter
this.map.paddock = [x - hw, y - hh, w, h];
// clear rect
for (let xOffset = -hw; xOffset <= hw; xOffset++) {
for (let yOffset = -hh; yOffset <= hh; yOffset++) {
const t = this.map.get(x + xOffset, y + yOffset);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(x + xOffset, y + yOffset, Tile.Ground);
}
}
}
// random point along perimeter
const opening = ~~(Math.random() * p);
// wall in perimeter
for (let i = 0; i < 2 * (w + h); i++) {
const distanceToOpening = p / 2 - Math.abs(Math.abs(i - opening) - p / 2);
if (distanceToOpening < 5) continue;
const px = x - hw + ~~Math.max(0, Math.min(i, w, w * 2 + h - i));
const py = y - hh + ~~Math.max(0, Math.min(i - w, h, 2 * (w + h) - i));
const t = this.map.get(px, py);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(px, py, Tile.Wall);
}
// plant grass
this.plantGrass(x, y);
}
}
plantGrass(x: number, y: number) |
iterateGrass() {
const addedGrasses = [];
for (let x = 0; x < width; x++) {
for (let y = 0; y < height; y++) {
const t = this.map.get(x, y);
if (t === Tile.Ground) {
const pastureNeighbors =
(x > 0 && this.map.get(x - 1, y) === Tile.Grass ? 1 : 0) +
(x < width - 1 && this.map.get(x + 1, y) === Tile.Grass ? 1 : 0) +
(y > 0 && this.map.get(x, y - 1) === Tile.Grass ? 1 : 0) +
(y < height - 1 && this.map.get(x, y + 1) === Tile.Grass ? 1 : 0) +
(x > 0 && y > 0 && this.map.get(x - 1, y - 1) === Tile.Grass ? 1 : 0) +
(x < width - 1 && y > 0 && this.map.get(x + 1, y - 1) === Tile.Grass
? 1
: 0) +
(x < width - 1 && y < height - 1 && this.map.get(x + 1, y + 1) === Tile.Grass
? 1
: 0) +
(x > 0 && y < height - 1 && this.map.get(x - 1, y + 1) === Tile.Grass
? 1
: 0);
if (Math.random() < pastureNeighbors * 0.5) {
addedGrasses.push([x, y]);
}
}
}
}
for (const [x, y] of addedGrasses) {
this.map.set(x, y, Tile.Grass);
}
}
}
export default function generateMap(w: number, h: number) {
const mapBuilder = new MapBuilder(w, h);
const cellSize = 4;
pixels = 256;
resize();
renderer.clearColor = palette.timberwolf;
// generate a poisson disc distribution
const points: [number, number][] = [];
for (let i = 0; i < 1000; i++) {
const x = Math.random() * w;
const y = Math.random() * h;
if (!points.some(p => distSq(p, [x, y]) < cellSize * cellSize)) {
points.push([x, y]);
}
}
// calculate a triangulation of the points
const triangulation = Delaunay.from(points);
// pick a subset of points forming a circle in the center to be our playable area
const interior: number[] = [];
const sorted = points
.slice()
.sort((p0, p1) => distSq(p0, [w / 2, h / 2]) - distSq(p1, [w / 2, h / 2]));
for (let i = 0; i < points.length / 2; i++) {
interior.push(points.indexOf(sorted[i]));
}
// add walls around the hull of our interior subset
const interiorTriangulation = Delaunay.from(interior.map(i => points[i]));
const border = interiorTriangulation.hullPolygon();
for (let i = 0; i < border.length; i++) {
const [x0, y0] = border[i];
const [x1, y1] = border[(i + 1) % border.length];
mapBuilder.addWall(x0, y0, x1, y1);
}
// track which interior points are still open
const usedPoints: number[] = Array.from(
interiorTriangulation.hull.map(i => interior[i])
);
// generate a stream through the center of the map
let waterPath = [];
if (Math.random() > 0.25) {
const l = triangulation.hull.length;
const hullIndex0 = ~~(Math.random() * l);
const hullIndex1 = (hullIndex0 + ~~(l / 2 + (Math.random() - 0.5) * 0.25 * l)) % l;
const waterStart = triangulation.hull[hullIndex0];
const waterEnd = triangulation.hull[hullIndex1];
waterPath = routeTo(triangulation, waterStart, waterEnd);
}
// randomly add walls along the triangulation of interior points
const { halfedges, triangles } = interiorTriangulation;
const wallDensity = Math.random() * 0.1 + 0.1;
for (let i = 0; i < halfedges.length; i++) {
if (Math.random() > wallDensity) continue;
const j = halfedges[i];
if (j < i) continue;
const p0 = interior[triangles[i]];
const p1 = interior[triangles[j]];
if (waterPath.includes(p0) && waterPath.includes(p1)) continue;
usedPoints.push(p0);
usedPoints.push(p1);
const [x0, y0] = points[p0];
const [x1, y1] = points[p1];
mapBuilder.addWall(x0, y0, x1, y1, Math.random());
}
// add stream along water path
for (let i = 0; i < waterPath.length - 1; i++) {
const [x0, y0] = points[waterPath[i]];
const [x1, y1] = points[waterPath[i + 1]];
mapBuilder.addStream(x0, y0, x1, y1);
}
// add crossings at open points along the stream
const crossings = [];
for (let i = 0; i < waterPath.length - 1; i++) {
if (interior.includes(waterPath[i]) && !usedPoints.includes(waterPath[i])) {
crossings.push(waterPath[i]);
usedPoints.push(waterPath[i]);
}
}
for (let i = 0; i < crossings.length; i++) {
const [x, y] = points[crossings[i]];
mapBuilder.addCrossing(x, y);
usedPoints.push(crossings[i]);
crossings.splice(i, 1);
}
// pick an open point to use as a starting point
const start = interior.find(i => !usedPoints.includes(i));
mapBuilder.map.start = points[start].map(Math.round) as [number, number];
usedPoints.push(start);
mapBuilder.addPaddock(points[start][0], points[start][1]);
// place grasses at other open points
const grassDensity = Math.random() * 0.5 + 0.25;
for (let i = 0; i < interior.length; i++) {
const j = interior[i];
if (!usedPoints.includes(j) && Math.random() < grassDensity) {
const [x, y] = points[j];
mapBuilder.plantGrass(x, y);
}
}
// grow grass
for (let i = 0; i < 6; i++) {
mapBuilder.iterateGrass();
}
// place sheep at other points
const openPoints = interior.filter(i => !usedPoints.includes(i));
const sheepCount = 8; // TODO: where should this come from?
while (openPoints.length > 0 && mapBuilder.map.sheepStart.length < sheepCount) {
const j = ~~(Math.random() * openPoints.length);
mapBuilder.map.sheepStart.push(points[openPoints[j]]);
openPoints.splice(j, 0);
}
return mapBuilder.map;
}
// render map for testing
export function previewMap(map: Map) {
for (let y = 0; y < map.height; y++) {
for (let x = 0; x < map.width; x++) {
if (map.get(x, y) === Tile.Wall) {
renderer.set(x, y, palette.chestnut);
} else if (map.get(x, y) === Tile.Grass) {
renderer.set(x, y, palette.forestGreen);
} else if (map.get(x, y) === Tile.Water) {
renderer.set(x, y, palette.midnightBlue);
} else if (map.get(x, y) === Tile.Crossing) {
renderer.set(x, y, palette.aquamarine);
}
}
}
}
| {
[x, y] = [x, y].map(Math.round);
if (this.map.get(x, y) === Tile.Ground) {
this.map.set(x, y, Tile.Grass);
}
} | identifier_body |
generate-map.ts | import { distSq } from "./vector";
import { Delaunay } from "d3-delaunay";
import Map, { Tile } from "./map";
import { line } from "./utils";
// Crude Djikstra implementation over a d3-delaunay triangulation
function routeTo(delaunay: Delaunay<Delaunay.Point>, start: number, end: number) {
const pointCount = delaunay.points.length / 2;
const unvisited = new Array(pointCount).fill(null).map((_, i) => i);
const dist: number[] = new Array(pointCount).fill(Number.POSITIVE_INFINITY);
const prev: number[] = new Array(pointCount).fill(null);
dist[start] = 0;
while (unvisited.length) {
let from = unvisited[0];
// pick vertex with min dist
for (let i = 1; i < unvisited.length; i++) {
if (dist[unvisited[i]] < dist[from]) {
from = unvisited[i];
}
}
unvisited.splice(unvisited.indexOf(from), 1);
for (const to of delaunay.neighbors(from)) {
const [x0, y0] = [delaunay.points[from * 2], delaunay.points[from * 2 + 1]];
const [x1, y1] = [delaunay.points[to * 2], delaunay.points[to * 2 + 1]];
const [dx, dy] = [x1 - x0, y1 - y0];
let d = dist[from] + Math.sqrt(dx * dx + dy * dy);
if (d < dist[to]) {
dist[to] = d;
prev[to] = from;
}
}
}
const path: number[] = [];
let current = end;
if (prev[current] !== null || current === start) {
while (current !== null) {
path.push(current);
current = prev[current];
}
}
path.reverse();
return path;
}
class MapBuilder {
map: Map;
constructor(w: number, h: number) {
this.map = new Map(w, h);
}
addWall(x0: number, y0: number, x1: number, y1: number, gapChance?: number) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
for (const [x, y] of line([x0, y0], [x1, y1])) {
if (gapChance && Math.random() < gapChance) continue;
this.map.set(x, y, Tile.Wall);
}
}
addStream(x0: number, y0: number, x1: number, y1: number, brushSize = 2) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (const [x, y] of line([x0, y0], [x1, y1])) {
for (let bx = 0; bx < brushSize; bx++) {
for (let by = 0; by < brushSize; by++) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Water);
}
}
}
}
addCrossing(x: number, y: number, brushSize = 2) {
[x, y] = [x, y].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (let bx = -1; bx < brushSize + 1; bx++) {
for (let by = -1; by < brushSize + 1; by++) {
if (this.map.get(x + bx - bs2, y + by - bs2) === Tile.Water) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Crossing);
}
}
}
}
addPaddock(x: number, y: number) {
[x, y] = [x, y].map(Math.round);
const hw = ~~(Math.random() * 4) + 8; // half-width
const hh = ~~(Math.random() * 3) + 5; // half-height
const w = hw * 2; // width
const h = hh * 2; // height
const p = 2 * (w + h); // perimeter
this.map.paddock = [x - hw, y - hh, w, h];
// clear rect
for (let xOffset = -hw; xOffset <= hw; xOffset++) {
for (let yOffset = -hh; yOffset <= hh; yOffset++) {
const t = this.map.get(x + xOffset, y + yOffset);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(x + xOffset, y + yOffset, Tile.Ground);
}
}
}
// random point along perimeter
const opening = ~~(Math.random() * p);
// wall in perimeter
for (let i = 0; i < 2 * (w + h); i++) {
const distanceToOpening = p / 2 - Math.abs(Math.abs(i - opening) - p / 2);
if (distanceToOpening < 5) continue;
const px = x - hw + ~~Math.max(0, Math.min(i, w, w * 2 + h - i));
const py = y - hh + ~~Math.max(0, Math.min(i - w, h, 2 * (w + h) - i));
const t = this.map.get(px, py);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(px, py, Tile.Wall);
}
// plant grass
this.plantGrass(x, y);
}
}
plantGrass(x: number, y: number) {
[x, y] = [x, y].map(Math.round);
if (this.map.get(x, y) === Tile.Ground) {
this.map.set(x, y, Tile.Grass);
}
}
iterateGrass() {
const addedGrasses = [];
for (let x = 0; x < width; x++) {
for (let y = 0; y < height; y++) {
const t = this.map.get(x, y);
if (t === Tile.Ground) {
const pastureNeighbors =
(x > 0 && this.map.get(x - 1, y) === Tile.Grass ? 1 : 0) +
(x < width - 1 && this.map.get(x + 1, y) === Tile.Grass ? 1 : 0) +
(y > 0 && this.map.get(x, y - 1) === Tile.Grass ? 1 : 0) +
(y < height - 1 && this.map.get(x, y + 1) === Tile.Grass ? 1 : 0) +
(x > 0 && y > 0 && this.map.get(x - 1, y - 1) === Tile.Grass ? 1 : 0) +
(x < width - 1 && y > 0 && this.map.get(x + 1, y - 1) === Tile.Grass
? 1
: 0) +
(x < width - 1 && y < height - 1 && this.map.get(x + 1, y + 1) === Tile.Grass
? 1
: 0) +
(x > 0 && y < height - 1 && this.map.get(x - 1, y + 1) === Tile.Grass
? 1
: 0);
if (Math.random() < pastureNeighbors * 0.5) {
addedGrasses.push([x, y]);
}
}
}
}
for (const [x, y] of addedGrasses) {
this.map.set(x, y, Tile.Grass);
}
}
}
export default function generateMap(w: number, h: number) {
const mapBuilder = new MapBuilder(w, h);
const cellSize = 4;
pixels = 256;
resize();
renderer.clearColor = palette.timberwolf;
// generate a poisson disc distribution
const points: [number, number][] = [];
for (let i = 0; i < 1000; i++) {
const x = Math.random() * w;
const y = Math.random() * h;
if (!points.some(p => distSq(p, [x, y]) < cellSize * cellSize)) {
points.push([x, y]);
}
}
// calculate a triangulation of the points
const triangulation = Delaunay.from(points);
// pick a subset of points forming a circle in the center to be our playable area
const interior: number[] = [];
const sorted = points
.slice()
.sort((p0, p1) => distSq(p0, [w / 2, h / 2]) - distSq(p1, [w / 2, h / 2]));
for (let i = 0; i < points.length / 2; i++) {
interior.push(points.indexOf(sorted[i]));
}
// add walls around the hull of our interior subset
const interiorTriangulation = Delaunay.from(interior.map(i => points[i]));
const border = interiorTriangulation.hullPolygon();
for (let i = 0; i < border.length; i++) {
const [x0, y0] = border[i];
const [x1, y1] = border[(i + 1) % border.length];
mapBuilder.addWall(x0, y0, x1, y1);
}
// track which interior points are still open
const usedPoints: number[] = Array.from(
interiorTriangulation.hull.map(i => interior[i])
);
// generate a stream through the center of the map
let waterPath = [];
if (Math.random() > 0.25) {
const l = triangulation.hull.length;
const hullIndex0 = ~~(Math.random() * l);
const hullIndex1 = (hullIndex0 + ~~(l / 2 + (Math.random() - 0.5) * 0.25 * l)) % l;
const waterStart = triangulation.hull[hullIndex0];
const waterEnd = triangulation.hull[hullIndex1];
waterPath = routeTo(triangulation, waterStart, waterEnd);
}
// randomly add walls along the triangulation of interior points
const { halfedges, triangles } = interiorTriangulation;
const wallDensity = Math.random() * 0.1 + 0.1;
for (let i = 0; i < halfedges.length; i++) {
if (Math.random() > wallDensity) continue;
const j = halfedges[i];
if (j < i) continue;
const p0 = interior[triangles[i]];
const p1 = interior[triangles[j]];
if (waterPath.includes(p0) && waterPath.includes(p1)) continue;
usedPoints.push(p0);
usedPoints.push(p1);
const [x0, y0] = points[p0];
const [x1, y1] = points[p1];
mapBuilder.addWall(x0, y0, x1, y1, Math.random());
}
// add stream along water path
for (let i = 0; i < waterPath.length - 1; i++) {
const [x0, y0] = points[waterPath[i]]; | const [x1, y1] = points[waterPath[i + 1]];
mapBuilder.addStream(x0, y0, x1, y1);
}
// add crossings at open points along the stream
const crossings = [];
for (let i = 0; i < waterPath.length - 1; i++) {
if (interior.includes(waterPath[i]) && !usedPoints.includes(waterPath[i])) {
crossings.push(waterPath[i]);
usedPoints.push(waterPath[i]);
}
}
for (let i = 0; i < crossings.length; i++) {
const [x, y] = points[crossings[i]];
mapBuilder.addCrossing(x, y);
usedPoints.push(crossings[i]);
crossings.splice(i, 1);
}
// pick an open point to use as a starting point
const start = interior.find(i => !usedPoints.includes(i));
mapBuilder.map.start = points[start].map(Math.round) as [number, number];
usedPoints.push(start);
mapBuilder.addPaddock(points[start][0], points[start][1]);
// place grasses at other open points
const grassDensity = Math.random() * 0.5 + 0.25;
for (let i = 0; i < interior.length; i++) {
const j = interior[i];
if (!usedPoints.includes(j) && Math.random() < grassDensity) {
const [x, y] = points[j];
mapBuilder.plantGrass(x, y);
}
}
// grow grass
for (let i = 0; i < 6; i++) {
mapBuilder.iterateGrass();
}
// place sheep at other points
const openPoints = interior.filter(i => !usedPoints.includes(i));
const sheepCount = 8; // TODO: where should this come from?
while (openPoints.length > 0 && mapBuilder.map.sheepStart.length < sheepCount) {
const j = ~~(Math.random() * openPoints.length);
mapBuilder.map.sheepStart.push(points[openPoints[j]]);
openPoints.splice(j, 0);
}
return mapBuilder.map;
}
// render map for testing
export function previewMap(map: Map) {
for (let y = 0; y < map.height; y++) {
for (let x = 0; x < map.width; x++) {
if (map.get(x, y) === Tile.Wall) {
renderer.set(x, y, palette.chestnut);
} else if (map.get(x, y) === Tile.Grass) {
renderer.set(x, y, palette.forestGreen);
} else if (map.get(x, y) === Tile.Water) {
renderer.set(x, y, palette.midnightBlue);
} else if (map.get(x, y) === Tile.Crossing) {
renderer.set(x, y, palette.aquamarine);
}
}
}
} | random_line_split | |
generate-map.ts | import { distSq } from "./vector";
import { Delaunay } from "d3-delaunay";
import Map, { Tile } from "./map";
import { line } from "./utils";
// Crude Djikstra implementation over a d3-delaunay triangulation
// Crude Dijkstra implementation over a d3-delaunay triangulation.
// Returns the list of point indices from `start` to `end` (inclusive),
// or an empty array when `end` is unreachable from `start`.
function routeTo(delaunay: Delaunay<Delaunay.Point>, start: number, end: number) {
  const pointCount = delaunay.points.length / 2;
  const unvisited = new Array(pointCount).fill(null).map((_, i) => i);
  const dist: number[] = new Array(pointCount).fill(Number.POSITIVE_INFINITY);
  // prev[i] is the predecessor of i on the cheapest known path, or null if unknown.
  // (Typed as (number | null)[] — the original `number[]` lied about the nulls
  // stored in it and fails under strictNullChecks.)
  const prev: (number | null)[] = new Array(pointCount).fill(null);
  dist[start] = 0;
  while (unvisited.length) {
    // pick the unvisited vertex with minimum tentative distance (linear scan)
    let from = unvisited[0];
    for (let i = 1; i < unvisited.length; i++) {
      if (dist[unvisited[i]] < dist[from]) {
        from = unvisited[i];
      }
    }
    unvisited.splice(unvisited.indexOf(from), 1);
    // hoisted out of the neighbor loop: (x0, y0) does not depend on `to`
    const [x0, y0] = [delaunay.points[from * 2], delaunay.points[from * 2 + 1]];
    for (const to of delaunay.neighbors(from)) {
      const [x1, y1] = [delaunay.points[to * 2], delaunay.points[to * 2 + 1]];
      const [dx, dy] = [x1 - x0, y1 - y0];
      // relax the edge from -> to
      const d = dist[from] + Math.sqrt(dx * dx + dy * dy);
      if (d < dist[to]) {
        dist[to] = d;
        prev[to] = from;
      }
    }
  }
  // walk predecessors back from `end`, then reverse to get start -> end order
  const path: number[] = [];
  let current: number | null = end;
  if (prev[current] !== null || current === start) {
    while (current !== null) {
      path.push(current);
      current = prev[current];
    }
  }
  path.reverse();
  return path;
}
// Incrementally paints terrain features (walls, water, crossings, the paddock,
// grass) onto a Map.
class MapBuilder {
  map: Map;
  constructor(w: number, h: number) {
    this.map = new Map(w, h);
  }
  // Draw a wall along the line (x0,y0)-(x1,y1). When gapChance is given, each
  // cell is independently skipped with that probability, leaving gaps.
  addWall(x0: number, y0: number, x1: number, y1: number, gapChance?: number) {
    [x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
    for (const [x, y] of line([x0, y0], [x1, y1])) {
      if (gapChance && Math.random() < gapChance) continue;
      this.map.set(x, y, Tile.Wall);
    }
  }
  // Draw water along the line with a square brush of side `brushSize`.
  addStream(x0: number, y0: number, x1: number, y1: number, brushSize = 2) {
    [x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
    const bs2 = ~~(brushSize / 2);
    for (const [x, y] of line([x0, y0], [x1, y1])) {
      for (let bx = 0; bx < brushSize; bx++) {
        for (let by = 0; by < brushSize; by++) {
          this.map.set(x + bx - bs2, y + by - bs2, Tile.Water);
        }
      }
    }
  }
  // Convert water tiles around (x, y) into a crossing, covering the brush area
  // plus a one-cell margin so the crossing spans the whole stream width.
  addCrossing(x: number, y: number, brushSize = 2) {
    [x, y] = [x, y].map(Math.round);
    const bs2 = ~~(brushSize / 2);
    for (let bx = -1; bx < brushSize + 1; bx++) {
      for (let by = -1; by < brushSize + 1; by++) {
        if (this.map.get(x + bx - bs2, y + by - bs2) === Tile.Water) {
          this.map.set(x + bx - bs2, y + by - bs2, Tile.Crossing);
        }
      }
    }
  }
  // Clear a randomly sized rectangle centred on (x, y), wall its perimeter
  // except for an opening, and plant a grass seed at the centre.
  addPaddock(x: number, y: number) {
    [x, y] = [x, y].map(Math.round);
    const hw = ~~(Math.random() * 4) + 8; // half-width
    const hh = ~~(Math.random() * 3) + 5; // half-height
    const w = hw * 2; // width
    const h = hh * 2; // height
    const p = 2 * (w + h); // perimeter
    this.map.paddock = [x - hw, y - hh, w, h];
    // clear rect (water and crossings are preserved)
    for (let xOffset = -hw; xOffset <= hw; xOffset++) {
      for (let yOffset = -hh; yOffset <= hh; yOffset++) {
        const t = this.map.get(x + xOffset, y + yOffset);
        if (t !== Tile.Water && t !== Tile.Crossing) {
          this.map.set(x + xOffset, y + yOffset, Tile.Ground);
        }
      }
    }
    // random point along perimeter where the opening will be
    const opening = ~~(Math.random() * p);
    // wall in perimeter, leaving a gap of < 5 cells around the opening
    for (let i = 0; i < p; i++) {
      // wrap-around distance from perimeter cell i to the opening
      const distanceToOpening = p / 2 - Math.abs(Math.abs(i - opening) - p / 2);
      if (distanceToOpening < 5) continue;
      const px = x - hw + ~~Math.max(0, Math.min(i, w, w * 2 + h - i));
      const py = y - hh + ~~Math.max(0, Math.min(i - w, h, 2 * (w + h) - i));
      const t = this.map.get(px, py);
      if (t !== Tile.Water && t !== Tile.Crossing) {
        this.map.set(px, py, Tile.Wall);
      }
    }
    // plant grass at the centre once (previously this was called inside the
    // loop above with identical arguments — redundant work, same effect)
    this.plantGrass(x, y);
  }
  // Seed grass at (x, y) if the tile there is plain ground.
  plantGrass(x: number, y: number) {
    [x, y] = [x, y].map(Math.round);
    if (this.map.get(x, y) === Tile.Ground) {
      this.map.set(x, y, Tile.Grass);
    }
  }
  // One cellular-automaton step: each ground tile sprouts grass with
  // probability 0.5 per grassy 8-neighbour.
  iterateGrass() {
    // fix: previously read undeclared free variables `width`/`height` —
    // the map's own dimensions are the intended bounds
    const { width, height } = this.map;
    const addedGrasses: [number, number][] = [];
    for (let x = 0; x < width; x++) {
      for (let y = 0; y < height; y++) {
        const t = this.map.get(x, y);
        if (t === Tile.Ground) {
          const pastureNeighbors =
            (x > 0 && this.map.get(x - 1, y) === Tile.Grass ? 1 : 0) +
            (x < width - 1 && this.map.get(x + 1, y) === Tile.Grass ? 1 : 0) +
            (y > 0 && this.map.get(x, y - 1) === Tile.Grass ? 1 : 0) +
            (y < height - 1 && this.map.get(x, y + 1) === Tile.Grass ? 1 : 0) +
            (x > 0 && y > 0 && this.map.get(x - 1, y - 1) === Tile.Grass ? 1 : 0) +
            (x < width - 1 && y > 0 && this.map.get(x + 1, y - 1) === Tile.Grass ? 1 : 0) +
            (x < width - 1 && y < height - 1 && this.map.get(x + 1, y + 1) === Tile.Grass ? 1 : 0) +
            (x > 0 && y < height - 1 && this.map.get(x - 1, y + 1) === Tile.Grass ? 1 : 0);
          if (Math.random() < pastureNeighbors * 0.5) {
            addedGrasses.push([x, y]);
          }
        }
      }
    }
    // apply after the scan so new grass does not influence this iteration
    for (const [x, y] of addedGrasses) {
      this.map.set(x, y, Tile.Grass);
    }
  }
}
export default function generateMap(w: number, h: number) {
const mapBuilder = new MapBuilder(w, h);
const cellSize = 4;
pixels = 256;
resize();
renderer.clearColor = palette.timberwolf;
// generate a poisson disc distribution
const points: [number, number][] = [];
for (let i = 0; i < 1000; i++) {
const x = Math.random() * w;
const y = Math.random() * h;
if (!points.some(p => distSq(p, [x, y]) < cellSize * cellSize)) |
}
// calculate a triangulation of the points
const triangulation = Delaunay.from(points);
// pick a subset of points forming a circle in the center to be our playable area
const interior: number[] = [];
const sorted = points
.slice()
.sort((p0, p1) => distSq(p0, [w / 2, h / 2]) - distSq(p1, [w / 2, h / 2]));
for (let i = 0; i < points.length / 2; i++) {
interior.push(points.indexOf(sorted[i]));
}
// add walls around the hull of our interior subset
const interiorTriangulation = Delaunay.from(interior.map(i => points[i]));
const border = interiorTriangulation.hullPolygon();
for (let i = 0; i < border.length; i++) {
const [x0, y0] = border[i];
const [x1, y1] = border[(i + 1) % border.length];
mapBuilder.addWall(x0, y0, x1, y1);
}
// track which interior points are still open
const usedPoints: number[] = Array.from(
interiorTriangulation.hull.map(i => interior[i])
);
// generate a stream through the center of the map
let waterPath = [];
if (Math.random() > 0.25) {
const l = triangulation.hull.length;
const hullIndex0 = ~~(Math.random() * l);
const hullIndex1 = (hullIndex0 + ~~(l / 2 + (Math.random() - 0.5) * 0.25 * l)) % l;
const waterStart = triangulation.hull[hullIndex0];
const waterEnd = triangulation.hull[hullIndex1];
waterPath = routeTo(triangulation, waterStart, waterEnd);
}
// randomly add walls along the triangulation of interior points
const { halfedges, triangles } = interiorTriangulation;
const wallDensity = Math.random() * 0.1 + 0.1;
for (let i = 0; i < halfedges.length; i++) {
if (Math.random() > wallDensity) continue;
const j = halfedges[i];
if (j < i) continue;
const p0 = interior[triangles[i]];
const p1 = interior[triangles[j]];
if (waterPath.includes(p0) && waterPath.includes(p1)) continue;
usedPoints.push(p0);
usedPoints.push(p1);
const [x0, y0] = points[p0];
const [x1, y1] = points[p1];
mapBuilder.addWall(x0, y0, x1, y1, Math.random());
}
// add stream along water path
for (let i = 0; i < waterPath.length - 1; i++) {
const [x0, y0] = points[waterPath[i]];
const [x1, y1] = points[waterPath[i + 1]];
mapBuilder.addStream(x0, y0, x1, y1);
}
// add crossings at open points along the stream
const crossings = [];
for (let i = 0; i < waterPath.length - 1; i++) {
if (interior.includes(waterPath[i]) && !usedPoints.includes(waterPath[i])) {
crossings.push(waterPath[i]);
usedPoints.push(waterPath[i]);
}
}
for (let i = 0; i < crossings.length; i++) {
const [x, y] = points[crossings[i]];
mapBuilder.addCrossing(x, y);
usedPoints.push(crossings[i]);
crossings.splice(i, 1);
}
// pick an open point to use as a starting point
const start = interior.find(i => !usedPoints.includes(i));
mapBuilder.map.start = points[start].map(Math.round) as [number, number];
usedPoints.push(start);
mapBuilder.addPaddock(points[start][0], points[start][1]);
// place grasses at other open points
const grassDensity = Math.random() * 0.5 + 0.25;
for (let i = 0; i < interior.length; i++) {
const j = interior[i];
if (!usedPoints.includes(j) && Math.random() < grassDensity) {
const [x, y] = points[j];
mapBuilder.plantGrass(x, y);
}
}
// grow grass
for (let i = 0; i < 6; i++) {
mapBuilder.iterateGrass();
}
// place sheep at other points
const openPoints = interior.filter(i => !usedPoints.includes(i));
const sheepCount = 8; // TODO: where should this come from?
while (openPoints.length > 0 && mapBuilder.map.sheepStart.length < sheepCount) {
const j = ~~(Math.random() * openPoints.length);
mapBuilder.map.sheepStart.push(points[openPoints[j]]);
openPoints.splice(j, 0);
}
return mapBuilder.map;
}
// render map for testing
export function previewMap(map: Map) {
for (let y = 0; y < map.height; y++) {
for (let x = 0; x < map.width; x++) {
if (map.get(x, y) === Tile.Wall) {
renderer.set(x, y, palette.chestnut);
} else if (map.get(x, y) === Tile.Grass) {
renderer.set(x, y, palette.forestGreen);
} else if (map.get(x, y) === Tile.Water) {
renderer.set(x, y, palette.midnightBlue);
} else if (map.get(x, y) === Tile.Crossing) {
renderer.set(x, y, palette.aquamarine);
}
}
}
}
| {
points.push([x, y]);
} | conditional_block |
callback.go | package core
import (
"context"
"fmt"
"net/http"
"github.com/anz-bank/sysl-go/log"
"github.com/anz-bank/sysl-go/common"
"github.com/anz-bank/sysl-go/config"
"github.com/anz-bank/sysl-go/core/authrules"
"github.com/anz-bank/sysl-go/jwtauth"
"github.com/go-chi/chi"
"google.golang.org/grpc"
)
// RestGenCallback is used by `sysl-go` to call hand-crafted code.
// RestGenCallback is used by `sysl-go` to call hand-crafted code.
type RestGenCallback interface {
	// AddMiddleware allows hand-crafted code to add middleware to the router.
	AddMiddleware(ctx context.Context, r chi.Router)
	// BasePath allows hand-crafted code to set the base path for the Router.
	BasePath() string
	// Config returns a structure representing the server config.
	// This is returned from the status endpoint.
	Config() interface{}
	// MapError maps an error to an HTTPError in instances where custom error
	// mapping is required. Return nil to perform default error mapping; defined as:
	// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
	// 2. common.MapError
	MapError(ctx context.Context, err error) *common.HTTPError
	// DownstreamTimeoutContext adds the desired timeout duration to the context
	// for downstreams. A separate service timeout (usually greater than the
	// downstream) should also be in place to automatically respond to callers.
	DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// GrpcGenCallback is currently a subset of RestGenCallback so is defined separately for convenience.
// GrpcGenCallback is currently a subset of RestGenCallback so is defined
// separately for convenience.
type GrpcGenCallback interface {
	// DownstreamTimeoutContext adds the desired timeout duration to the context
	// for downstreams.
	DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// Hooks can be used to customise the behaviour of an autogenerated sysl-go service. | type Hooks struct {
// Logger returns the common.Logger instance to set use within Sysl-go.
// By default, if this Logger hook is not set then an instance of the pkg logger is used.
// This hook can also be used to define a custom logger.
// For more information about logging see log/README.md within this project.
// Note: The returned logger is guaranteed to have the log level from the external configuration
// file (library: log: level) set against it.
Logger func() log.Logger
// MapError maps an error to an HTTPError in instances where custom error mapping is required.
// Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
// By default, if this MapError hook is not customised, the default error mapping will be used.
MapError func(ctx context.Context, err error) *common.HTTPError
// AdditionalGrpcDialOptions can be used to append to the default grpc.DialOption configuration used by
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
// If given, AdditionalGrpcDialOptions will be appended to the list of default options created by
// DefaultGrpcDialOptions(CommonGRPCDownstreamData).
//
// Use AdditionalGrpcDialOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcDialOptions.
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
AdditionalGrpcDialOptions []grpc.DialOption
// OverrideGrpcDialOptions can be used to override the default grpc.DialOption configuration used by an
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
//
// The serviceName parameter will be filled with the name of the target service that we
// are about to call grpc.Dial to connect to -- a function implementing this hook can use the
// serviceName to customise different dial options for different targets.
//
// Prefer to use AdditionalGrpcDialOptions instead of OverrideGrpcDialOptions if you only need
// to append to the default grpc.DialOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
OverrideGrpcDialOptions func(serviceName string, cfg *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error)
// AdditionalGrpcServerOptions can be used to append to the default grpc.ServerOption configuration used by
// an autogenerated service when it creates a gRPC server. If given, AdditionalGrpcServerOptions will be
// appended to the list of default options created by DefaultGrpcServerOptions(context.Context, CommonServerConfig).
//
// Use AdditionalGrpcServerOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcServerOptions.
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
AdditionalGrpcServerOptions []grpc.ServerOption
// OverrideGrpcServerOptions can be used to override the default grpc.ServerOption configuration used by an
// autogenerated service when it creates a gRPC server.
//
// Prefer to use AdditionalGrpcServerOptions instead of OverrideGrpcServerOptions if you only need
// to append to the default grpc.ServerOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
OverrideGrpcServerOptions func(ctx context.Context, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error)
// OverrideMakeJWTClaimsBasedAuthorizationRule can be used to customise how authorization rule
// expressions are evaluated and used to decide if JWT claims are authorised. By default, if this
// hook is nil, then authrules.MakeDefaultJWTClaimsBasedAuthorizationRule is used.
OverrideMakeJWTClaimsBasedAuthorizationRule func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
// AddHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve all (non-admin) HTTP endpoints. By default, sysl-go installs a number of
// HTTP middleware -- refer to prepareMiddleware inside sysl-go/core. This hook can only
// be used to add middleware, not override any of the default middleware.
AddHTTPMiddleware func(ctx context.Context, r chi.Router)
// AddAdminHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve the admin HTTP endpoints. See AddHTTPMiddleware for further details.
AddAdminHTTPMiddleware func(ctx context.Context, r chi.Router)
// DownstreamRoundTripper can be used to install additional HTTP RoundTrippers to the downstream clients
DownstreamRoundTripper func(serviceName string, serviceURL string, original http.RoundTripper) http.RoundTripper
// ValidateConfig can be used to validate (or override) values in the config.
ValidateConfig func(ctx context.Context, cfg *config.DefaultConfig) error
}
// ResolveGrpcDialOptions resolves the grpc.DialOption list to use when dialling
// the named downstream gRPC service, combining the default options with any
// customisation supplied through the Hooks. Setting both
// AdditionalGrpcDialOptions and OverrideGrpcDialOptions is an error.
func ResolveGrpcDialOptions(ctx context.Context, serviceName string, h *Hooks, grpcDownstreamConfig *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error) {
	if h.OverrideGrpcDialOptions != nil {
		// The override hook replaces the defaults entirely; it cannot be
		// combined with additional options.
		if len(h.AdditionalGrpcDialOptions) > 0 {
			return nil, fmt.Errorf("Hooks.AdditionalGrpcDialOptions and Hooks.OverrideGrpcDialOptions cannot both be set")
		}
		return h.OverrideGrpcDialOptions(serviceName, grpcDownstreamConfig)
	}
	// Default path: start from the standard options and append any extras.
	defaults, err := config.DefaultGrpcDialOptions(ctx, grpcDownstreamConfig)
	if err != nil {
		return nil, err
	}
	return append(defaults, h.AdditionalGrpcDialOptions...), nil
}
// ResolveGrpcServerOptions resolves the grpc.ServerOption list used when the
// autogenerated service creates its gRPC server, combining the default options
// with any customisation supplied through the Hooks. Setting both
// AdditionalGrpcServerOptions and OverrideGrpcServerOptions is an error.
func ResolveGrpcServerOptions(ctx context.Context, h *Hooks, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error) {
	if h.OverrideGrpcServerOptions != nil {
		// The override hook replaces the defaults entirely; it cannot be
		// combined with additional options.
		if len(h.AdditionalGrpcServerOptions) > 0 {
			return nil, fmt.Errorf("Hooks.AdditionalGrpcServerOptions and Hooks.OverrideGrpcServerOptions cannot both be set")
		}
		return h.OverrideGrpcServerOptions(ctx, grpcPublicServerConfig)
	}
	// Default path: start from the standard options and append any extras.
	defaults, err := DefaultGrpcServerOptions(ctx, grpcPublicServerConfig)
	if err != nil {
		return nil, err
	}
	return append(defaults, h.AdditionalGrpcServerOptions...), nil
}
// ResolveGRPCAuthorizationRule builds the authorization Rule that guards the
// named gRPC method, evaluating authRuleExpression against JWT claims.
func ResolveGRPCAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
	return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeGRPCJWTAuthorizationRule)
}
// ResolveRESTAuthorizationRule builds the authorization Rule that guards the
// named REST endpoint, evaluating authRuleExpression against JWT claims.
func ResolveRESTAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
	return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeRESTJWTAuthorizationRule)
}
func resolveAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string, ruleFactory func(authRule authrules.JWTClaimsBasedAuthorizationRule, authenticator jwtauth.Authenticator) (authrules.Rule, error)) (authrules.Rule, error) {
cfg := config.GetDefaultConfig(ctx)
if cfg.Development != nil && cfg.Development.DisableAllAuthorizationRules {
log.Info(ctx, "warning: development.disableAllAuthorizationRules is set, all authorization rules are disabled, this is insecure and should not be used in production.")
return authrules.InsecureAlwaysGrantAccess, nil
}
var claimsBasedAuthRuleFactory func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
switch {
case h.OverrideMakeJWTClaimsBasedAuthorizationRule != nil:
claimsBasedAuthRuleFactory = h.OverrideMakeJWTClaimsBasedAuthorizationRule
default:
claimsBasedAuthRuleFactory = authrules.MakeDefaultJWTClaimsBasedAuthorizationRule
}
claimsBasedAuthRule, err := claimsBasedAuthRuleFactory(authRuleExpression)
if err != nil {
return nil, err
}
// TODO(fletcher) inject custom http client instrumented with monitoring
httpClient, err := config.DefaultHTTPClient(ctx, nil)
if err != nil {
return nil, err
}
httpClientFactory := func(_ string) *http.Client {
return httpClient
}
// Note: this will start a new jwtauth.Authenticator with its own cache & threads running for each of our service's endpoints, we usually want a shared one.
if cfg == nil || cfg.Library.Authentication == nil || cfg.Library.Authentication.JWTAuth == nil {
return nil, fmt.Errorf("method/endpoint %s requires a JWT-based authorization rule, but there is no config for library.authentication.jwtauth", endpointName)
}
authenticator, err := jwtauth.AuthFromConfig(ctx, cfg.Library.Authentication.JWTAuth, httpClientFactory)
if err != nil {
return nil, err
}
return ruleFactory(claimsBasedAuthRule, authenticator)
} | random_line_split | |
callback.go | package core
import (
"context"
"fmt"
"net/http"
"github.com/anz-bank/sysl-go/log"
"github.com/anz-bank/sysl-go/common"
"github.com/anz-bank/sysl-go/config"
"github.com/anz-bank/sysl-go/core/authrules"
"github.com/anz-bank/sysl-go/jwtauth"
"github.com/go-chi/chi"
"google.golang.org/grpc"
)
// RestGenCallback is used by `sysl-go` to call hand-crafted code.
type RestGenCallback interface {
// AddMiddleware allows hand-crafted code to add middleware to the router
AddMiddleware(ctx context.Context, r chi.Router)
// BasePath allows hand-crafted code to set the base path for the Router
BasePath() string
// Config returns a structure representing the server config
// This is returned from the status endpoint
Config() interface{}
// MapError maps an error to an HTTPError in instances where custom error mapping is required. Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
MapError(ctx context.Context, err error) *common.HTTPError
// DownstreamTimeoutContext add the desired timeout duration to the context for downstreams
// A separate service timeout (usually greater than the downstream) should also be in
// place to automatically respond to callers
DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// GrpcGenCallback is currently a subset of RestGenCallback so is defined separately for convenience.
type GrpcGenCallback interface {
DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// Hooks can be used to customise the behaviour of an autogenerated sysl-go service.
type Hooks struct {
	// Logger returns the common.Logger instance to use within Sysl-go.
	// By default, if this Logger hook is not set then an instance of the pkg logger is used.
	// This hook can also be used to define a custom logger.
	// For more information about logging see log/README.md within this project.
	// Note: The returned logger is guaranteed to have the log level from the external configuration
	// file (library: log: level) set against it.
	Logger func() log.Logger
	// MapError maps an error to an HTTPError in instances where custom error mapping is required.
	// Return nil to perform default error mapping; defined as:
	// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
	// 2. common.MapError
	// By default, if this MapError hook is not customised, the default error mapping will be used.
	MapError func(ctx context.Context, err error) *common.HTTPError
	// AdditionalGrpcDialOptions can be used to append to the default grpc.DialOption configuration used by
	// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
	// If given, AdditionalGrpcDialOptions will be appended to the list of default options created by
	// DefaultGrpcDialOptions(CommonGRPCDownstreamData).
	//
	// Use AdditionalGrpcDialOptions if you need both default and custom options. Be careful that you do
	// not specify any options that clash with the default options.
	//
	// If you need to completely override the default options, use OverrideGrpcDialOptions.
	// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
	AdditionalGrpcDialOptions []grpc.DialOption
	// OverrideGrpcDialOptions can be used to override the default grpc.DialOption configuration used by
	// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
	//
	// The serviceName parameter will be filled with the name of the target service that we
	// are about to call grpc.Dial to connect to -- a function implementing this hook can use the
	// serviceName to customise different dial options for different targets.
	//
	// Prefer to use AdditionalGrpcDialOptions instead of OverrideGrpcDialOptions if you only need
	// to append to the default grpc.DialOption configuration instead of overriding it completely.
	//
	// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
	OverrideGrpcDialOptions func(serviceName string, cfg *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error)
	// AdditionalGrpcServerOptions can be used to append to the default grpc.ServerOption configuration used by
	// an autogenerated service when it creates a gRPC server. If given, AdditionalGrpcServerOptions will be
	// appended to the list of default options created by DefaultGrpcServerOptions(context.Context, CommonServerConfig).
	//
	// Use AdditionalGrpcServerOptions if you need both default and custom options. Be careful that you do
	// not specify any options that clash with the default options.
	//
	// If you need to completely override the default options, use OverrideGrpcServerOptions.
	// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
	AdditionalGrpcServerOptions []grpc.ServerOption
	// OverrideGrpcServerOptions can be used to override the default grpc.ServerOption configuration used by an
	// autogenerated service when it creates a gRPC server.
	//
	// Prefer to use AdditionalGrpcServerOptions instead of OverrideGrpcServerOptions if you only need
	// to append to the default grpc.ServerOption configuration instead of overriding it completely.
	//
	// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
	OverrideGrpcServerOptions func(ctx context.Context, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error)
	// OverrideMakeJWTClaimsBasedAuthorizationRule can be used to customise how authorization rule
	// expressions are evaluated and used to decide if JWT claims are authorised. By default, if this
	// hook is nil, then authrules.MakeDefaultJWTClaimsBasedAuthorizationRule is used.
	OverrideMakeJWTClaimsBasedAuthorizationRule func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
	// AddHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
	// used to serve all (non-admin) HTTP endpoints. By default, sysl-go installs a number of
	// HTTP middleware -- refer to prepareMiddleware inside sysl-go/core. This hook can only
	// be used to add middleware, not override any of the default middleware.
	AddHTTPMiddleware func(ctx context.Context, r chi.Router)
	// AddAdminHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
	// used to serve the admin HTTP endpoints. See AddHTTPMiddleware for further details.
	AddAdminHTTPMiddleware func(ctx context.Context, r chi.Router)
	// DownstreamRoundTripper can be used to install additional HTTP RoundTrippers to the downstream clients.
	DownstreamRoundTripper func(serviceName string, serviceURL string, original http.RoundTripper) http.RoundTripper
	// ValidateConfig can be used to validate (or override) values in the config.
	ValidateConfig func(ctx context.Context, cfg *config.DefaultConfig) error
}
func ResolveGrpcDialOptions(ctx context.Context, serviceName string, h *Hooks, grpcDownstreamConfig *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error) {
switch {
case len(h.AdditionalGrpcDialOptions) > 0 && h.OverrideGrpcDialOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcDialOptions and Hooks.OverrideGrpcDialOptions cannot both be set")
case h.OverrideGrpcDialOptions != nil:
return h.OverrideGrpcDialOptions(serviceName, grpcDownstreamConfig)
default:
opts, err := config.DefaultGrpcDialOptions(ctx, grpcDownstreamConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcDialOptions...)
return opts, nil
}
}
func ResolveGrpcServerOptions(ctx context.Context, h *Hooks, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error) {
switch {
case len(h.AdditionalGrpcServerOptions) > 0 && h.OverrideGrpcServerOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcServerOptions and Hooks.OverrideGrpcServerOptions cannot both be set")
case h.OverrideGrpcServerOptions != nil:
return h.OverrideGrpcServerOptions(ctx, grpcPublicServerConfig)
default:
opts, err := DefaultGrpcServerOptions(ctx, grpcPublicServerConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcServerOptions...)
return opts, nil
}
}
func | (ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeGRPCJWTAuthorizationRule)
}
func ResolveRESTAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeRESTJWTAuthorizationRule)
}
func resolveAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string, ruleFactory func(authRule authrules.JWTClaimsBasedAuthorizationRule, authenticator jwtauth.Authenticator) (authrules.Rule, error)) (authrules.Rule, error) {
cfg := config.GetDefaultConfig(ctx)
if cfg.Development != nil && cfg.Development.DisableAllAuthorizationRules {
log.Info(ctx, "warning: development.disableAllAuthorizationRules is set, all authorization rules are disabled, this is insecure and should not be used in production.")
return authrules.InsecureAlwaysGrantAccess, nil
}
var claimsBasedAuthRuleFactory func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
switch {
case h.OverrideMakeJWTClaimsBasedAuthorizationRule != nil:
claimsBasedAuthRuleFactory = h.OverrideMakeJWTClaimsBasedAuthorizationRule
default:
claimsBasedAuthRuleFactory = authrules.MakeDefaultJWTClaimsBasedAuthorizationRule
}
claimsBasedAuthRule, err := claimsBasedAuthRuleFactory(authRuleExpression)
if err != nil {
return nil, err
}
// TODO(fletcher) inject custom http client instrumented with monitoring
httpClient, err := config.DefaultHTTPClient(ctx, nil)
if err != nil {
return nil, err
}
httpClientFactory := func(_ string) *http.Client {
return httpClient
}
// Note: this will start a new jwtauth.Authenticator with its own cache & threads running for each of our service's endpoints, we usually want a shared one.
if cfg == nil || cfg.Library.Authentication == nil || cfg.Library.Authentication.JWTAuth == nil {
return nil, fmt.Errorf("method/endpoint %s requires a JWT-based authorization rule, but there is no config for library.authentication.jwtauth", endpointName)
}
authenticator, err := jwtauth.AuthFromConfig(ctx, cfg.Library.Authentication.JWTAuth, httpClientFactory)
if err != nil {
return nil, err
}
return ruleFactory(claimsBasedAuthRule, authenticator)
}
| ResolveGRPCAuthorizationRule | identifier_name |
callback.go | package core
import (
"context"
"fmt"
"net/http"
"github.com/anz-bank/sysl-go/log"
"github.com/anz-bank/sysl-go/common"
"github.com/anz-bank/sysl-go/config"
"github.com/anz-bank/sysl-go/core/authrules"
"github.com/anz-bank/sysl-go/jwtauth"
"github.com/go-chi/chi"
"google.golang.org/grpc"
)
// RestGenCallback is used by `sysl-go` to call hand-crafted code.
type RestGenCallback interface {
// AddMiddleware allows hand-crafted code to add middleware to the router
AddMiddleware(ctx context.Context, r chi.Router)
// BasePath allows hand-crafted code to set the base path for the Router
BasePath() string
// Config returns a structure representing the server config
// This is returned from the status endpoint
Config() interface{}
// MapError maps an error to an HTTPError in instances where custom error mapping is required. Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
MapError(ctx context.Context, err error) *common.HTTPError
// DownstreamTimeoutContext add the desired timeout duration to the context for downstreams
// A separate service timeout (usually greater than the downstream) should also be in
// place to automatically respond to callers
DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// GrpcGenCallback is currently a subset of RestGenCallback so is defined separately for convenience.
type GrpcGenCallback interface {
DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// Hooks can be used to customise the behaviour of an autogenerated sysl-go service.
type Hooks struct {
// Logger returns the common.Logger instance to set use within Sysl-go.
// By default, if this Logger hook is not set then an instance of the pkg logger is used.
// This hook can also be used to define a custom logger.
// For more information about logging see log/README.md within this project.
// Note: The returned logger is guaranteed to have the log level from the external configuration
// file (library: log: level) set against it.
Logger func() log.Logger
// MapError maps an error to an HTTPError in instances where custom error mapping is required.
// Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
// By default, if this MapError hook is not customised, the default error mapping will be used.
MapError func(ctx context.Context, err error) *common.HTTPError
// AdditionalGrpcDialOptions can be used to append to the default grpc.DialOption configuration used by
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
// If given, AdditionalGrpcDialOptions will be appended to the list of default options created by
// DefaultGrpcDialOptions(CommonGRPCDownstreamData).
//
// Use AdditionalGrpcDialOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcDialOptions.
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
AdditionalGrpcDialOptions []grpc.DialOption
// OverrideGrpcDialOptions can be used to override the default grpc.DialOption configuration used by an
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
//
// The serviceName parameter will be filled with the name of the target service that we
// are about to call grpc.Dial to connect to -- a function implementing this hook can use the
// serviceName to customise different dial options for different targets.
//
// Prefer to use AdditionalGrpcDialOptions instead of OverrideGrpcDialOptions if you only need
// to append to the default grpc.DialOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
OverrideGrpcDialOptions func(serviceName string, cfg *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error)
// AdditionalGrpcServerOptions can be used to append to the default grpc.ServerOption configuration used by
// an autogenerated service when it creates a gRPC server. If given, AdditionalGrpcServerOptions will be
// appended to the list of default options created by DefaultGrpcServerOptions(context.Context, CommonServerConfig).
//
// Use AdditionalGrpcServerOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcServerOptions.
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
AdditionalGrpcServerOptions []grpc.ServerOption
// OverrideGrpcServerOptions can be used to override the default grpc.ServerOption configuration used by an
// autogenerated service when it creates a gRPC server.
//
// Prefer to use AdditionalGrpcServerOptions instead of OverrideGrpcServerOptions if you only need
// to append to the default grpc.ServerOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
OverrideGrpcServerOptions func(ctx context.Context, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error)
// OverrideMakeJWTClaimsBasedAuthorizationRule can be used to customise how authorization rule
// expressions are evaluated and used to decide if JWT claims are authorised. By default, if this
// hook is nil, then authrules.MakeDefaultJWTClaimsBasedAuthorizationRule is used.
OverrideMakeJWTClaimsBasedAuthorizationRule func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
// AddHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve all (non-admin) HTTP endpoints. By default, sysl-go installs a number of
// HTTP middleware -- refer to prepareMiddleware inside sysl-go/core. This hook can only
// be used to add middleware, not override any of the default middleware.
AddHTTPMiddleware func(ctx context.Context, r chi.Router)
// AddAdminHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve the admin HTTP endpoints. See AddHTTPMiddleware for further details.
AddAdminHTTPMiddleware func(ctx context.Context, r chi.Router)
// DownstreamRoundTripper can be used to install additional HTTP RoundTrippers to the downstream clients
DownstreamRoundTripper func(serviceName string, serviceURL string, original http.RoundTripper) http.RoundTripper
// ValidateConfig can be used to validate (or override) values in the config.
ValidateConfig func(ctx context.Context, cfg *config.DefaultConfig) error
}
func ResolveGrpcDialOptions(ctx context.Context, serviceName string, h *Hooks, grpcDownstreamConfig *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error) {
switch {
case len(h.AdditionalGrpcDialOptions) > 0 && h.OverrideGrpcDialOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcDialOptions and Hooks.OverrideGrpcDialOptions cannot both be set")
case h.OverrideGrpcDialOptions != nil:
return h.OverrideGrpcDialOptions(serviceName, grpcDownstreamConfig)
default:
opts, err := config.DefaultGrpcDialOptions(ctx, grpcDownstreamConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcDialOptions...)
return opts, nil
}
}
func ResolveGrpcServerOptions(ctx context.Context, h *Hooks, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error) {
switch {
case len(h.AdditionalGrpcServerOptions) > 0 && h.OverrideGrpcServerOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcServerOptions and Hooks.OverrideGrpcServerOptions cannot both be set")
case h.OverrideGrpcServerOptions != nil:
return h.OverrideGrpcServerOptions(ctx, grpcPublicServerConfig)
default:
opts, err := DefaultGrpcServerOptions(ctx, grpcPublicServerConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcServerOptions...)
return opts, nil
}
}
func ResolveGRPCAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeGRPCJWTAuthorizationRule)
}
func ResolveRESTAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeRESTJWTAuthorizationRule)
}
func resolveAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string, ruleFactory func(authRule authrules.JWTClaimsBasedAuthorizationRule, authenticator jwtauth.Authenticator) (authrules.Rule, error)) (authrules.Rule, error) {
cfg := config.GetDefaultConfig(ctx)
if cfg.Development != nil && cfg.Development.DisableAllAuthorizationRules {
log.Info(ctx, "warning: development.disableAllAuthorizationRules is set, all authorization rules are disabled, this is insecure and should not be used in production.")
return authrules.InsecureAlwaysGrantAccess, nil
}
var claimsBasedAuthRuleFactory func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
switch {
case h.OverrideMakeJWTClaimsBasedAuthorizationRule != nil:
claimsBasedAuthRuleFactory = h.OverrideMakeJWTClaimsBasedAuthorizationRule
default:
claimsBasedAuthRuleFactory = authrules.MakeDefaultJWTClaimsBasedAuthorizationRule
}
claimsBasedAuthRule, err := claimsBasedAuthRuleFactory(authRuleExpression)
if err != nil {
return nil, err
}
// TODO(fletcher) inject custom http client instrumented with monitoring
httpClient, err := config.DefaultHTTPClient(ctx, nil)
if err != nil {
return nil, err
}
httpClientFactory := func(_ string) *http.Client {
return httpClient
}
// Note: this will start a new jwtauth.Authenticator with its own cache & threads running for each of our service's endpoints, we usually want a shared one.
if cfg == nil || cfg.Library.Authentication == nil || cfg.Library.Authentication.JWTAuth == nil |
authenticator, err := jwtauth.AuthFromConfig(ctx, cfg.Library.Authentication.JWTAuth, httpClientFactory)
if err != nil {
return nil, err
}
return ruleFactory(claimsBasedAuthRule, authenticator)
}
| {
return nil, fmt.Errorf("method/endpoint %s requires a JWT-based authorization rule, but there is no config for library.authentication.jwtauth", endpointName)
} | conditional_block |
callback.go | package core
import (
"context"
"fmt"
"net/http"
"github.com/anz-bank/sysl-go/log"
"github.com/anz-bank/sysl-go/common"
"github.com/anz-bank/sysl-go/config"
"github.com/anz-bank/sysl-go/core/authrules"
"github.com/anz-bank/sysl-go/jwtauth"
"github.com/go-chi/chi"
"google.golang.org/grpc"
)
// RestGenCallback is used by `sysl-go` to call hand-crafted code.
type RestGenCallback interface {
// AddMiddleware allows hand-crafted code to add middleware to the router
AddMiddleware(ctx context.Context, r chi.Router)
// BasePath allows hand-crafted code to set the base path for the Router
BasePath() string
// Config returns a structure representing the server config
// This is returned from the status endpoint
Config() interface{}
// MapError maps an error to an HTTPError in instances where custom error mapping is required. Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
MapError(ctx context.Context, err error) *common.HTTPError
// DownstreamTimeoutContext add the desired timeout duration to the context for downstreams
// A separate service timeout (usually greater than the downstream) should also be in
// place to automatically respond to callers
DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// GrpcGenCallback is currently a subset of RestGenCallback so is defined separately for convenience.
type GrpcGenCallback interface {
DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// Hooks can be used to customise the behaviour of an autogenerated sysl-go service.
type Hooks struct {
// Logger returns the common.Logger instance to set use within Sysl-go.
// By default, if this Logger hook is not set then an instance of the pkg logger is used.
// This hook can also be used to define a custom logger.
// For more information about logging see log/README.md within this project.
// Note: The returned logger is guaranteed to have the log level from the external configuration
// file (library: log: level) set against it.
Logger func() log.Logger
// MapError maps an error to an HTTPError in instances where custom error mapping is required.
// Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
// By default, if this MapError hook is not customised, the default error mapping will be used.
MapError func(ctx context.Context, err error) *common.HTTPError
// AdditionalGrpcDialOptions can be used to append to the default grpc.DialOption configuration used by
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
// If given, AdditionalGrpcDialOptions will be appended to the list of default options created by
// DefaultGrpcDialOptions(CommonGRPCDownstreamData).
//
// Use AdditionalGrpcDialOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcDialOptions.
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
AdditionalGrpcDialOptions []grpc.DialOption
// OverrideGrpcDialOptions can be used to override the default grpc.DialOption configuration used by an
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
//
// The serviceName parameter will be filled with the name of the target service that we
// are about to call grpc.Dial to connect to -- a function implementing this hook can use the
// serviceName to customise different dial options for different targets.
//
// Prefer to use AdditionalGrpcDialOptions instead of OverrideGrpcDialOptions if you only need
// to append to the default grpc.DialOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
OverrideGrpcDialOptions func(serviceName string, cfg *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error)
// AdditionalGrpcServerOptions can be used to append to the default grpc.ServerOption configuration used by
// an autogenerated service when it creates a gRPC server. If given, AdditionalGrpcServerOptions will be
// appended to the list of default options created by DefaultGrpcServerOptions(context.Context, CommonServerConfig).
//
// Use AdditionalGrpcServerOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcServerOptions.
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
AdditionalGrpcServerOptions []grpc.ServerOption
// OverrideGrpcServerOptions can be used to override the default grpc.ServerOption configuration used by an
// autogenerated service when it creates a gRPC server.
//
// Prefer to use AdditionalGrpcServerOptions instead of OverrideGrpcServerOptions if you only need
// to append to the default grpc.ServerOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
OverrideGrpcServerOptions func(ctx context.Context, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error)
// OverrideMakeJWTClaimsBasedAuthorizationRule can be used to customise how authorization rule
// expressions are evaluated and used to decide if JWT claims are authorised. By default, if this
// hook is nil, then authrules.MakeDefaultJWTClaimsBasedAuthorizationRule is used.
OverrideMakeJWTClaimsBasedAuthorizationRule func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
// AddHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve all (non-admin) HTTP endpoints. By default, sysl-go installs a number of
// HTTP middleware -- refer to prepareMiddleware inside sysl-go/core. This hook can only
// be used to add middleware, not override any of the default middleware.
AddHTTPMiddleware func(ctx context.Context, r chi.Router)
// AddAdminHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve the admin HTTP endpoints. See AddHTTPMiddleware for further details.
AddAdminHTTPMiddleware func(ctx context.Context, r chi.Router)
// DownstreamRoundTripper can be used to install additional HTTP RoundTrippers to the downstream clients
DownstreamRoundTripper func(serviceName string, serviceURL string, original http.RoundTripper) http.RoundTripper
// ValidateConfig can be used to validate (or override) values in the config.
ValidateConfig func(ctx context.Context, cfg *config.DefaultConfig) error
}
func ResolveGrpcDialOptions(ctx context.Context, serviceName string, h *Hooks, grpcDownstreamConfig *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error) |
func ResolveGrpcServerOptions(ctx context.Context, h *Hooks, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error) {
switch {
case len(h.AdditionalGrpcServerOptions) > 0 && h.OverrideGrpcServerOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcServerOptions and Hooks.OverrideGrpcServerOptions cannot both be set")
case h.OverrideGrpcServerOptions != nil:
return h.OverrideGrpcServerOptions(ctx, grpcPublicServerConfig)
default:
opts, err := DefaultGrpcServerOptions(ctx, grpcPublicServerConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcServerOptions...)
return opts, nil
}
}
func ResolveGRPCAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeGRPCJWTAuthorizationRule)
}
func ResolveRESTAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeRESTJWTAuthorizationRule)
}
func resolveAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string, ruleFactory func(authRule authrules.JWTClaimsBasedAuthorizationRule, authenticator jwtauth.Authenticator) (authrules.Rule, error)) (authrules.Rule, error) {
cfg := config.GetDefaultConfig(ctx)
if cfg.Development != nil && cfg.Development.DisableAllAuthorizationRules {
log.Info(ctx, "warning: development.disableAllAuthorizationRules is set, all authorization rules are disabled, this is insecure and should not be used in production.")
return authrules.InsecureAlwaysGrantAccess, nil
}
var claimsBasedAuthRuleFactory func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
switch {
case h.OverrideMakeJWTClaimsBasedAuthorizationRule != nil:
claimsBasedAuthRuleFactory = h.OverrideMakeJWTClaimsBasedAuthorizationRule
default:
claimsBasedAuthRuleFactory = authrules.MakeDefaultJWTClaimsBasedAuthorizationRule
}
claimsBasedAuthRule, err := claimsBasedAuthRuleFactory(authRuleExpression)
if err != nil {
return nil, err
}
// TODO(fletcher) inject custom http client instrumented with monitoring
httpClient, err := config.DefaultHTTPClient(ctx, nil)
if err != nil {
return nil, err
}
httpClientFactory := func(_ string) *http.Client {
return httpClient
}
// Note: this will start a new jwtauth.Authenticator with its own cache & threads running for each of our service's endpoints, we usually want a shared one.
if cfg == nil || cfg.Library.Authentication == nil || cfg.Library.Authentication.JWTAuth == nil {
return nil, fmt.Errorf("method/endpoint %s requires a JWT-based authorization rule, but there is no config for library.authentication.jwtauth", endpointName)
}
authenticator, err := jwtauth.AuthFromConfig(ctx, cfg.Library.Authentication.JWTAuth, httpClientFactory)
if err != nil {
return nil, err
}
return ruleFactory(claimsBasedAuthRule, authenticator)
}
| {
switch {
case len(h.AdditionalGrpcDialOptions) > 0 && h.OverrideGrpcDialOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcDialOptions and Hooks.OverrideGrpcDialOptions cannot both be set")
case h.OverrideGrpcDialOptions != nil:
return h.OverrideGrpcDialOptions(serviceName, grpcDownstreamConfig)
default:
opts, err := config.DefaultGrpcDialOptions(ctx, grpcDownstreamConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcDialOptions...)
return opts, nil
}
} | identifier_body |
gdb.rs | use gdbstub::common::{Signal, Tid};
use gdbstub::conn::Connection;
use gdbstub::stub::state_machine::GdbStubStateMachine;
use gdbstub::stub::{GdbStubBuilder, GdbStubError, MultiThreadStopReason};
use gdbstub::target::Target;
use crate::io::SerialRead;
use crate::platform::precursor::gdbuart::GdbUart;
mod breakpoints;
mod current_active_pid;
mod extended_mode;
mod monitor;
mod multi_thread_base;
mod multi_thread_resume;
mod multi_thread_single_step;
mod single_register_access;
mod target;
#[cfg(target_arch = "riscv32")]
#[path = "gdb/riscv.rs"]
mod cpu;
pub struct XousTarget {
pid: Option<xous_kernel::PID>,
inner: cpu::XousTargetInner,
}
pub struct XousDebugState<'a> {
pub target: XousTarget,
pub server: GdbStubStateMachine<'a, XousTarget, crate::platform::precursor::gdbuart::GdbUart>,
}
static mut GDB_STATE: Option<XousDebugState> = None;
static mut GDB_BUFFER: [u8; 4096] = [0u8; 4096];
trait ProcessPid {
fn pid(&self) -> Option<xous_kernel::PID>;
fn take_pid(&mut self) -> Option<xous_kernel::PID>;
}
impl ProcessPid for XousTarget {
fn pid(&self) -> Option<xous_kernel::PID> {
self.pid
}
fn take_pid(&mut self) -> Option<xous_kernel::PID> {
self.pid.take()
}
}
struct MicroRingBuf<const N: usize> {
buffer: [u8; N],
head: usize,
tail: usize,
}
impl<const N: usize> Default for MicroRingBuf<N> {
fn default() -> Self {
MicroRingBuf {
buffer: [0u8; N],
head: 0,
tail: 0,
}
}
}
impl<const N: usize> MicroRingBuf<N> {
// pub fn capacity(&self) -> usize {
// self.buffer.len()
// }
// pub fn len(&self) -> usize {
// self.head.wrapping_sub(self.tail) % N
// }
pub fn is_full(&self) -> bool {
(self.tail.wrapping_sub(1) % N) == self.head
}
pub fn try_push(&mut self, val: u8) -> Result<(), ()> {
if self.is_full() {
return Err(());
}
self.buffer[self.head] = val;
self.head = (self.head + 1) % N;
Ok(())
}
pub fn try_pop(&mut self) -> Option<u8> {
if self.tail == self.head {
return None;
}
let val = self.buffer[self.tail];
self.tail = (self.tail + 1) % N;
Some(val)
}
}
fn receive_irq(uart: &mut GdbUart) {
let mut buffer = MicroRingBuf::<32>::default();
loop {
// Try to fill up the ring buffer with as many characters
// as can fit. This is to compensate for the fact that we do
// all of this processing in an interrupt context, and the
// hardware UART buffer is only a few characters deep.
while !buffer.is_full() {
if let Some(c) = uart.getc() {
buffer.try_push(c).ok();
} else {
break;
}
}
// If there is a character in the buffer, process it. Otherwise,
// we're done.
let Some(c) = buffer.try_pop() else { break };
process_character(c);
// If the GDB server goes away for some reason, reconstitute it
unsafe {
if GDB_STATE.is_none() {
init();
}
}
}
}
impl XousTarget {
pub fn new() -> XousTarget {
XousTarget {
pid: None,
inner: cpu::XousTargetInner::default(),
}
}
}
fn state_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: &GdbStubStateMachine<'a, T, C>,
) -> bool {
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => true,
GdbStubStateMachine::CtrlCInterrupt(_) | GdbStubStateMachine::Disconnected(_) => false,
}
}
fn ensure_can_accept_characters_inner<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
recurse_count: usize,
) -> Option<GdbStubStateMachine<'a, T, C>> {
if recurse_count == 0 {
return None;
}
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => Some(machine),
GdbStubStateMachine::CtrlCInterrupt(gdb_stm_inner) => {
if let Some(pid) = target.pid() {
crate::services::SystemServices::with_mut(|system_services| {
if let Err(e) = system_services.pause_process_for_debug(pid) {
println!("Unable to pause process {:?} for debug: {:?}", pid, e);
}
});
}
let Ok(new_server) = gdb_stm_inner.interrupt_handled(target, Some(MultiThreadStopReason::Signal(Signal::SIGINT))) else {
return None
};
ensure_can_accept_characters_inner(new_server, target, recurse_count - 1)
}
GdbStubStateMachine::Disconnected(gdb_stm_inner) => {
if let Some(pid) = target.take_pid() {
crate::services::SystemServices::with_mut(|system_services| {
system_services.resume_process_from_debug(pid).unwrap()
});
}
ensure_can_accept_characters_inner(
gdb_stm_inner.return_to_idle(),
target,
recurse_count - 1,
)
}
}
}
fn ensure_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
) -> Option<GdbStubStateMachine<'a, T, C>> {
ensure_can_accept_characters_inner(machine, target, 4)
}
/// Advance the GDB state.
///
/// Two states accept characters:
///
/// GdbStubStateMachine::Idle
/// GdbStubStateMachine::Running
///
/// Two states exist merely to transition to other states:
///
/// GdbStubStateMachine::CtrlCInterrupt
/// GdbStubStateMachine::Disconnected
fn process_character(byte: u8) {
let XousDebugState { mut target, server } = unsafe {
GDB_STATE.take().unwrap_or_else(|| {
init();
GDB_STATE.take().unwrap()
})
};
if !state_can_accept_characters(&server) {
println!("GDB server was not in a state to accept characters");
return;
}
let new_server = match server {
GdbStubStateMachine::Idle(gdb_stm_inner) => {
let Ok(gdb) = gdb_stm_inner.incoming_data(&mut target, byte).map_err(|e| println!("gdbstub error during idle operation: {:?}", e)) else {
return;
};
gdb
}
GdbStubStateMachine::Running(gdb_stm_inner) => {
// If we're here we were running but have stopped now (either
// because we hit Ctrl+c in gdb and hence got a serial interrupt
// or we hit a breakpoint).
match gdb_stm_inner.incoming_data(&mut target, byte) {
Ok(pumped_stm) => pumped_stm,
Err(GdbStubError::TargetError(e)) => {
println!("Target raised a fatal error: {:?}", e);
return;
}
Err(e) => {
println!("gdbstub error in DeferredStopReason.pump: {:?}", e);
return;
}
}
}
_ => {
println!("GDB is in an unexpected state!");
return;
}
};
// If the user just hit Ctrl-C, then remove the pending interrupt that may or may not exist.
if let GdbStubStateMachine::CtrlCInterrupt(_) = &new_server {
target.unpatch_stepi(Tid::new(1).unwrap()).ok();
}
let Some(server) = ensure_can_accept_characters(new_server, &mut target) else {
println!("Couldn't convert GDB into a state that accepts characters");
return;
};
unsafe { GDB_STATE = Some(XousDebugState { target, server }) };
}
pub fn report_stop(_pid: xous_kernel::PID, tid: xous_kernel::TID, _pc: usize) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
target.unpatch_stepi(Tid::new(tid).unwrap()).ok();
let GdbStubStateMachine::Running(inner) = gdb else {
println!("GDB state machine was in an invalid state");
return;
};
let Ok(new_gdb) = inner.report_stop(
&mut target,
MultiThreadStopReason::SignalWithThread {
signal: Signal::EXC_BREAKPOINT,
tid: Tid::new(tid).unwrap(),
}
) else {
println!("Unable to report stop");
return;
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn report_terminated(pid: xous_kernel::PID) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
let new_gdb = match gdb {
GdbStubStateMachine::Running(inner) => {
match inner.report_stop(
&mut target,
MultiThreadStopReason::Signal(Signal::EXC_BAD_ACCESS),
) {
Ok(new_gdb) => new_gdb,
Err(e) => {
println!("Unable to report stop: {:?}", e);
return;
}
}
}
GdbStubStateMachine::CtrlCInterrupt(_inner) => {
println!("GDB state was in CtrlCInterrupt, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Disconnected(_inner) => {
println!("GDB state was in Disconnect, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Idle(inner) => {
println!("Please connect a debugger to debug process {}", pid);
GdbStubStateMachine::Idle(inner)
}
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn init() | {
let mut uart = GdbUart::new(receive_irq).unwrap();
uart.enable();
let mut target = XousTarget::new();
let server = GdbStubBuilder::new(uart)
.with_packet_buffer(unsafe { &mut GDB_BUFFER })
.build()
.expect("unable to build gdb server")
.run_state_machine(&mut target)
.expect("unable to start gdb state machine");
unsafe {
GDB_STATE = Some(XousDebugState { target, server });
}
} | identifier_body | |
gdb.rs | use gdbstub::common::{Signal, Tid};
use gdbstub::conn::Connection;
use gdbstub::stub::state_machine::GdbStubStateMachine;
use gdbstub::stub::{GdbStubBuilder, GdbStubError, MultiThreadStopReason};
use gdbstub::target::Target;
use crate::io::SerialRead;
use crate::platform::precursor::gdbuart::GdbUart;
mod breakpoints;
mod current_active_pid;
mod extended_mode;
mod monitor;
mod multi_thread_base;
mod multi_thread_resume;
mod multi_thread_single_step;
mod single_register_access;
mod target;
#[cfg(target_arch = "riscv32")]
#[path = "gdb/riscv.rs"]
mod cpu;
pub struct XousTarget {
pid: Option<xous_kernel::PID>,
inner: cpu::XousTargetInner,
}
pub struct XousDebugState<'a> {
pub target: XousTarget,
pub server: GdbStubStateMachine<'a, XousTarget, crate::platform::precursor::gdbuart::GdbUart>,
}
static mut GDB_STATE: Option<XousDebugState> = None;
static mut GDB_BUFFER: [u8; 4096] = [0u8; 4096];
trait ProcessPid {
fn pid(&self) -> Option<xous_kernel::PID>;
fn take_pid(&mut self) -> Option<xous_kernel::PID>;
}
impl ProcessPid for XousTarget {
fn pid(&self) -> Option<xous_kernel::PID> {
self.pid
}
fn take_pid(&mut self) -> Option<xous_kernel::PID> {
self.pid.take()
}
}
struct MicroRingBuf<const N: usize> {
buffer: [u8; N],
head: usize,
tail: usize,
}
impl<const N: usize> Default for MicroRingBuf<N> {
fn default() -> Self {
MicroRingBuf {
buffer: [0u8; N],
head: 0,
tail: 0,
}
}
}
impl<const N: usize> MicroRingBuf<N> {
// pub fn capacity(&self) -> usize {
// self.buffer.len()
// }
// pub fn len(&self) -> usize {
// self.head.wrapping_sub(self.tail) % N
// }
pub fn is_full(&self) -> bool {
(self.tail.wrapping_sub(1) % N) == self.head
}
pub fn try_push(&mut self, val: u8) -> Result<(), ()> {
if self.is_full() {
return Err(());
}
self.buffer[self.head] = val;
self.head = (self.head + 1) % N;
Ok(())
}
pub fn try_pop(&mut self) -> Option<u8> {
if self.tail == self.head {
return None;
}
let val = self.buffer[self.tail];
self.tail = (self.tail + 1) % N;
Some(val)
}
}
fn receive_irq(uart: &mut GdbUart) {
let mut buffer = MicroRingBuf::<32>::default();
loop {
// Try to fill up the ring buffer with as many characters
// as can fit. This is to compensate for the fact that we do
// all of this processing in an interrupt context, and the
// hardware UART buffer is only a few characters deep.
while !buffer.is_full() {
if let Some(c) = uart.getc() {
buffer.try_push(c).ok();
} else {
break;
}
}
// If there is a character in the buffer, process it. Otherwise,
// we're done.
let Some(c) = buffer.try_pop() else { break };
process_character(c);
// If the GDB server goes away for some reason, reconstitute it
unsafe {
if GDB_STATE.is_none() {
init();
}
}
}
}
impl XousTarget {
pub fn new() -> XousTarget {
XousTarget {
pid: None,
inner: cpu::XousTargetInner::default(),
}
}
}
fn state_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: &GdbStubStateMachine<'a, T, C>,
) -> bool {
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => true,
GdbStubStateMachine::CtrlCInterrupt(_) | GdbStubStateMachine::Disconnected(_) => false,
}
}
fn ensure_can_accept_characters_inner<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
recurse_count: usize,
) -> Option<GdbStubStateMachine<'a, T, C>> {
if recurse_count == 0 {
return None;
}
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => Some(machine),
GdbStubStateMachine::CtrlCInterrupt(gdb_stm_inner) => {
if let Some(pid) = target.pid() {
crate::services::SystemServices::with_mut(|system_services| {
if let Err(e) = system_services.pause_process_for_debug(pid) {
println!("Unable to pause process {:?} for debug: {:?}", pid, e);
}
});
}
let Ok(new_server) = gdb_stm_inner.interrupt_handled(target, Some(MultiThreadStopReason::Signal(Signal::SIGINT))) else {
return None
};
ensure_can_accept_characters_inner(new_server, target, recurse_count - 1)
}
GdbStubStateMachine::Disconnected(gdb_stm_inner) => {
if let Some(pid) = target.take_pid() {
crate::services::SystemServices::with_mut(|system_services| {
system_services.resume_process_from_debug(pid).unwrap()
});
}
ensure_can_accept_characters_inner(
gdb_stm_inner.return_to_idle(),
target,
recurse_count - 1,
)
}
}
}
fn ensure_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
) -> Option<GdbStubStateMachine<'a, T, C>> {
ensure_can_accept_characters_inner(machine, target, 4)
}
/// Advance the GDB state.
///
/// Two states accept characters:
///
/// GdbStubStateMachine::Idle
/// GdbStubStateMachine::Running
///
/// Two states exist merely to transition to other states:
///
/// GdbStubStateMachine::CtrlCInterrupt
/// GdbStubStateMachine::Disconnected
fn process_character(byte: u8) {
let XousDebugState { mut target, server } = unsafe {
GDB_STATE.take().unwrap_or_else(|| {
init();
GDB_STATE.take().unwrap()
})
};
if !state_can_accept_characters(&server) {
println!("GDB server was not in a state to accept characters");
return;
}
let new_server = match server {
GdbStubStateMachine::Idle(gdb_stm_inner) => {
let Ok(gdb) = gdb_stm_inner.incoming_data(&mut target, byte).map_err(|e| println!("gdbstub error during idle operation: {:?}", e)) else {
return;
};
gdb
}
GdbStubStateMachine::Running(gdb_stm_inner) => {
// If we're here we were running but have stopped now (either
// because we hit Ctrl+c in gdb and hence got a serial interrupt
// or we hit a breakpoint).
match gdb_stm_inner.incoming_data(&mut target, byte) {
Ok(pumped_stm) => pumped_stm,
Err(GdbStubError::TargetError(e)) => {
println!("Target raised a fatal error: {:?}", e);
return;
}
Err(e) => |
}
}
_ => {
println!("GDB is in an unexpected state!");
return;
}
};
// If the user just hit Ctrl-C, then remove the pending interrupt that may or may not exist.
if let GdbStubStateMachine::CtrlCInterrupt(_) = &new_server {
target.unpatch_stepi(Tid::new(1).unwrap()).ok();
}
let Some(server) = ensure_can_accept_characters(new_server, &mut target) else {
println!("Couldn't convert GDB into a state that accepts characters");
return;
};
unsafe { GDB_STATE = Some(XousDebugState { target, server }) };
}
pub fn report_stop(_pid: xous_kernel::PID, tid: xous_kernel::TID, _pc: usize) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
target.unpatch_stepi(Tid::new(tid).unwrap()).ok();
let GdbStubStateMachine::Running(inner) = gdb else {
println!("GDB state machine was in an invalid state");
return;
};
let Ok(new_gdb) = inner.report_stop(
&mut target,
MultiThreadStopReason::SignalWithThread {
signal: Signal::EXC_BREAKPOINT,
tid: Tid::new(tid).unwrap(),
}
) else {
println!("Unable to report stop");
return;
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn report_terminated(pid: xous_kernel::PID) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
let new_gdb = match gdb {
GdbStubStateMachine::Running(inner) => {
match inner.report_stop(
&mut target,
MultiThreadStopReason::Signal(Signal::EXC_BAD_ACCESS),
) {
Ok(new_gdb) => new_gdb,
Err(e) => {
println!("Unable to report stop: {:?}", e);
return;
}
}
}
GdbStubStateMachine::CtrlCInterrupt(_inner) => {
println!("GDB state was in CtrlCInterrupt, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Disconnected(_inner) => {
println!("GDB state was in Disconnect, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Idle(inner) => {
println!("Please connect a debugger to debug process {}", pid);
GdbStubStateMachine::Idle(inner)
}
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn init() {
let mut uart = GdbUart::new(receive_irq).unwrap();
uart.enable();
let mut target = XousTarget::new();
let server = GdbStubBuilder::new(uart)
.with_packet_buffer(unsafe { &mut GDB_BUFFER })
.build()
.expect("unable to build gdb server")
.run_state_machine(&mut target)
.expect("unable to start gdb state machine");
unsafe {
GDB_STATE = Some(XousDebugState { target, server });
}
}
| {
println!("gdbstub error in DeferredStopReason.pump: {:?}", e);
return;
} | conditional_block |
gdb.rs | use gdbstub::common::{Signal, Tid};
use gdbstub::conn::Connection;
use gdbstub::stub::state_machine::GdbStubStateMachine;
use gdbstub::stub::{GdbStubBuilder, GdbStubError, MultiThreadStopReason};
use gdbstub::target::Target;
use crate::io::SerialRead;
use crate::platform::precursor::gdbuart::GdbUart;
mod breakpoints;
mod current_active_pid;
mod extended_mode;
mod monitor;
mod multi_thread_base;
mod multi_thread_resume;
mod multi_thread_single_step;
mod single_register_access;
mod target;
#[cfg(target_arch = "riscv32")]
#[path = "gdb/riscv.rs"]
mod cpu;
pub struct XousTarget {
pid: Option<xous_kernel::PID>,
inner: cpu::XousTargetInner,
}
pub struct XousDebugState<'a> {
pub target: XousTarget, | pub server: GdbStubStateMachine<'a, XousTarget, crate::platform::precursor::gdbuart::GdbUart>,
}
static mut GDB_STATE: Option<XousDebugState> = None;
static mut GDB_BUFFER: [u8; 4096] = [0u8; 4096];
trait ProcessPid {
fn pid(&self) -> Option<xous_kernel::PID>;
fn take_pid(&mut self) -> Option<xous_kernel::PID>;
}
impl ProcessPid for XousTarget {
fn pid(&self) -> Option<xous_kernel::PID> {
self.pid
}
fn take_pid(&mut self) -> Option<xous_kernel::PID> {
self.pid.take()
}
}
struct MicroRingBuf<const N: usize> {
buffer: [u8; N],
head: usize,
tail: usize,
}
impl<const N: usize> Default for MicroRingBuf<N> {
fn default() -> Self {
MicroRingBuf {
buffer: [0u8; N],
head: 0,
tail: 0,
}
}
}
impl<const N: usize> MicroRingBuf<N> {
// pub fn capacity(&self) -> usize {
// self.buffer.len()
// }
// pub fn len(&self) -> usize {
// self.head.wrapping_sub(self.tail) % N
// }
pub fn is_full(&self) -> bool {
(self.tail.wrapping_sub(1) % N) == self.head
}
pub fn try_push(&mut self, val: u8) -> Result<(), ()> {
if self.is_full() {
return Err(());
}
self.buffer[self.head] = val;
self.head = (self.head + 1) % N;
Ok(())
}
pub fn try_pop(&mut self) -> Option<u8> {
if self.tail == self.head {
return None;
}
let val = self.buffer[self.tail];
self.tail = (self.tail + 1) % N;
Some(val)
}
}
fn receive_irq(uart: &mut GdbUart) {
let mut buffer = MicroRingBuf::<32>::default();
loop {
// Try to fill up the ring buffer with as many characters
// as can fit. This is to compensate for the fact that we do
// all of this processing in an interrupt context, and the
// hardware UART buffer is only a few characters deep.
while !buffer.is_full() {
if let Some(c) = uart.getc() {
buffer.try_push(c).ok();
} else {
break;
}
}
// If there is a character in the buffer, process it. Otherwise,
// we're done.
let Some(c) = buffer.try_pop() else { break };
process_character(c);
// If the GDB server goes away for some reason, reconstitute it
unsafe {
if GDB_STATE.is_none() {
init();
}
}
}
}
impl XousTarget {
pub fn new() -> XousTarget {
XousTarget {
pid: None,
inner: cpu::XousTargetInner::default(),
}
}
}
fn state_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: &GdbStubStateMachine<'a, T, C>,
) -> bool {
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => true,
GdbStubStateMachine::CtrlCInterrupt(_) | GdbStubStateMachine::Disconnected(_) => false,
}
}
fn ensure_can_accept_characters_inner<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
recurse_count: usize,
) -> Option<GdbStubStateMachine<'a, T, C>> {
if recurse_count == 0 {
return None;
}
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => Some(machine),
GdbStubStateMachine::CtrlCInterrupt(gdb_stm_inner) => {
if let Some(pid) = target.pid() {
crate::services::SystemServices::with_mut(|system_services| {
if let Err(e) = system_services.pause_process_for_debug(pid) {
println!("Unable to pause process {:?} for debug: {:?}", pid, e);
}
});
}
let Ok(new_server) = gdb_stm_inner.interrupt_handled(target, Some(MultiThreadStopReason::Signal(Signal::SIGINT))) else {
return None
};
ensure_can_accept_characters_inner(new_server, target, recurse_count - 1)
}
GdbStubStateMachine::Disconnected(gdb_stm_inner) => {
if let Some(pid) = target.take_pid() {
crate::services::SystemServices::with_mut(|system_services| {
system_services.resume_process_from_debug(pid).unwrap()
});
}
ensure_can_accept_characters_inner(
gdb_stm_inner.return_to_idle(),
target,
recurse_count - 1,
)
}
}
}
fn ensure_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
) -> Option<GdbStubStateMachine<'a, T, C>> {
ensure_can_accept_characters_inner(machine, target, 4)
}
/// Advance the GDB state.
///
/// Two states accept characters:
///
/// GdbStubStateMachine::Idle
/// GdbStubStateMachine::Running
///
/// Two states exist merely to transition to other states:
///
/// GdbStubStateMachine::CtrlCInterrupt
/// GdbStubStateMachine::Disconnected
fn process_character(byte: u8) {
let XousDebugState { mut target, server } = unsafe {
GDB_STATE.take().unwrap_or_else(|| {
init();
GDB_STATE.take().unwrap()
})
};
if !state_can_accept_characters(&server) {
println!("GDB server was not in a state to accept characters");
return;
}
let new_server = match server {
GdbStubStateMachine::Idle(gdb_stm_inner) => {
let Ok(gdb) = gdb_stm_inner.incoming_data(&mut target, byte).map_err(|e| println!("gdbstub error during idle operation: {:?}", e)) else {
return;
};
gdb
}
GdbStubStateMachine::Running(gdb_stm_inner) => {
// If we're here we were running but have stopped now (either
// because we hit Ctrl+c in gdb and hence got a serial interrupt
// or we hit a breakpoint).
match gdb_stm_inner.incoming_data(&mut target, byte) {
Ok(pumped_stm) => pumped_stm,
Err(GdbStubError::TargetError(e)) => {
println!("Target raised a fatal error: {:?}", e);
return;
}
Err(e) => {
println!("gdbstub error in DeferredStopReason.pump: {:?}", e);
return;
}
}
}
_ => {
println!("GDB is in an unexpected state!");
return;
}
};
// If the user just hit Ctrl-C, then remove the pending interrupt that may or may not exist.
if let GdbStubStateMachine::CtrlCInterrupt(_) = &new_server {
target.unpatch_stepi(Tid::new(1).unwrap()).ok();
}
let Some(server) = ensure_can_accept_characters(new_server, &mut target) else {
println!("Couldn't convert GDB into a state that accepts characters");
return;
};
unsafe { GDB_STATE = Some(XousDebugState { target, server }) };
}
pub fn report_stop(_pid: xous_kernel::PID, tid: xous_kernel::TID, _pc: usize) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
target.unpatch_stepi(Tid::new(tid).unwrap()).ok();
let GdbStubStateMachine::Running(inner) = gdb else {
println!("GDB state machine was in an invalid state");
return;
};
let Ok(new_gdb) = inner.report_stop(
&mut target,
MultiThreadStopReason::SignalWithThread {
signal: Signal::EXC_BREAKPOINT,
tid: Tid::new(tid).unwrap(),
}
) else {
println!("Unable to report stop");
return;
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn report_terminated(pid: xous_kernel::PID) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
let new_gdb = match gdb {
GdbStubStateMachine::Running(inner) => {
match inner.report_stop(
&mut target,
MultiThreadStopReason::Signal(Signal::EXC_BAD_ACCESS),
) {
Ok(new_gdb) => new_gdb,
Err(e) => {
println!("Unable to report stop: {:?}", e);
return;
}
}
}
GdbStubStateMachine::CtrlCInterrupt(_inner) => {
println!("GDB state was in CtrlCInterrupt, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Disconnected(_inner) => {
println!("GDB state was in Disconnect, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Idle(inner) => {
println!("Please connect a debugger to debug process {}", pid);
GdbStubStateMachine::Idle(inner)
}
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn init() {
let mut uart = GdbUart::new(receive_irq).unwrap();
uart.enable();
let mut target = XousTarget::new();
let server = GdbStubBuilder::new(uart)
.with_packet_buffer(unsafe { &mut GDB_BUFFER })
.build()
.expect("unable to build gdb server")
.run_state_machine(&mut target)
.expect("unable to start gdb state machine");
unsafe {
GDB_STATE = Some(XousDebugState { target, server });
}
} | random_line_split | |
gdb.rs | use gdbstub::common::{Signal, Tid};
use gdbstub::conn::Connection;
use gdbstub::stub::state_machine::GdbStubStateMachine;
use gdbstub::stub::{GdbStubBuilder, GdbStubError, MultiThreadStopReason};
use gdbstub::target::Target;
use crate::io::SerialRead;
use crate::platform::precursor::gdbuart::GdbUart;
mod breakpoints;
mod current_active_pid;
mod extended_mode;
mod monitor;
mod multi_thread_base;
mod multi_thread_resume;
mod multi_thread_single_step;
mod single_register_access;
mod target;
#[cfg(target_arch = "riscv32")]
#[path = "gdb/riscv.rs"]
mod cpu;
pub struct XousTarget {
pid: Option<xous_kernel::PID>,
inner: cpu::XousTargetInner,
}
pub struct XousDebugState<'a> {
pub target: XousTarget,
pub server: GdbStubStateMachine<'a, XousTarget, crate::platform::precursor::gdbuart::GdbUart>,
}
static mut GDB_STATE: Option<XousDebugState> = None;
static mut GDB_BUFFER: [u8; 4096] = [0u8; 4096];
trait ProcessPid {
fn pid(&self) -> Option<xous_kernel::PID>;
fn take_pid(&mut self) -> Option<xous_kernel::PID>;
}
impl ProcessPid for XousTarget {
fn pid(&self) -> Option<xous_kernel::PID> {
self.pid
}
fn take_pid(&mut self) -> Option<xous_kernel::PID> {
self.pid.take()
}
}
struct MicroRingBuf<const N: usize> {
buffer: [u8; N],
head: usize,
tail: usize,
}
impl<const N: usize> Default for MicroRingBuf<N> {
fn | () -> Self {
MicroRingBuf {
buffer: [0u8; N],
head: 0,
tail: 0,
}
}
}
impl<const N: usize> MicroRingBuf<N> {
// pub fn capacity(&self) -> usize {
// self.buffer.len()
// }
// pub fn len(&self) -> usize {
// self.head.wrapping_sub(self.tail) % N
// }
pub fn is_full(&self) -> bool {
(self.tail.wrapping_sub(1) % N) == self.head
}
pub fn try_push(&mut self, val: u8) -> Result<(), ()> {
if self.is_full() {
return Err(());
}
self.buffer[self.head] = val;
self.head = (self.head + 1) % N;
Ok(())
}
pub fn try_pop(&mut self) -> Option<u8> {
if self.tail == self.head {
return None;
}
let val = self.buffer[self.tail];
self.tail = (self.tail + 1) % N;
Some(val)
}
}
fn receive_irq(uart: &mut GdbUart) {
let mut buffer = MicroRingBuf::<32>::default();
loop {
// Try to fill up the ring buffer with as many characters
// as can fit. This is to compensate for the fact that we do
// all of this processing in an interrupt context, and the
// hardware UART buffer is only a few characters deep.
while !buffer.is_full() {
if let Some(c) = uart.getc() {
buffer.try_push(c).ok();
} else {
break;
}
}
// If there is a character in the buffer, process it. Otherwise,
// we're done.
let Some(c) = buffer.try_pop() else { break };
process_character(c);
// If the GDB server goes away for some reason, reconstitute it
unsafe {
if GDB_STATE.is_none() {
init();
}
}
}
}
impl XousTarget {
pub fn new() -> XousTarget {
XousTarget {
pid: None,
inner: cpu::XousTargetInner::default(),
}
}
}
fn state_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: &GdbStubStateMachine<'a, T, C>,
) -> bool {
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => true,
GdbStubStateMachine::CtrlCInterrupt(_) | GdbStubStateMachine::Disconnected(_) => false,
}
}
fn ensure_can_accept_characters_inner<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
recurse_count: usize,
) -> Option<GdbStubStateMachine<'a, T, C>> {
if recurse_count == 0 {
return None;
}
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => Some(machine),
GdbStubStateMachine::CtrlCInterrupt(gdb_stm_inner) => {
if let Some(pid) = target.pid() {
crate::services::SystemServices::with_mut(|system_services| {
if let Err(e) = system_services.pause_process_for_debug(pid) {
println!("Unable to pause process {:?} for debug: {:?}", pid, e);
}
});
}
let Ok(new_server) = gdb_stm_inner.interrupt_handled(target, Some(MultiThreadStopReason::Signal(Signal::SIGINT))) else {
return None
};
ensure_can_accept_characters_inner(new_server, target, recurse_count - 1)
}
GdbStubStateMachine::Disconnected(gdb_stm_inner) => {
if let Some(pid) = target.take_pid() {
crate::services::SystemServices::with_mut(|system_services| {
system_services.resume_process_from_debug(pid).unwrap()
});
}
ensure_can_accept_characters_inner(
gdb_stm_inner.return_to_idle(),
target,
recurse_count - 1,
)
}
}
}
fn ensure_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
) -> Option<GdbStubStateMachine<'a, T, C>> {
ensure_can_accept_characters_inner(machine, target, 4)
}
/// Advance the GDB state.
///
/// Two states accept characters:
///
/// GdbStubStateMachine::Idle
/// GdbStubStateMachine::Running
///
/// Two states exist merely to transition to other states:
///
/// GdbStubStateMachine::CtrlCInterrupt
/// GdbStubStateMachine::Disconnected
fn process_character(byte: u8) {
let XousDebugState { mut target, server } = unsafe {
GDB_STATE.take().unwrap_or_else(|| {
init();
GDB_STATE.take().unwrap()
})
};
if !state_can_accept_characters(&server) {
println!("GDB server was not in a state to accept characters");
return;
}
let new_server = match server {
GdbStubStateMachine::Idle(gdb_stm_inner) => {
let Ok(gdb) = gdb_stm_inner.incoming_data(&mut target, byte).map_err(|e| println!("gdbstub error during idle operation: {:?}", e)) else {
return;
};
gdb
}
GdbStubStateMachine::Running(gdb_stm_inner) => {
// If we're here we were running but have stopped now (either
// because we hit Ctrl+c in gdb and hence got a serial interrupt
// or we hit a breakpoint).
match gdb_stm_inner.incoming_data(&mut target, byte) {
Ok(pumped_stm) => pumped_stm,
Err(GdbStubError::TargetError(e)) => {
println!("Target raised a fatal error: {:?}", e);
return;
}
Err(e) => {
println!("gdbstub error in DeferredStopReason.pump: {:?}", e);
return;
}
}
}
_ => {
println!("GDB is in an unexpected state!");
return;
}
};
// If the user just hit Ctrl-C, then remove the pending interrupt that may or may not exist.
if let GdbStubStateMachine::CtrlCInterrupt(_) = &new_server {
target.unpatch_stepi(Tid::new(1).unwrap()).ok();
}
let Some(server) = ensure_can_accept_characters(new_server, &mut target) else {
println!("Couldn't convert GDB into a state that accepts characters");
return;
};
unsafe { GDB_STATE = Some(XousDebugState { target, server }) };
}
pub fn report_stop(_pid: xous_kernel::PID, tid: xous_kernel::TID, _pc: usize) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
target.unpatch_stepi(Tid::new(tid).unwrap()).ok();
let GdbStubStateMachine::Running(inner) = gdb else {
println!("GDB state machine was in an invalid state");
return;
};
let Ok(new_gdb) = inner.report_stop(
&mut target,
MultiThreadStopReason::SignalWithThread {
signal: Signal::EXC_BREAKPOINT,
tid: Tid::new(tid).unwrap(),
}
) else {
println!("Unable to report stop");
return;
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn report_terminated(pid: xous_kernel::PID) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
let new_gdb = match gdb {
GdbStubStateMachine::Running(inner) => {
match inner.report_stop(
&mut target,
MultiThreadStopReason::Signal(Signal::EXC_BAD_ACCESS),
) {
Ok(new_gdb) => new_gdb,
Err(e) => {
println!("Unable to report stop: {:?}", e);
return;
}
}
}
GdbStubStateMachine::CtrlCInterrupt(_inner) => {
println!("GDB state was in CtrlCInterrupt, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Disconnected(_inner) => {
println!("GDB state was in Disconnect, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Idle(inner) => {
println!("Please connect a debugger to debug process {}", pid);
GdbStubStateMachine::Idle(inner)
}
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn init() {
let mut uart = GdbUart::new(receive_irq).unwrap();
uart.enable();
let mut target = XousTarget::new();
let server = GdbStubBuilder::new(uart)
.with_packet_buffer(unsafe { &mut GDB_BUFFER })
.build()
.expect("unable to build gdb server")
.run_state_machine(&mut target)
.expect("unable to start gdb state machine");
unsafe {
GDB_STATE = Some(XousDebugState { target, server });
}
}
| default | identifier_name |
tls.go | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"reflect"
"sort"
"strings"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/pkg/log"
)
// Match by source labels, the listener port where traffic comes in, the gateway on which the rule is being
// bound, etc. All these can be checked statically, since we are generating the configuration for a proxy
// with predefined labels, on a specific port.
func matchTLS(match *v1alpha3.TLSMatchAttributes, proxyLabels labels.Collection, gateways map[string]bool, port int, proxyNamespace string) bool {
if match == nil {
return true
}
gatewayMatch := len(match.Gateways) == 0
for _, gateway := range match.Gateways {
gatewayMatch = gatewayMatch || gateways[gateway]
}
labelMatch := proxyLabels.IsSupersetOf(match.SourceLabels)
portMatch := match.Port == 0 || match.Port == uint32(port)
nsMatch := match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace
return gatewayMatch && labelMatch && portMatch && nsMatch
}
// Match by source labels, the listener port where traffic comes in, the gateway on which the rule is being
// bound, etc. All these can be checked statically, since we are generating the configuration for a proxy
// with predefined labels, on a specific port.
func matchTCP(match *v1alpha3.L4MatchAttributes, proxyLabels labels.Collection, gateways map[string]bool, port int, proxyNamespace string) bool {
if match == nil {
return true
}
gatewayMatch := len(match.Gateways) == 0
for _, gateway := range match.Gateways {
gatewayMatch = gatewayMatch || gateways[gateway]
}
labelMatch := proxyLabels.IsSupersetOf(match.SourceLabels)
portMatch := match.Port == 0 || match.Port == uint32(port)
nsMatch := match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace
return gatewayMatch && labelMatch && portMatch && nsMatch
}
// Select the config pertaining to the service being processed.
func getConfigsForHost(hostname host.Name, configs []model.Config) []model.Config {
svcConfigs := make([]model.Config, 0)
for index := range configs {
virtualService := configs[index].Spec.(*v1alpha3.VirtualService)
for _, vsHost := range virtualService.Hosts {
if host.Name(vsHost).Matches(hostname) {
svcConfigs = append(svcConfigs, configs[index])
break
}
}
}
return svcConfigs
}
// hashRuntimeTLSMatchPredicates hashes runtime predicates of a TLS match
func hashRuntimeTLSMatchPredicates(match *v1alpha3.TLSMatchAttributes) string {
return strings.Join(match.SniHosts, ",") + "|" + strings.Join(match.DestinationSubnets, ",")
}
// buildSidecarOutboundTLSFilterChainOpts builds the filter chain options for a
// sidecar's outbound listener on a TLS/HTTPS port. One filter chain is emitted
// per unique set of runtime TLS match predicates (SNI hosts and destination
// subnets) drawn from the supplied virtual service configs. When no virtual
// service TLS block matches this proxy/port, a fallback chain that routes
// directly to the service's own cluster is appended instead.
// Returns nil when listenPort's protocol is not TLS.
func buildSidecarOutboundTLSFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
	service *model.Service, listenPort *model.Port,
	gateways map[string]bool, configs []model.Config) []*filterChainOpts {
	if !listenPort.Protocol.IsTLS() {
		return nil
	}
	actualWildcard, _ := getActualWildcardAndLocalHost(node)
	// TLS matches are composed of runtime and static predicates.
	// Static predicates can be evaluated during the generation of the config. Examples: gateway, source labels, etc.
	// Runtime predicates cannot be evaluated during config generation. Instead the proxy must be configured to
	// evaluate them. Examples: SNI hosts, source/destination subnets, etc.
	//
	// A list of matches may contain duplicate runtime matches, but different static matches. For example:
	//
	//   {sni_hosts: A, sourceLabels: X} => destination M
	//   {sni_hosts: A, sourceLabels: *} => destination N
	//
	// For a proxy with labels X, we can evaluate the static predicates to get:
	//   {sni_hosts: A} => destination M
	//   {sni_hosts: A} => destination N
	//
	// The matches have the same runtime predicates. Since the second match can never be reached, we only
	// want to generate config for the first match.
	//
	// To achieve this in this function we keep track of which runtime matches we have already generated config for
	// and only add config if we have not already generated config for that set of runtime predicates.
	matchHasBeenHandled := make(map[string]bool) // Runtime predicate set -> have we generated config for this set?
	// Is there a virtual service with a TLS block that matches us?
	hasTLSMatch := false
	out := make([]*filterChainOpts, 0)
	for _, cfg := range configs {
		virtualService := cfg.Spec.(*v1alpha3.VirtualService)
		for _, tls := range virtualService.Tls {
			for _, match := range tls.Match {
				if matchTLS(match, labels.Collection{node.Metadata.Labels}, gateways, listenPort.Port, node.Metadata.Namespace) {
					// Use the service's CIDRs.
					// But if a virtual service overrides it with its own destination subnet match
					// give preference to the user provided one.
					// destinationCIDR will be empty for services with VIPs.
					destinationCIDRs := []string{destinationCIDR}
					// Only set CIDR match if the listener is bound to an IP.
					// If it's bound to a unix domain socket, then ignore the CIDR matches.
					// Unix domain socket bound ports have Port value set to 0.
					if len(match.DestinationSubnets) > 0 && listenPort.Port > 0 {
						destinationCIDRs = match.DestinationSubnets
					}
					matchHash := hashRuntimeTLSMatchPredicates(match)
					if !matchHasBeenHandled[matchHash] {
						out = append(out, &filterChainOpts{
							metadata:         util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
							sniHosts:         match.SniHosts,
							destinationCIDRs: destinationCIDRs,
							networkFilters:   buildOutboundNetworkFilters(node, tls.Route, push, listenPort, cfg.ConfigMeta),
						})
						hasTLSMatch = true
					}
					// Record the predicate set even when it was a duplicate, so later
					// duplicates are also suppressed.
					matchHasBeenHandled[matchHash] = true
				}
			}
		}
	}
	// HTTPS or TLS ports without associated virtual service
	if !hasTLSMatch {
		var sniHosts []string
		// In case of a sidecar config with user defined port, if the user specified port is not the same as the
		// service's port, then pick the service port if and only if the service has only one port. If service
		// has multiple ports, then route to a cluster with the listener port (i.e. sidecar defined port) - the
		// traffic will most likely blackhole.
		// NOTE(review): service is dereferenced here without a nil check; callers appear
		// to guarantee a non-nil service on this path — confirm before reusing elsewhere.
		port := listenPort.Port
		if len(service.Ports) == 1 {
			port = service.Ports[0].Port
		}
		clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port)
		statPrefix := clusterName
		// If stat name is configured, use it to build the stat prefix.
		if len(push.Mesh.OutboundClusterStatName) != 0 {
			statPrefix = util.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), "", &model.Port{Port: port}, service.Attributes)
		}
		// Use the hostname as the SNI value if and only if we do not have a destination VIP or if the destination is a CIDR.
		// In both cases, the listener will be bound to 0.0.0.0. So SNI match is the only way to distinguish different
		// target services. If we have a VIP, then we know the destination. There is no need to do a SNI match. It saves us from
		// having to generate expensive permutations of the host name just like RDS does..
		// NOTE that we cannot have two services with the same VIP as our listener build logic will treat it as a collision and
		// ignore one of the services.
		svcListenAddress := service.GetServiceAddressForProxy(node)
		if strings.Contains(svcListenAddress, "/") {
			// Address is a CIDR, already captured by destinationCIDR parameter.
			svcListenAddress = ""
		}
		if len(destinationCIDR) > 0 || len(svcListenAddress) == 0 || svcListenAddress == actualWildcard {
			sniHosts = []string{string(service.Hostname)}
		}
		out = append(out, &filterChainOpts{
			sniHosts:         sniHosts,
			destinationCIDRs: []string{destinationCIDR},
			networkFilters:   buildOutboundNetworkFiltersWithSingleDestination(push, node, statPrefix, clusterName, listenPort),
		})
	}
	return out
}
func buildSidecarOutboundTCPFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
service *model.Service, listenPort *model.Port,
gateways map[string]bool, configs []model.Config) []*filterChainOpts |
// This function can be called for namespaces with the auto generated sidecar, i.e. once per service and per port.
// OR, it could be called in the context of an egress listener with specific TCP port on a sidecar config.
// In the latter case, there is no service associated with this listen port. So we have to account for this
// missing service throughout this file.
//
// buildSidecarOutboundTCPTLSFilterChainOpts aggregates the filter chain options
// for an outbound sidecar listener: TLS chains first, then plain-TCP chains,
// both built from the virtual service configs scoped to the service's hostname
// (or from all configs when service is nil).
func buildSidecarOutboundTCPTLSFilterChainOpts(node *model.Proxy, push *model.PushContext,
	configs []model.Config, destinationCIDR string, service *model.Service, listenPort *model.Port,
	gateways map[string]bool) []*filterChainOpts {
	out := make([]*filterChainOpts, 0)
	var svcConfigs []model.Config
	if service != nil {
		// Restrict to the virtual services that apply to this service's hostname.
		svcConfigs = getConfigsForHost(service.Hostname, configs)
	} else {
		// No associated service (egress listener with an explicit TCP port):
		// consider every supplied config.
		svcConfigs = configs
	}
	// TLS chains are appended before TCP chains; order is preserved in the output.
	out = append(out, buildSidecarOutboundTLSFilterChainOpts(node, push, destinationCIDR, service,
		listenPort, gateways, svcConfigs)...)
	out = append(out, buildSidecarOutboundTCPFilterChainOpts(node, push, destinationCIDR, service,
		listenPort, gateways, svcConfigs)...)
	return out
}
| {
	// Body of buildSidecarOutboundTCPFilterChainOpts (signature declared above):
	// builds plain-TCP filter chain options for an outbound sidecar listener.
	// TLS ports are handled by buildSidecarOutboundTLSFilterChainOpts instead.
	if listenPort.Protocol.IsTLS() {
		return nil
	}
	out := make([]*filterChainOpts, 0)
	// very basic TCP
	// break as soon as we add one network filter with no destination addresses to match
	// This is the terminating condition in the filter chain match list
	defaultRouteAdded := false
TcpLoop:
	for _, cfg := range configs {
		virtualService := cfg.Spec.(*v1alpha3.VirtualService)
		for _, tcp := range virtualService.Tcp {
			destinationCIDRs := []string{destinationCIDR}
			if len(tcp.Match) == 0 {
				// implicit match: a TCP route with no match blocks applies to all
				// traffic, so it is terminal — stop scanning further configs.
				out = append(out, &filterChainOpts{
					metadata:         util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
					destinationCIDRs: destinationCIDRs,
					networkFilters:   buildOutboundNetworkFilters(node, tcp.Route, push, listenPort, cfg.ConfigMeta),
				})
				defaultRouteAdded = true
				break TcpLoop
			}
			// Use the service's virtual address first.
			// But if a virtual service overrides it with its own destination subnet match
			// give preference to the user provided one.
			virtualServiceDestinationSubnets := make([]string, 0)
			for _, match := range tcp.Match {
				if matchTCP(match, labels.Collection{node.Metadata.Labels}, gateways, listenPort.Port, node.Metadata.Namespace) {
					// Scan all the match blocks:
					// if we find any match block without a runtime destination subnet match,
					// i.e. match any destination address, then we treat it as the terminal match/catch all match
					// and break out of the loop. We also treat it as a terminal match if the listener is bound
					// to a unix domain socket (Port == 0).
					// But if we find only runtime destination subnet matches in all match blocks, collect them
					// (this is similar to virtual hosts in http) and create filter chain match accordingly.
					if len(match.DestinationSubnets) == 0 || listenPort.Port == 0 {
						out = append(out, &filterChainOpts{
							metadata:         util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
							destinationCIDRs: destinationCIDRs,
							networkFilters:   buildOutboundNetworkFilters(node, tcp.Route, push, listenPort, cfg.ConfigMeta),
						})
						defaultRouteAdded = true
						break TcpLoop
					} else {
						virtualServiceDestinationSubnets = append(virtualServiceDestinationSubnets, match.DestinationSubnets...)
					}
				}
			}
			if len(virtualServiceDestinationSubnets) > 0 {
				out = append(out, &filterChainOpts{
					destinationCIDRs: virtualServiceDestinationSubnets,
					networkFilters:   buildOutboundNetworkFilters(node, tcp.Route, push, listenPort, cfg.ConfigMeta),
				})
				// If at this point there is a filter chain generated with the same CIDR match as the
				// one that may be generated for the service as the default route, do not generate it.
				// Otherwise, Envoy will complain about having filter chains with identical matches
				// and will reject the config.
				// (sort.Strings mutates both slices in place; safe here since neither is reused unsorted.)
				sort.Strings(virtualServiceDestinationSubnets)
				sort.Strings(destinationCIDRs)
				if reflect.DeepEqual(virtualServiceDestinationSubnets, destinationCIDRs) {
					log.Warnf("Existing filter chain with same matching CIDR: %v.", destinationCIDRs)
					defaultRouteAdded = true
				}
			}
		}
	}
	if !defaultRouteAdded {
		// No terminal/catch-all chain was produced above; fall back to routing
		// straight to the service's own cluster.
		// In case of a sidecar config with user defined port, if the user specified port is not the same as the
		// service's port, then pick the service port if and only if the service has only one port. If service
		// has multiple ports, then route to a cluster with the listener port (i.e. sidecar defined port) - the
		// traffic will most likely blackhole.
		// NOTE(review): service is dereferenced here without a nil check, yet the comment
		// above buildSidecarOutboundTCPTLSFilterChainOpts says service may be nil for
		// egress-listener ports — confirm callers guard this path against nil.
		port := listenPort.Port
		if len(service.Ports) == 1 {
			port = service.Ports[0].Port
		}
		clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port)
		statPrefix := clusterName
		// If stat name is configured, use it to build the stat prefix.
		if len(push.Mesh.OutboundClusterStatName) != 0 {
			statPrefix = util.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), "", &model.Port{Port: port}, service.Attributes)
		}
		out = append(out, &filterChainOpts{
			destinationCIDRs: []string{destinationCIDR},
			networkFilters:   buildOutboundNetworkFiltersWithSingleDestination(push, node, statPrefix, clusterName, listenPort),
		})
	}
	return out
} | identifier_body
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.